/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/kthread.h>
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

static const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGA10",
	"RAVEN",
	"LAST",
};
bool amdgpu_device_is_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev->flags & AMD_IS_PX)
		return true;
	return false;
}
/*
 * MMIO register access helper functions.
 */
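/**
 * amdgpu_mm_rreg - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Reads a register either directly through the MMIO aperture, via the
 * MM_INDEX/MM_DATA indirection for offsets beyond the mapped range, or
 * through the KIQ when running as an SR-IOV guest at runtime.
 * Returns the 32 bit value from the offset specified.
 */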
uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags)
{
	uint32_t ret;

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_rreg(adev, reg);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}
	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
	return ret;
}
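/**
 * amdgpu_mm_wreg - write to a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified, using the same
 * direct, indexed, or KIQ paths as amdgpu_mm_rreg().
 */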
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags)
{
	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);

	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
		return amdgpu_virt_kiq_wreg(adev, reg, v);

	if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	else {
		unsigned long flags;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
		writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
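/**
 * amdgpu_io_rreg - read an IO register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 *
 * Returns the 32 bit value from the offset specified, falling back to the
 * indexed IO ports when the offset lies outside the directly mapped range.
 */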
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
{
	if ((reg * 4) < adev->rio_mem_size)
		return ioread32(adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		return ioread32(adev->rio_mem + (mmMM_DATA * 4));
	}
}
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
		adev->last_mm_index = v;
	}

	if ((reg * 4) < adev->rio_mem_size)
		iowrite32(v, adev->rio_mem + (reg * 4));
	else {
		iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
		iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
	}

	if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
		udelay(500);
	}
}
/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}
/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}
/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}
/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}
/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}
/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}
/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{
	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     AMDGPU_DOORBELL_MAX_ASSIGNMENT+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}
/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}
/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */
/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}
/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}
/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}
/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
/**
 * amdgpu_device_vram_location - try to find VRAM location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter.
 */
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)amdgpu_vram_limit << 20;

	mc->vram_start = base;
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
/**
 * amdgpu_device_gart_location - try to find GTT location
 * @adev: amdgpu device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_mc *mc)
{
	u64 size_af, size_bf;

	size_af = adev->mc.mc_mask - mc->vram_end;
	size_bf = mc->vram_start;
	if (size_bf > size_af) {
		if (mc->gart_size > size_bf) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_bf;
		}
		mc->gart_start = 0;
	} else {
		if (mc->gart_size > size_af) {
			dev_warn(adev->dev, "limiting GTT\n");
			mc->gart_size = size_af;
		}
		/* VCE doesn't like it when BOs cross a 4GB segment, so align
		 * the GART base on a 4GB boundary as well.
		 */
		mc->gart_start = ALIGN(mc->vram_end + 1, 0x100000000ULL);
	}
	mc->gart_end = mc->gart_start + mc->gart_size - 1;
	dev_info(adev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gart_size >> 20, mc->gart_start, mc->gart_end);
}
/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the size we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	u64 space_needed = roundup_pow_of_two(adev->mc.real_vram_size);
	u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1;
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}
/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw need post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup
 * or post is needed if hw reset is performed.
 * Returns true if need or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In whole GPU pass-through virtualization case, after VM reboot
		 * some old smc fw still need driver do vPost otherwise gpu hang, while
		 * those smc fw version above 22.15 doesn't have this flaw, so we force
		 * vpost executed for smc version below 22.15
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;

			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}
/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
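/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter and clamps
 * it back to the default when the value is out of range.
 */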
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}
/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
			 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
	    !is_power_of_2(amdgpu_vram_page_split))) {
		dev_warn(adev->dev, "invalid VRAM page split (%d)\n",
			 amdgpu_vram_page_split);
		amdgpu_vram_page_split = 1024;
	}

	if (amdgpu_lockup_timeout == 0) {
		dev_warn(adev->dev, "lockup_timeout must be > 0, adjusting to 10000\n");
		amdgpu_lockup_timeout = 10000;
	}
}
/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if (amdgpu_device_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("amdgpu: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		amdgpu_device_resume(dev, true, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		pr_info("amdgpu: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};
int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
			if (r)
				return r;
			break;
		}
	}
	return 0;
}
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;
}
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}
/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_DEBUG("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		struct drm_device *ddev = adev->ddev;
		const char *pci_address_name = pci_name(ddev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_RAVEN:
		chip_name = "raven";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}
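/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs.  The set of IP blocks that
 * make up the asic is selected per family and each block's early_init
 * callback is run.  This is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */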
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	switch (adev->asic_type) {
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if ((adev->asic_type == CHIP_BONAIRE) || (adev->asic_type == CHIP_HAWAII))
			adev->family = AMDGPU_FAMILY_CI;
		else
			adev->family = AMDGPU_FAMILY_KV;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		if (adev->asic_type == CHIP_RAVEN)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	r = amdgpu_device_parse_gpu_info_fw(adev);
	if (r)
		return r;

	amdgpu_amdkfd_device_probe(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return -EAGAIN;
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
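/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run.  sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */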
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.sw = true;
		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				return r;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					return r;
				}
			}
		}
	}

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		/* gmc hw init is done early */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	amdgpu_amdkfd_device_init(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return 0;
}
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	return !!memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM);
}
static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_GATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.late_initialized = true;
		}
	}

	mod_delayed_work(system_wq, &adev->late_init_work,
			 msecs_to_jiffies(AMDGPU_RESUME_MS));

	amdgpu_device_fill_reset_magic(adev);

	return 0;
}
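/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs.  The SMC block is disabled first,
 * then the remaining blocks are walked in reverse order and the hw_fini,
 * sw_fini, and late_fini callbacks are run to tear down the hardware and
 * software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */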
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_amdkfd_device_fini(adev);
	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) {
			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_free_static_csa(adev);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}
static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, late_init_work.work);
	amdgpu_device_ip_late_set_cg_state(adev);
}
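/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run.  suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */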
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	/* ungate SMC block first */
	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
						   AMD_CG_STATE_UNGATE);
	if (r) {
		DRM_ERROR("set_clockgating_state(ungate) SMC failed %d\n", r);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* ungate blocks so that suspend can properly shut them down;
		 * the SMC block was already ungated above, so compare the IP
		 * block type rather than the loop index here */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     AMD_CG_STATE_UNGATE);
			if (r) {
				DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
		}
		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return 0;
}
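/**
 * amdgpu_device_ip_reinit_early_sriov - re-init "early" IP blocks for SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * After a GPU reset of an SR-IOV function, the GMC, COMMON and IH blocks
 * must be brought back up first, in this fixed order, before the remaining
 * blocks are re-initialized by the late SR-IOV re-init pass below.
 */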
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}
static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
			    !block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
		}
	}

	return 0;
}
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
	}

	return 0;
}
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}
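/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Queries the VBIOS (atomfirmware or atombios, depending on the firmware
 * type) for an SR-IOV capable virtualization table and records the result
 * in adev->virt.caps.
 */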
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS11:
	case CHIP_POLARIS10:
	case CHIP_POLARIS12:
	case CHIP_TONGA:
	case CHIP_FIJI:
#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
		return amdgpu_dc != 0;
#endif
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return amdgpu_dc > 0;
	case CHIP_VEGA10:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
	case CHIP_RAVEN:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		return false;
	}
}
/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}
/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	bool runtime = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->dev = &pdev->dev;
	adev->ddev = ddev;
	adev->pdev = pdev;
	adev->flags = flags;
	adev->asic_type = flags & AMD_ASIC_MASK;
	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	adev->mc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_rings = 0;
	adev->gart.gart_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	atomic_set(&adev->irq.ih.lock, 0);
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	mutex_init(&adev->lock_reset);

	amdgpu_device_check_arguments(adev);

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->ring_lru_list);
	spin_lock_init(&adev->ring_lru_list_lock);

	INIT_DELAYED_WORK(&adev->late_init_work,
			  amdgpu_device_ip_late_init_func_handler);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* doorbell bar mapping */
	amdgpu_device_doorbell_init(adev);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(adev->pdev, i) & IORESOURCE_IO) {
			adev->rio_mem_size = pci_resource_len(adev->pdev, i);
			adev->rio_mem = pci_iomap(adev->pdev, i, adev->rio_mem_size);
			break;
		}
	}
	if (adev->rio_mem == NULL)
		DRM_INFO("PCI I/O BAR is not found.\n");

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		return r;

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_is_px(ddev))
		runtime = true;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);

	/* Read BIOS */
	if (!amdgpu_get_bios(adev)) {
		r = -EINVAL;
		goto failed;
	}

	r = amdgpu_atombios_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}

	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev->ddev);

	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto failed;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		amdgpu_device_ip_fini(adev);
		goto failed;
	}

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto failed;
	}

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r)
		DRM_ERROR("registering pm debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_gem_init(adev);
	if (r)
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_regs_init(adev);
	if (r)
		DRM_ERROR("registering register debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_firmware_init(adev);
	if (r)
		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);

	r = amdgpu_debugfs_init(adev);
	if (r)
		DRM_ERROR("Creating debugfs files failed (%d).\n", r);

	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	r = amdgpu_device_ip_late_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
		goto failed;
	}

	return 0;

failed:
	amdgpu_vf_error_trans_all(adev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

	return r;
}
/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	int r;

	DRM_INFO("amdgpu: finishing device.\n");
	adev->shutdown = true;
	if (adev->mode_info.mode_config_initialized)
		drm_crtc_force_disable_all(adev->ddev);

	amdgpu_ib_pool_fini(adev);
	amdgpu_fence_driver_fini(adev);
	amdgpu_fbdev_fini(adev);
	r = amdgpu_device_ip_fini(adev);
	if (adev->firmware.gpu_info_fw) {
		release_firmware(adev->firmware.gpu_info_fw);
		adev->firmware.gpu_info_fw = NULL;
	}
	adev->accel_working = false;
	cancel_delayed_work_sync(&adev->late_init_work);
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);
	amdgpu_atombios_fini(adev);
	kfree(adev->bios);
	adev->bios = NULL;
	if (!pci_is_thunderbolt_attached(adev->pdev))
		vga_switcheroo_unregister_client(adev->pdev);
	if (adev->flags & AMD_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	vga_client_register(adev->pdev, NULL, NULL, NULL);
	if (adev->rio_mem)
		pci_iounmap(adev->pdev, adev->rio_mem);
	adev->rio_mem = NULL;
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);
	amdgpu_pm_sysfs_fini(adev);
	amdgpu_debugfs_regs_cleanup(adev);
}
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: suspend state
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct amdgpu_device *adev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	adev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	if (!amdgpu_device_has_dc_support(adev)) {
		/* turn off display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
		}
		drm_modeset_unlock_all(dev);
	}

	amdgpu_amdkfd_suspend(adev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
		struct amdgpu_bo *robj;

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				amdgpu_bo_unpin(aobj);
				amdgpu_bo_unreserve(aobj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_amdgpu_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
			r = amdgpu_bo_reserve(robj, true);
			if (r == 0) {
				amdgpu_bo_unpin(robj);
				amdgpu_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

	amdgpu_fence_driver_suspend(adev);

	r = amdgpu_device_ip_suspend(adev);

	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	} else {
		r = amdgpu_asic_reset(adev);
		if (r)
			DRM_ERROR("amdgpu asic reset failed\n");
	}

	if (fbcon) {
		console_lock();
		amdgpu_fbdev_set_suspend(adev, 1);
		console_unlock();
	}
	return 0;
}
/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device and restore its config space
 * @fbcon: true to also resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_crtc *crtc;
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon)
		console_lock();

	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		r = pci_enable_device(dev->pdev);
		if (r)
			goto unlock;
	}

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_atom_asic_init(adev->mode_info.atom_context);
		if (r)
			DRM_ERROR("amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
		goto unlock;
	}
	amdgpu_fence_driver_resume(adev);

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		goto unlock;

	/* pin cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

		if (amdgpu_crtc->cursor_bo) {
			struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
			r = amdgpu_bo_reserve(aobj, true);
			if (r == 0) {
				r = amdgpu_bo_pin(aobj,
						  AMDGPU_GEM_DOMAIN_VRAM,
						  &amdgpu_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				amdgpu_bo_unreserve(aobj);
			}
		}
	}
	r = amdgpu_amdkfd_resume(adev);
	if (r)
		return r;

	/* blat the mode back in */
	if (fbcon) {
		if (!amdgpu_device_has_dc_support(adev)) {
			/* pre DCE11 */
			drm_helper_resume_force_mode(dev);

			/* turn on display hw */
			drm_modeset_lock_all(dev);
			list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
				drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
			}
			drm_modeset_unlock_all(dev);
		}
	}

	drm_kms_helper_poll_enable(dev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

unlock:
	if (fbcon)
		console_unlock();

	return r;
}
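/*
 * Illustrative sketch only: the matching resume path. Again the
 * function name is hypothetical; a system-resume wrapper would pass
 * resume=true and fbcon=true.
 */
#if 0
static int example_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = pci_get_drvdata(to_pci_dev(dev));

	/* resume=true: re-enable the PCI device and restore config space */
	return amdgpu_device_resume(drm_dev, true, true);
}
#endif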
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}
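/*
 * Note: amdgpu_device_ip_check_soft_reset() latches the per-IP hang
 * state in adev->ip_blocks[i].status.hang; the helpers below (the
 * pre/soft/post reset steps and the full-reset check) consume that
 * latched state rather than re-querying the hardware.
 */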
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				DRM_INFO("Some block need full reset!\n");
				return true;
			}
		}
	}
	return false;
}
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}
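/*
 * The soft-reset helpers above run as a fixed sequence in
 * amdgpu_device_reset() below: first the need_full_reset check, then
 * pre_soft_reset -> soft_reset -> post_soft_reset, followed by a
 * re-run of check_soft_reset; if any block is still hung afterwards,
 * the code falls back to a full ASIC reset.
 */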
static int amdgpu_device_recover_vram_from_shadow(struct amdgpu_device *adev,
						  struct amdgpu_ring *ring,
						  struct amdgpu_bo *bo,
						  struct dma_fence **fence)
{
	uint32_t domain;
	int r;

	if (!bo->shadow)
		return 0;

	r = amdgpu_bo_reserve(bo, true);
	if (r)
		return r;
	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	/* if bo has been evicted, then no need to recover */
	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
		r = amdgpu_bo_validate(bo->shadow);
		if (r) {
			DRM_ERROR("bo validate failed!\n");
			goto err;
		}

		r = amdgpu_bo_restore_from_shadow(adev, ring, bo,
						  NULL, fence, true);
		if (r) {
			DRM_ERROR("recover page table failed!\n");
			goto err;
		}
	}
err:
	amdgpu_bo_unreserve(bo);
	return r;
}
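/*
 * Background for the helper above: VRAM BOs that must survive a GPU
 * reset (page tables in particular) keep a "shadow" copy in GTT,
 * i.e. in system memory that a reset cannot clobber. When VRAM is
 * lost, the recovery path walks adev->shadow_list and copies each
 * shadow back into VRAM via the buffer-funcs ring, returning a fence
 * per BO so the caller can wait for the copies to complete.
 */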
/**
 * amdgpu_device_reset - reset ASIC/GPU for bare-metal or passthrough
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param that tells the caller the reset result
 *
 * Attempts a soft reset first, falls back to a full reset if needed,
 * and reinitializes the ASIC.
 * Returns 0 on success, a negative error code on failure.
 */
static int amdgpu_device_reset(struct amdgpu_device *adev,
			       uint64_t *reset_flags)
{
	bool need_full_reset, vram_lost = false;
	int r;

	need_full_reset = amdgpu_device_ip_need_full_reset(adev);

	if (!need_full_reset) {
		amdgpu_device_ip_pre_soft_reset(adev);
		r = amdgpu_device_ip_soft_reset(adev);
		amdgpu_device_ip_post_soft_reset(adev);
		if (r || amdgpu_device_ip_check_soft_reset(adev)) {
			DRM_INFO("soft reset failed, will fallback to full reset!\n");
			need_full_reset = true;
		}
	}

	if (need_full_reset) {
		r = amdgpu_device_ip_suspend(adev);

retry:
		r = amdgpu_asic_reset(adev);
		/* post card */
		amdgpu_atom_asic_init(adev->mode_info.atom_context);

		if (!r) {
			dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
			r = amdgpu_device_ip_resume_phase1(adev);
			if (r)
				goto out;

			vram_lost = amdgpu_device_check_vram_lost(adev);
			if (vram_lost) {
				DRM_ERROR("VRAM is lost!\n");
				atomic_inc(&adev->vram_lost_counter);
			}

			r = amdgpu_gtt_mgr_recover(
				&adev->mman.bdev.man[TTM_PL_TT]);
			if (r)
				goto out;

			r = amdgpu_device_ip_resume_phase2(adev);
			if (r)
				goto out;

			if (vram_lost)
				amdgpu_device_fill_reset_magic(adev);
		}
	}

out:
	if (!r) {
		amdgpu_irq_gpu_reset_resume_helper(adev);
		r = amdgpu_ib_ring_tests(adev);
		if (r) {
			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
			r = amdgpu_device_ip_suspend(adev);
			need_full_reset = true;
			goto retry;
		}
	}

	if (reset_flags) {
		if (vram_lost)
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;

		if (need_full_reset)
			(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}
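/*
 * Illustrative sketch only: how a caller can interpret the
 * reset_flags output word. The helper name is hypothetical; the real
 * consumer of these flags is amdgpu_device_gpu_recover() below.
 */
#if 0
static void example_report_reset(struct amdgpu_device *adev, uint64_t flags)
{
	if (flags & AMDGPU_RESET_INFO_VRAM_LOST)
		dev_warn(adev->dev, "VRAM contents were lost by the reset\n");
	if (flags & AMDGPU_RESET_INFO_FULLRESET)
		dev_info(adev->dev, "a full ASIC reset was performed\n");
}
#endif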
/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu device pointer
 * @reset_flags: output param that tells the caller the reset result
 * @from_hypervisor: request the VF FLR from the hypervisor
 *
 * Performs a VF FLR and reinitializes the ASIC.
 * Returns 0 on success, a negative error code on failure.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     uint64_t *reset_flags,
				     bool from_hypervisor)
{
	int r;

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	/* we need recover gart prior to run SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(&adev->mman.bdev.man[TTM_PL_TT]);

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);
	if (r)
		dev_err(adev->dev, "[GPU_RESET] ib ring test failed (%d).\n", r);

error:
	/* release full control of GPU after ib test */
	amdgpu_virt_release_full_gpu(adev, true);

	if (reset_flags) {
		if (adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
			(*reset_flags) |= AMDGPU_RESET_INFO_VRAM_LOST;
			atomic_inc(&adev->vram_lost_counter);
		}

		/* VF FLR or hotlink reset is always full-reset */
		(*reset_flags) |= AMDGPU_RESET_INFO_FULLRESET;
	}

	return r;
}
/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu device pointer
 * @job: which job triggered the hang
 * @force: forces reset regardless of amdgpu_gpu_recovery
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force)
{
	struct drm_atomic_state *state = NULL;
	uint64_t reset_flags = 0;
	int i, r, resched;

	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
		DRM_INFO("No hardware hang detected. Did some blocks stall?\n");
		return 0;
	}

	if (!force && (amdgpu_gpu_recovery == 0 ||
			(amdgpu_gpu_recovery == -1 && !amdgpu_sriov_vf(adev)))) {
		DRM_INFO("GPU recovery disabled.\n");
		return 0;
	}

	dev_info(adev->dev, "GPU reset begin!\n");

	mutex_lock(&adev->lock_reset);
	atomic_inc(&adev->gpu_reset_counter);
	adev->in_gpu_reset = 1;

	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
	/* store modesetting */
	if (amdgpu_device_has_dc_support(adev))
		state = drm_atomic_helper_suspend(adev->ddev);

	/* block scheduler */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* only focus on the ring hit timeout if &job not NULL */
		if (job && job->ring->idx != i)
			continue;

		kthread_park(ring->sched.thread);
		drm_sched_hw_job_reset(&ring->sched, &job->base);

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (amdgpu_sriov_vf(adev))
		r = amdgpu_device_reset_sriov(adev, &reset_flags, job ? false : true);
	else
		r = amdgpu_device_reset(adev, &reset_flags);

	if (!r) {
		if (((reset_flags & AMDGPU_RESET_INFO_FULLRESET) && !(adev->flags & AMD_IS_APU)) ||
		    (reset_flags & AMDGPU_RESET_INFO_VRAM_LOST)) {
			struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
			struct amdgpu_bo *bo, *tmp;
			struct dma_fence *fence = NULL, *next = NULL;

			DRM_INFO("recover vram bo from shadow\n");
			mutex_lock(&adev->shadow_list_lock);
			list_for_each_entry_safe(bo, tmp, &adev->shadow_list, shadow_list) {
				next = NULL;
				amdgpu_device_recover_vram_from_shadow(adev, ring, bo, &next);
				if (fence) {
					r = dma_fence_wait(fence, false);
					if (r) {
						WARN(r, "recovery from shadow isn't completed\n");
						break;
					}
				}

				dma_fence_put(fence);
				fence = next;
			}
			mutex_unlock(&adev->shadow_list_lock);
			if (fence) {
				r = dma_fence_wait(fence, false);
				if (r)
					WARN(r, "recovery from shadow isn't completed\n");
			}
			dma_fence_put(fence);
		}

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring hit timeout if &job not NULL */
			if (job && job->ring->idx != i)
				continue;

			drm_sched_job_recovery(&ring->sched);
			kthread_unpark(ring->sched.thread);
		}
	} else {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* only focus on the ring hit timeout if &job not NULL */
			if (job && job->ring->idx != i)
				continue;

			kthread_unpark(adev->rings[i]->sched.thread);
		}
	}

	if (amdgpu_device_has_dc_support(adev)) {
		if (drm_atomic_helper_resume(adev->ddev, state))
			dev_info(adev->dev, "drm resume failed:%d\n", r);
	} else {
		drm_helper_resume_force_mode(adev->ddev);
	}

	ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched);

	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
	} else {
		dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
	}

	amdgpu_vf_error_trans_all(adev);
	adev->in_gpu_reset = 0;
	mutex_unlock(&adev->lock_reset);
	return r;
}
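/*
 * Illustrative sketch only: the usual caller of
 * amdgpu_device_gpu_recover() is the GPU scheduler's timeout handler
 * (amdgpu_job_timedout() in amdgpu_job.c). The body below is a sketch,
 * not the real handler; the adev field of struct amdgpu_job is an
 * assumption here.
 */
#if 0
static void example_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	/* force=false: honour the amdgpu_gpu_recovery module parameter */
	amdgpu_device_gpu_recover(job->adev, job, false);
}
#endif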
void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	u32 mask;
	int ret;

	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;

	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;

	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask == 0) {
		ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
		if (!ret) {
			adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);

			if (mask & DRM_PCIE_SPEED_25)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
			if (mask & DRM_PCIE_SPEED_50)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
			if (mask & DRM_PCIE_SPEED_80)
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
		} else {
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
		if (!ret) {
			switch (mask) {
			case 32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case 1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		} else {
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;