/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"

struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev

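/*
 * Buffer-object allocation for CGS clients: the requested memory type
 * selects the GEM domain (VRAM or GTT) and creation flags, while
 * min_offset/max_offset bound the TTM placement window in pages.
 */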
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	*handle = 0;

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  0, &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}

	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, true);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}

static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, obj->prefered_domains,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, true);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return 0;
}

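/* Register accessors: thin wrappers around the RREG32/WREG32 macro family. */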
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}

static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}

static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}

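/*
 * Interrupt glue: cgs_irq_params keeps the client's callbacks, and the
 * two helpers below relay amdgpu interrupt state changes and IV-ring
 * entries to them.
 */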
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(void *cgs_device,
				     unsigned client_id,
				     unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
			      unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;

	if (!adev->irq.client[client_id].sources)
		return -EINVAL;

	return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
}

static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

static int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
					    enum amd_ip_block_type block_type,
					    enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

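/* Translate a CGS firmware id into the AMDGPU_UCODE_ID used by the ucode loader. */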
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		/* for VI. JT2 should be the same as JT1, because:
		 * 1. MEC2 and MEC1 use exactly the same FW.
		 * 2. JT2 is not patched but JT1 is.
		 */
		if (adev->asic_type >= CHIP_TOPAZ)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		else
			result = AMDGPU_UCODE_ID_CP_MEC2;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	case CGS_UCODE_ID_STORAGE:
		result = AMDGPU_UCODE_ID_STORAGE;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}

static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because it is not created by cgs */
	return -EINVAL;
}

static uint16_t amdgpu_get_firmware_version(struct cgs_device *cgs_device,
					    enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	uint16_t fw_version = 0;

	switch (type) {
	case CGS_UCODE_ID_SDMA0:
		fw_version = adev->sdma.instance[0].fw_version;
		break;
	case CGS_UCODE_ID_SDMA1:
		fw_version = adev->sdma.instance[1].fw_version;
		break;
	case CGS_UCODE_ID_CP_CE:
		fw_version = adev->gfx.ce_fw_version;
		break;
	case CGS_UCODE_ID_CP_PFP:
		fw_version = adev->gfx.pfp_fw_version;
		break;
	case CGS_UCODE_ID_CP_ME:
		fw_version = adev->gfx.me_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT1:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		fw_version = adev->gfx.mec_fw_version;
		break;
	case CGS_UCODE_ID_RLC_G:
		fw_version = adev->gfx.rlc_fw_version;
		break;
	case CGS_UCODE_ID_STORAGE:
		break;
	default:
		DRM_ERROR("firmware type %d does not have a version\n", type);
		break;
	}
	return fw_version;
}

static int amdgpu_cgs_enter_safe_mode(struct cgs_device *cgs_device,
				      bool en)
{
	CGS_FUNC_ADEV;

	if (adev->gfx.rlc.funcs->enter_safe_mode == NULL ||
	    adev->gfx.rlc.funcs->exit_safe_mode == NULL)
		return 0;

	if (en)
		adev->gfx.rlc.funcs->enter_safe_mode(adev);
	else
		adev->gfx.rlc.funcs->exit_safe_mode(adev);

	return 0;
}

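/*
 * Firmware queries take one of two paths: for non-SMU ucode the image was
 * already loaded by the IP blocks and is only looked up and described here;
 * for SMU/SMU_SK the SMC image is requested by ASIC name (with "kicker"
 * variants keyed off PCI device/revision ids) and validated before its
 * header is parsed.
 */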
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += ALIGN(le32_to_cpu(header->header.ucode_size_bytes), PAGE_SIZE);
			data_size = le32_to_cpu(header->jt_size) << 2;
		}

		info->kptr = ucode->kaddr;
		info->image_size = data_size;
		info->mc_addr = gpu_addr;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);

		if (CGS_UCODE_ID_CP_MEC == type)
			info->image_size = (header->jt_offset) << 2;

		info->fw_version = amdgpu_get_firmware_version(cgs_device, type);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;
		const struct common_firmware_header *header;
		struct amdgpu_firmware_info *ucode = NULL;

		if (!adev->pm.fw) {
			switch (adev->asic_type) {
			case CHIP_TOPAZ:
				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/topaz_smc.bin");
				break;
			case CHIP_TONGA:
				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1))) {
					info->is_kicker = true;
					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
				} else
					strcpy(fw_name, "amdgpu/tonga_smc.bin");
				break;
			case CHIP_FIJI:
				strcpy(fw_name, "amdgpu/fiji_smc.bin");
				break;
			case CHIP_POLARIS11:
				if (type == CGS_UCODE_ID_SMU) {
					if (((adev->pdev->device == 0x67ef) &&
					     ((adev->pdev->revision == 0xe0) ||
					      (adev->pdev->revision == 0xe2) ||
					      (adev->pdev->revision == 0xe5))) ||
					    ((adev->pdev->device == 0x67ff) &&
					     ((adev->pdev->revision == 0xcf) ||
					      (adev->pdev->revision == 0xef) ||
					      (adev->pdev->revision == 0xff)))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris11_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris11_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS10:
				if (type == CGS_UCODE_ID_SMU) {
					if ((adev->pdev->device == 0x67df) &&
					    ((adev->pdev->revision == 0xe0) ||
					     (adev->pdev->revision == 0xe3) ||
					     (adev->pdev->revision == 0xe4) ||
					     (adev->pdev->revision == 0xe5) ||
					     (adev->pdev->revision == 0xe7) ||
					     (adev->pdev->revision == 0xef))) {
						info->is_kicker = true;
						strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
					} else
						strcpy(fw_name, "amdgpu/polaris10_smc.bin");
				} else if (type == CGS_UCODE_ID_SMU_SK) {
					strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
				}
				break;
			case CHIP_POLARIS12:
				strcpy(fw_name, "amdgpu/polaris12_smc.bin");
				break;
			case CHIP_VEGA10:
				strcpy(fw_name, "amdgpu/vega10_smc.bin");
				break;
			default:
				DRM_ERROR("SMC firmware not supported\n");
				return -EINVAL;
			}

			err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
			if (err) {
				DRM_ERROR("Failed to request firmware\n");
				return err;
			}

			err = amdgpu_ucode_validate(adev->pm.fw);
			if (err) {
				DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
				release_firmware(adev->pm.fw);
				adev->pm.fw = NULL;
				return err;
			}

			if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
				ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
				ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
				ucode->fw = adev->pm.fw;
				header = (const struct common_firmware_header *)ucode->fw->data;
				adev->firmware.fw_size +=
					ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
			}
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
					le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}

static int amdgpu_cgs_is_virtualization_enabled(void *cgs_device)
{
	CGS_FUNC_ADEV;
	return amdgpu_sriov_vf(adev);
}

static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID:
		sys_info->value = adev->pdev->subsystem_device;
		break;
	case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
		sys_info->value = adev->pdev->subsystem_vendor;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}

static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}

static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}

/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *  \param cgs_device
 *  \param info input/output arguments for the control method
 *  \return status
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif

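/*
 * Convenience wrapper around amdgpu_cgs_acpi_eval_object(): packs the
 * function id and input buffer into two input arguments plus one output
 * buffer argument and evaluates the named ACPI method.
 */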
static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}

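/* Dispatch tables handed out to CGS clients via amdgpu_cgs_create_device(). */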
static const struct cgs_ops amdgpu_cgs_ops = {
	.alloc_gpu_mem = amdgpu_cgs_alloc_gpu_mem,
	.free_gpu_mem = amdgpu_cgs_free_gpu_mem,
	.gmap_gpu_mem = amdgpu_cgs_gmap_gpu_mem,
	.gunmap_gpu_mem = amdgpu_cgs_gunmap_gpu_mem,
	.kmap_gpu_mem = amdgpu_cgs_kmap_gpu_mem,
	.kunmap_gpu_mem = amdgpu_cgs_kunmap_gpu_mem,
	.read_register = amdgpu_cgs_read_register,
	.write_register = amdgpu_cgs_write_register,
	.read_ind_register = amdgpu_cgs_read_ind_register,
	.write_ind_register = amdgpu_cgs_write_ind_register,
	.get_pci_resource = amdgpu_cgs_get_pci_resource,
	.atom_get_data_table = amdgpu_cgs_atom_get_data_table,
	.atom_get_cmd_table_revs = amdgpu_cgs_atom_get_cmd_table_revs,
	.atom_exec_cmd_table = amdgpu_cgs_atom_exec_cmd_table,
	.get_firmware_info = amdgpu_cgs_get_firmware_info,
	.rel_firmware = amdgpu_cgs_rel_firmware,
	.set_powergating_state = amdgpu_cgs_set_powergating_state,
	.set_clockgating_state = amdgpu_cgs_set_clockgating_state,
	.get_active_displays_info = amdgpu_cgs_get_active_displays_info,
	.notify_dpm_enabled = amdgpu_cgs_notify_dpm_enabled,
	.call_acpi_method = amdgpu_cgs_call_acpi_method,
	.query_system_info = amdgpu_cgs_query_system_info,
	.is_virtualization_enabled = amdgpu_cgs_is_virtualization_enabled,
	.enter_safe_mode = amdgpu_cgs_enter_safe_mode,
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	.add_irq_source = amdgpu_cgs_add_irq_source,
	.irq_get = amdgpu_cgs_irq_get,
	.irq_put = amdgpu_cgs_irq_put
};

struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}

void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}