/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>

#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>

#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};

#define CGS_FUNC_ADEV \
	struct amdgpu_device *adev = \
		((struct amdgpu_cgs_device *)cgs_device)->adev
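
/*
 * Report the MC address range and the remaining (unpinned) size of the
 * requested CGS memory pool: visible VRAM, invisible VRAM or GART.
 */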
static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
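
/*
 * Wrap a vmalloc'ed kernel buffer in a GTT buffer object and pin it into
 * the [min_offset, max_offset] window, returning the MC address.
 */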
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}

static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
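
/*
 * Allocate GPU memory for a CGS client: translate the CGS memory type into
 * an amdgpu domain plus TTM placement and create a placement-restricted BO.
 */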
static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint64_t flags = 0;
	uint32_t domain = 0;
	int ret = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}

	*handle = (cgs_handle_t)obj;

	return ret;
}

static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
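
/*
 * GPU-map a previously allocated BO: pin it inside the address window
 * recorded in its single TTM placement and return the resulting MC address.
 */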
static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}

static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
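
/* MMIO and indirect register accessors exported through CGS. */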
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}

static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}

static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;

	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}

static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;

	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
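
/* PCI configuration space accessors; a failed access triggers a WARN and reads return 0. */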
static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}

static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}

static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}

static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}

static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}

static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}
static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}

static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}
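
/* Power-management request hooks are not wired up yet; the stubs below only satisfy the cgs_ops interface. */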
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}
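
/*
 * Interrupt plumbing: cgs_irq_params keeps the client's set/handler callbacks
 * and private data, and the two wrappers below forward amdgpu IRQ state
 * changes and IV entries to them.
 */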
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};

static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}

static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}

static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};

static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);
	if (ret) {
		kfree(irq_params);
		kfree(source);
	}

	return ret;
}

static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}

static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
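
/*
 * Walk the IP blocks and forward the clock- or power-gating request to the
 * block that matches the given type.
 */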
int amdgpu_cgs_set_clockgating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}

int amdgpu_cgs_set_powergating_state(void *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
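
/* Map a CGS firmware type onto the corresponding AMDGPU ucode ID. */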
static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else if (adev->asic_type == CHIP_CARRIZO)
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}
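
/*
 * Fill in cgs_firmware_info. For GPU ucode already loaded by amdgpu the
 * header of the cached firmware is parsed; for the SMU the SMC firmware
 * image is requested and validated here.
 */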
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)src;
	}
	return 0;
}
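
/*
 * CGS callback tables. These use positional initializers, so the entries
 * must stay in the same order as the members of struct cgs_ops and
 * struct cgs_os_ops.
 */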
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state
};

static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
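
/* Allocate and free the amdgpu_cgs_device wrapper that CGS clients use as their opaque device handle. */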
void *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return cgs_device;
}

void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}