/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_sched.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "atom.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i;

	mutex_lock(&mgpu_info.mutex);

	for (i = 0; i < mgpu_info.num_gpu; i++) {
		gpu_instance = &(mgpu_info.gpu_ins[i]);
		if (gpu_instance->adev == adev) {
			mgpu_info.gpu_ins[i] =
				mgpu_info.gpu_ins[mgpu_info.num_gpu - 1];
			mgpu_info.num_gpu--;
			if (adev->flags & AMD_IS_APU)
				mgpu_info.num_apu--;
			else
				mgpu_info.num_dgpu--;
			break;
		}
	}

	mutex_unlock(&mgpu_info.mutex);
}
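/*
 * Note on the unregister path above: the mgpu_info.gpu_ins[] array is kept
 * dense by moving the last entry into the freed slot ("swap with last"), so
 * removal is O(1). For example, removing index 1 of [A, B, C] yields [A, C]
 * with num_gpu dropping from 3 to 2; entry order is not preserved.
 */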
/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 */
void amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return;

	amdgpu_unregister_gpu_instance(adev);

	if (adev->rmmio == NULL)
		goto done_free;

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_request_full_gpu(adev, false);

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_get_sync(dev->dev);
		pm_runtime_forbid(dev->dev);
	}

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
}
static void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;

	mutex_lock(&mgpu_info.mutex);

	if (mgpu_info.num_gpu >= MAX_GPU_INSTANCE) {
		DRM_ERROR("Cannot register more gpu instances\n");
		mutex_unlock(&mgpu_info.mutex);
		return;
	}

	gpu_instance = &(mgpu_info.gpu_ins[mgpu_info.num_gpu]);
	gpu_instance->adev = adev;
	gpu_instance->mgpu_fan_enabled = 0;

	mgpu_info.num_gpu++;
	if (adev->flags & AMD_IS_APU)
		mgpu_info.num_apu++;
	else
		mgpu_info.num_dgpu++;

	mutex_unlock(&mgpu_info.mutex);
}
/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(dev->dev,
				 "SI support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(dev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(dev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    (amdgpu_is_atpx_hybrid() ||
	     amdgpu_has_atpx_dgpu_power_cntl()) &&
	    ((flags & AMD_IS_APU) == 0) &&
	    !pci_is_thunderbolt_attached(dev->pdev))
		flags |= AMD_IS_PX;

	/* amdgpu_device_init should report only fatal error
	 * like memory allocation failure or iomapping failure,
	 * or memory manager initialization failure, it must
	 * properly initialize the GPU MC controller and permit
	 * VRAM allocation
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

	amdgpu_register_gpu_instance(adev);
out:
	if (r) {
		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
		if (adev->rmmio && amdgpu_device_is_px(dev))
			pm_runtime_put_noidle(dev->dev);
		amdgpu_driver_unload_kms(dev);
	}

	return r;
}
static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
				struct drm_amdgpu_query_fw *query_fw,
				struct amdgpu_device *adev)
{
	switch (query_fw->fw_type) {
	case AMDGPU_INFO_FW_VCE:
		fw_info->ver = adev->vce.fw_version;
		fw_info->feature = adev->vce.fb_version;
		break;
	case AMDGPU_INFO_FW_UVD:
		fw_info->ver = adev->uvd.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_VCN:
		fw_info->ver = adev->vcn.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GMC:
		fw_info->ver = adev->gmc.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_GFX_ME:
		fw_info->ver = adev->gfx.me_fw_version;
		fw_info->feature = adev->gfx.me_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_PFP:
		fw_info->ver = adev->gfx.pfp_fw_version;
		fw_info->feature = adev->gfx.pfp_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_CE:
		fw_info->ver = adev->gfx.ce_fw_version;
		fw_info->feature = adev->gfx.ce_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC:
		fw_info->ver = adev->gfx.rlc_fw_version;
		fw_info->feature = adev->gfx.rlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL:
		fw_info->ver = adev->gfx.rlc_srlc_fw_version;
		fw_info->feature = adev->gfx.rlc_srlc_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM:
		fw_info->ver = adev->gfx.rlc_srlg_fw_version;
		fw_info->feature = adev->gfx.rlc_srlg_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM:
		fw_info->ver = adev->gfx.rlc_srls_fw_version;
		fw_info->feature = adev->gfx.rlc_srls_feature_version;
		break;
	case AMDGPU_INFO_FW_GFX_MEC:
		if (query_fw->index == 0) {
			fw_info->ver = adev->gfx.mec_fw_version;
			fw_info->feature = adev->gfx.mec_feature_version;
		} else if (query_fw->index == 1) {
			fw_info->ver = adev->gfx.mec2_fw_version;
			fw_info->feature = adev->gfx.mec2_feature_version;
		} else
			return -EINVAL;
		break;
	case AMDGPU_INFO_FW_SMC:
		fw_info->ver = adev->pm.fw_version;
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_SDMA:
		if (query_fw->index >= adev->sdma.num_instances)
			return -EINVAL;
		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
		break;
	case AMDGPU_INFO_FW_SOS:
		fw_info->ver = adev->psp.sos_fw_version;
		fw_info->feature = adev->psp.sos_feature_version;
		break;
	case AMDGPU_INFO_FW_ASD:
		fw_info->ver = adev->psp.asd_fw_version;
		fw_info->feature = adev->psp.asd_feature_version;
		break;
	case AMDGPU_INFO_FW_DMCU:
		fw_info->ver = adev->dm.dmcu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
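/*
 * Illustrative userspace sketch (not part of this driver): the function
 * above is reached through the AMDGPU_INFO ioctl with query set to
 * AMDGPU_INFO_FW_VERSION. A minimal VCE firmware query, assuming an
 * already-opened render node fd and with error handling omitted;
 * query_fw.ip_instance stays 0, the only supported instance:
 *
 *	struct drm_amdgpu_info request = {};
 *	struct drm_amdgpu_info_firmware fw_info = {};
 *
 *	request.return_pointer = (uintptr_t)&fw_info;
 *	request.return_size = sizeof(fw_info);
 *	request.query = AMDGPU_INFO_FW_VERSION;
 *	request.query_fw.fw_type = AMDGPU_INFO_FW_VCE;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
 *	printf("VCE fw 0x%08x feature %u\n", fw_info.ver, fw_info.feature);
 */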
static int amdgpu_hw_ip_info(struct amdgpu_device *adev,
			     struct drm_amdgpu_info *info,
			     struct drm_amdgpu_info_hw_ip *result)
{
	uint32_t ib_start_alignment = 0;
	uint32_t ib_size_alignment = 0;
	enum amd_ip_block_type type;
	unsigned int num_rings = 0;
	unsigned int i, j;

	if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
		return -EINVAL;

	switch (info->query_hw_ip.type) {
	case AMDGPU_HW_IP_GFX:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			if (adev->gfx.gfx_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_COMPUTE:
		type = AMD_IP_BLOCK_TYPE_GFX;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			if (adev->gfx.compute_ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 32;
		ib_size_alignment = 32;
		break;
	case AMDGPU_HW_IP_DMA:
		type = AMD_IP_BLOCK_TYPE_SDMA;
		for (i = 0; i < adev->sdma.num_instances; i++)
			if (adev->sdma.instance[i].ring.sched.ready)
				++num_rings;
		ib_start_alignment = 256;
		ib_size_alignment = 4;
		break;
	case AMDGPU_HW_IP_UVD:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			if (adev->uvd.inst[i].ring.sched.ready)
				++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCE:
		type = AMD_IP_BLOCK_TYPE_VCE;
		for (i = 0; i < adev->vce.num_rings; i++)
			if (adev->vce.ring[i].sched.ready)
				++num_rings;
		ib_start_alignment = 4;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_UVD_ENC:
		type = AMD_IP_BLOCK_TYPE_UVD;
		for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
			if (adev->uvd.harvest_config & (1 << i))
				continue;

			for (j = 0; j < adev->uvd.num_enc_rings; j++)
				if (adev->uvd.inst[i].ring_enc[j].sched.ready)
					++num_rings;
		}
		ib_start_alignment = 64;
		ib_size_alignment = 64;
		break;
	case AMDGPU_HW_IP_VCN_DEC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		if (adev->vcn.ring_dec.sched.ready)
			++num_rings;
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	case AMDGPU_HW_IP_VCN_ENC:
		type = AMD_IP_BLOCK_TYPE_VCN;
		for (i = 0; i < adev->vcn.num_enc_rings; i++)
			if (adev->vcn.ring_enc[i].sched.ready)
				++num_rings;
		ib_start_alignment = 64;
		ib_size_alignment = 1;
		break;
	case AMDGPU_HW_IP_VCN_JPEG:
		type = AMD_IP_BLOCK_TYPE_VCN;
		if (adev->vcn.ring_jpeg.sched.ready)
			++num_rings;
		ib_start_alignment = 16;
		ib_size_alignment = 16;
		break;
	default:
		return -EINVAL;
	}

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type &&
		    adev->ip_blocks[i].status.valid)
			break;

	if (i == adev->num_ip_blocks)
		return 0;

	num_rings = min(amdgpu_ctx_num_entities[info->query_hw_ip.type],
			num_rings);

	result->hw_ip_version_major = adev->ip_blocks[i].version->major;
	result->hw_ip_version_minor = adev->ip_blocks[i].version->minor;
	result->capabilities_flags = 0;
	result->available_rings = (1 << num_rings) - 1;
	result->ib_start_alignment = ib_start_alignment;
	result->ib_size_alignment = ib_size_alignment;

	return 0;
}
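/*
 * The available_rings mask above encodes the number of ready rings as a
 * contiguous bitfield: with num_rings == 3, (1 << 3) - 1 == 0x7, i.e. rings
 * 0-2 are usable. num_rings is first clamped to amdgpu_ctx_num_entities[]
 * so userspace never sees more rings than a context can schedule on.
 */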
/*
 * Userspace get information ioctl
 */
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @adev: amdgpu device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline parms, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(uintptr_t)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;
	int ui32_size = sizeof(ui32);

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;
	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		int ret;

		ret = amdgpu_hw_ip_info(adev, info, &ip);
		if (ret)
			return ret;

		ret = copy_to_user(out, &ip, min((size_t)size, sizeof(ip)));
		return ret ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		case AMDGPU_HW_IP_UVD_ENC:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCN_DEC:
		case AMDGPU_HW_IP_VCN_ENC:
		case AMDGPU_HW_IP_VCN_JPEG:
			type = AMD_IP_BLOCK_TYPE_VCN;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].version->type == type &&
			    adev->ip_blocks[i].status.valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;
		int ret;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
		if (ret)
			return ret;

		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_EVICTIONS:
		ui64 = atomic64_read(&adev->num_evictions);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS:
		ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size;
		gds_info.gds_total_size = adev->gds.mem.total_size;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size;
		vram_gtt.gtt_size *= PAGE_SIZE;
		vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size);
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_MEMORY: {
		struct drm_amdgpu_memory_info mem;

		memset(&mem, 0, sizeof(mem));
		mem.vram.total_heap_size = adev->gmc.real_vram_size;
		mem.vram.usable_heap_size = adev->gmc.real_vram_size -
			atomic64_read(&adev->vram_pin_size);
		mem.vram.heap_usage =
			amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;

		mem.cpu_accessible_vram.total_heap_size =
			adev->gmc.visible_vram_size;
		mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size -
			atomic64_read(&adev->visible_pin_size);
		mem.cpu_accessible_vram.heap_usage =
			amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]);
		mem.cpu_accessible_vram.max_allocation =
			mem.cpu_accessible_vram.usable_heap_size * 3 / 4;

		mem.gtt.total_heap_size = adev->mman.bdev.man[TTM_PL_TT].size;
		mem.gtt.total_heap_size *= PAGE_SIZE;
		mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
			atomic64_read(&adev->gart_pin_size);
		mem.gtt.heap_usage =
			amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]);
		mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;

		return copy_to_user(out, &mem,
				    min((size_t)size, sizeof(mem)))
				    ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;

		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
		if (!regs)
			return -ENOMEM;
		alloc_size = info->read_mmr_reg.count * sizeof(*regs);

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		uint64_t vm_size;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
			dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
		} else {
			dev_info.max_engine_clock = adev->clock.default_sclk * 10;
			dev_info.max_memory_clock = adev->clock.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;

		dev_info.ids_flags = 0;
		if (adev->flags & AMD_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		if (amdgpu_sriov_vf(adev))
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_PREEMPTION;

		vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		vm_size -= AMDGPU_VA_RESERVED_SIZE;

		/* Older VCE FW versions are buggy and can handle only 40bits */
		if (adev->vce.fw_version &&
		    adev->vce.fw_version < AMDGPU_VCE_FW_53_45)
			vm_size = min(vm_size, 1ULL << 40);

		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max =
			min(vm_size, AMDGPU_GMC_HOLE_START);

		if (vm_size > AMDGPU_GMC_HOLE_START) {
			dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
			dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
		}
		dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
		dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;
		dev_info.cu_active_number = adev->gfx.cu_info.number;
		dev_info.cu_ao_mask = adev->gfx.cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
		       sizeof(adev->gfx.cu_info.ao_cu_bitmap));
		memcpy(&dev_info.cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
		       sizeof(adev->gfx.cu_info.bitmap));
		dev_info.vram_type = adev->gmc.vram_type;
		dev_info.vram_bit_width = adev->gmc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;
		dev_info.gc_double_offchip_lds_buf =
			adev->gfx.config.double_offchip_lds_buf;

		if (amdgpu_ngg) {
			dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr;
			dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size;
			dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr;
			dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size;
			dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr;
			dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size;
			dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr;
			dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size;
		}
		dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size;
		dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs;
		dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
		dev_info.num_tcc_blocks = adev->gfx.config.max_texture_channel_caches;
		dev_info.gs_vgt_table_depth = adev->gfx.config.gs_vgt_table_depth;
		dev_info.gs_prim_buffer_depth = adev->gfx.config.gs_prim_buffer_depth;
		dev_info.max_gs_waves_per_vgt = adev->gfx.config.max_gs_threads;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VCE_CLOCK_TABLE: {
		struct drm_amdgpu_info_vce_clock_table vce_clk_table = {};
		struct amd_vce_state *vce_state;

		for (i = 0; i < AMDGPU_VCE_CLOCK_TABLE_ENTRIES; i++) {
			vce_state = amdgpu_dpm_get_vce_clock_state(adev, i);
			if (vce_state) {
				vce_clk_table.entries[i].sclk = vce_state->sclk;
				vce_clk_table.entries[i].mclk = vce_state->mclk;
				vce_clk_table.entries[i].eclk = vce_state->evclk;
				vce_clk_table.num_valid_entries++;
			}
		}

		return copy_to_user(out, &vce_clk_table,
				    min((size_t)size, sizeof(vce_clk_table))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VBIOS: {
		uint32_t bios_size = adev->bios_size;

		switch (info->vbios_info.type) {
		case AMDGPU_INFO_VBIOS_SIZE:
			return copy_to_user(out, &bios_size,
					    min((size_t)size, sizeof(bios_size)))
					    ? -EFAULT : 0;
		case AMDGPU_INFO_VBIOS_IMAGE: {
			uint8_t *bios;
			uint32_t bios_offset = info->vbios_info.offset;

			if (bios_offset >= bios_size)
				return -EINVAL;

			bios = adev->bios + bios_offset;
			return copy_to_user(out, bios,
					    min((size_t)size, (size_t)(bios_size - bios_offset)))
					    ? -EFAULT : 0;
		}
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->vbios_info.type);
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_NUM_HANDLES: {
		struct drm_amdgpu_info_num_handles handle;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_UVD:
			/* Starting Polaris, we support unlimited UVD handles */
			if (adev->asic_type < CHIP_POLARIS10) {
				handle.uvd_max_handles = adev->uvd.max_handles;
				handle.uvd_used_handles = amdgpu_uvd_used_handles(adev);

				return copy_to_user(out, &handle,
					min((size_t)size, sizeof(handle))) ? -EFAULT : 0;
			} else {
				return -ENODATA;
			}

			break;
		default:
			return -EINVAL;
		}
	}
	case AMDGPU_INFO_SENSOR: {
		if (!adev->pm.dpm_enabled)
			return -ENOENT;

		switch (info->sensor_info.type) {
		case AMDGPU_INFO_SENSOR_GFX_SCLK:
			/* get sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GFX_MCLK:
			/* get mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GFX_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_GPU_TEMP:
			/* get temperature in millidegrees C */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_TEMP,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_LOAD:
			/* get GPU load */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_LOAD,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_GPU_AVG_POWER:
			/* get average GPU power */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_GPU_POWER,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 >>= 8;
			break;
		case AMDGPU_INFO_SENSOR_VDDNB:
			/* get VDDNB in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDNB,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_VDDGFX:
			/* get VDDGFX in millivolts */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_VDDGFX,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK:
			/* get stable pstate sclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		case AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK:
			/* get stable pstate mclk in Mhz */
			if (amdgpu_dpm_read_sensor(adev,
						   AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK,
						   (void *)&ui32, &ui32_size)) {
				return -EINVAL;
			}
			ui32 /= 100;
			break;
		default:
			DRM_DEBUG_KMS("Invalid request %d\n",
				      info->sensor_info.type);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_LOST_COUNTER:
		ui32 = atomic_read(&adev->vram_lost_counter);
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}
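/*
 * Illustrative sketch (not part of this driver): a simple scalar query such
 * as AMDGPU_INFO_ACCEL_WORKING follows the same pattern as the structured
 * queries above, returning a single uint32_t through return_pointer
 * (fd is an assumed open device node, error handling omitted):
 *
 *	uint32_t accel = 0;
 *	struct drm_amdgpu_info request = {};
 *
 *	request.return_pointer = (uintptr_t)&accel;
 *	request.return_size = sizeof(accel);
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &request);
 */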
/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga_switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	drm_fb_helper_lastclose(dev);
	vga_switcheroo_process_delayed_switch();
}
/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init vm on cayman+ (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r, pasid;

	/* Ensure IB tests are run on ring */
	flush_delayed_work(&adev->late_init_work);

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv)) {
		r = -ENOMEM;
		goto out_suspend;
	}

	pasid = amdgpu_pasid_alloc(16);
	if (pasid < 0) {
		dev_warn(adev->dev, "No more PASIDs available!");
		pasid = 0;
	}
	r = amdgpu_vm_init(adev, &fpriv->vm, AMDGPU_VM_CONTEXT_GFX, pasid);
	if (r)
		goto error_pasid;

	fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
	if (!fpriv->prt_va) {
		r = -ENOMEM;
		goto error_vm;
	}

	if (amdgpu_sriov_vf(adev)) {
		uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;

		r = amdgpu_map_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
					  &fpriv->csa_va, csa_addr, AMDGPU_CSA_SIZE);
		if (r)
			goto error_vm;
	}

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);

	file_priv->driver_priv = fpriv;
	goto out_suspend;

error_vm:
	amdgpu_vm_fini(adev, &fpriv->vm);

error_pasid:
	if (pasid)
		amdgpu_pasid_free(pasid);

	kfree(fpriv);

out_suspend:
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down vm on cayman+ (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	struct amdgpu_bo *pd;
	unsigned int pasid;
	int handle;

	if (!fpriv)
		return;

	pm_runtime_get_sync(dev->dev);

	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_UVD) != NULL)
		amdgpu_uvd_free_handles(adev, file_priv);
	if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
		amdgpu_vce_free_handles(adev, file_priv);

	amdgpu_vm_bo_rmv(adev, fpriv->prt_va);

	if (amdgpu_sriov_vf(adev)) {
		/* TODO: how to handle reserve failure */
		BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
		amdgpu_vm_bo_rmv(adev, fpriv->csa_va);
		fpriv->csa_va = NULL;
		amdgpu_bo_unreserve(adev->virt.csa_obj);
	}

	pasid = fpriv->vm.pasid;
	pd = amdgpu_bo_ref(fpriv->vm.root.base.bo);

	amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
	amdgpu_vm_fini(adev, &fpriv->vm);

	if (pasid)
		amdgpu_pasid_free_delayed(pd->tbo.resv, pasid);
	amdgpu_bo_unref(&pd);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_put(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	kfree(fpriv);
	file_priv->driver_priv = NULL;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
}
/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @pipe: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int vpos, hpos, stat;
	u32 count;

	if (pipe >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* The hw increments its frame counter at start of vsync, not at start
	 * of vblank, as is required by DRM core vblank counter handling.
	 * Cook the hw count here to make it appear to the caller as if it
	 * incremented at start of vblank. We measure distance to start of
	 * vblank in vpos. vpos therefore will be >= 0 between start of vblank
	 * and start of vsync, so vpos >= 0 means to bump the hw frame counter
	 * result by 1 to give the proper appearance to caller.
	 */
	if (adev->mode_info.crtcs[pipe]) {
		/* Repeat readout if needed to provide stable result if
		 * we cross start of vsync during the queries.
		 */
		do {
			count = amdgpu_display_vblank_get_counter(adev, pipe);
			/* Ask amdgpu_display_get_crtc_scanoutpos to return
			 * vpos as distance to start of vblank, instead of
			 * regular vertical scanout pos.
			 */
			stat = amdgpu_display_get_crtc_scanoutpos(
				dev, pipe, GET_DISTANCE_TO_VBLANKSTART,
				&vpos, &hpos, NULL, NULL,
				&adev->mode_info.crtcs[pipe]->base.hwmode);
		} while (count != amdgpu_display_vblank_get_counter(adev, pipe));

		if (((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE))) {
			DRM_DEBUG_VBL("Query failed! stat %d\n", stat);
		} else {
			DRM_DEBUG_VBL("crtc %d: dist from vblank start %d\n",
				      pipe, vpos);

			/* Bump counter if we are at >= leading edge of vblank,
			 * but before vsync where vpos would turn negative and
			 * the hw counter really increments.
			 */
			if (vpos >= 0)
				count++;
		}
	} else {
		/* Fallback to use value as is. */
		count = amdgpu_display_vblank_get_counter(adev, pipe);
		DRM_DEBUG_VBL("NULL mode info! Returned count may be wrong.\n");
	}

	return count;
}
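/*
 * Worked example for the counter cooking above: if the hw frame counter
 * reads N and the scanout query reports vpos >= 0 (i.e. we are between
 * vblank start and vsync start), the function returns N + 1, matching what
 * the counter will read once vsync actually begins; with vpos < 0 it
 * returns N unchanged.
 */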
/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}
/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @pipe: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW)
};
const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info_firmware fw_info;
	struct drm_amdgpu_query_fw query_fw;
	struct atom_context *ctx = adev->mode_info.atom_context;
	int ret, i;

	/* VCE */
	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* UVD */
	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MC */
	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* ME */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PFP */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* CE */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST CNTL */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST GPM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLG feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* RLC SAVE RESTORE LIST SRM MEM */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "RLC SRLS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC */
	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* MEC2 */
	if (adev->asic_type == CHIP_KAVERI ||
	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
		query_fw.index = 1;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
			   fw_info.feature, fw_info.ver);
	}

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SOS feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP ASD */
	query_fw.fw_type = AMDGPU_INFO_FW_ASD;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SMC */
	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* SDMA */
	query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		query_fw.index = i;
		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
		if (ret)
			return ret;
		seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
			   i, fw_info.feature, fw_info.ver);
	}

	/* VCN */
	query_fw.fw_type = AMDGPU_INFO_FW_VCN;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "VCN feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* DMCU */
	query_fw.fw_type = AMDGPU_INFO_FW_DMCU;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "DMCU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version);

	return 0;
}
static const struct drm_info_list amdgpu_firmware_info_list[] = {
	{"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
};
#endif

int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
					ARRAY_SIZE(amdgpu_firmware_info_list));
#else
	return 0;
#endif
}