/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)
/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI	"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI	"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII	"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
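/*
 * The MODULE_FIRMWARE() entries above record every firmware file this
 * module may request, so that userspace tooling (e.g. initramfs
 * generators) can bundle the right blobs alongside the module.
 */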
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
/**
 * amdgpu_vce_sw_init - allocate memory, load vce firmware
 *
 * @adev: amdgpu_device pointer
 * @size: size of the firmware BO to allocate
 *
 * First step to get VCE online, allocate memory and load the firmware
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned ucode_version, version_major, version_minor, binary_id;
	int i, r;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_STONEY:
		fw_name = FIRMWARE_STONEY;
		break;
	case CHIP_POLARIS10:
		fw_name = FIRMWARE_POLARIS10;
		break;
	case CHIP_POLARIS11:
		fw_name = FIRMWARE_POLARIS11;
		break;
	case CHIP_POLARIS12:
		fw_name = FIRMWARE_POLARIS12;
		break;
	case CHIP_VEGAM:
		fw_name = FIRMWARE_VEGAM;
		break;
	case CHIP_VEGA10:
		fw_name = FIRMWARE_VEGA10;
		break;
	case CHIP_VEGA12:
		fw_name = FIRMWARE_VEGA12;
		break;
	case CHIP_VEGA20:
		fw_name = FIRMWARE_VEGA20;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->vce.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->vce.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_vce: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->vce.fw);
		adev->vce.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
		 version_major, version_minor, binary_id);
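	/*
	 * Pack major, minor and binary ID into one word; e.g. firmware
	 * version 52.4 with binary ID 3 becomes 0x34040300.  The
	 * (fw_version >> 24) >= 52 checks further down recover the major
	 * version from this layout.
	 */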
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM, &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}
/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step on VCE teardown, free firmware memory
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
			      (void **)&adev->vce.cpu_addr);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	release_firmware(adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	return 0;
}
/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the scheduler entity used to push VCE jobs
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	struct drm_sched_rq *rq;
	int r;

	ring = &adev->vce.ring[0];
	rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
	r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
	if (r != 0) {
		DRM_ERROR("Failed setting up VCE run queue.\n");
		return r;
	}

	return 0;
}
/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}
/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	void *cpu_addr;
	const struct common_firmware_header *hdr;
	unsigned offset;
	int r;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	r = amdgpu_bo_reserve(adev->vce.vcpu_bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve VCE bo\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->vce.vcpu_bo, &cpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->vce.vcpu_bo);
		dev_err(adev->dev, "(%d) VCE map failed\n", r);
		return r;
	}

	/* re-upload the firmware image, skipping the common header */
	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
		    adev->vce.fw->size - offset);

	amdgpu_bo_kunmap(adev->vce.vcpu_bo);

	amdgpu_bo_unreserve(adev->vce.vcpu_bo);

	return 0;
}
/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used any more
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned i, count = 0;

	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}
/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}
/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}
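/*
 * begin_use/end_use implement the VCE power handshake: begin_use cancels
 * the pending idle work, and if no work was pending the block had already
 * been powered down and its clocks must be raised again; end_use merely
 * re-arms the one second idle timer.  Under SR-IOV the host controls
 * power management, so both callbacks do nothing there.
 */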
/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}
/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t dummy;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	dummy = ib->gpu_addr + 1024;

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
	ib->ptr[ib->length_dw++] = dummy;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
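/*
 * Together with amdgpu_vce_get_destroy_msg() below this is also used by
 * amdgpu_vce_ring_test_ib() at the end of this file: a dummy session with
 * handle 1 is created and immediately destroyed again, and the fence
 * returned for the destroy message proves that the engine processes IBs.
 */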
/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: submit the msg directly or through the scheduler entity
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       bool direct, struct dma_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}
/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p, uint32_t ib_idx,
				  int lo, int hi, unsigned size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
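/*
 * The fpfn/lpfn clamps above restrict where TTM may place the buffer so
 * that the bs/fb slot the command references (at byte offset
 * size * index) never crosses a 4GB boundary.
 */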
/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib_idx: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
			       int lo, int hi, unsigned size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) |
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_set_ib_value(p, ib_idx, lo, lower_32_bits(addr));
	amdgpu_set_ib_value(p, ib_idx, hi, upper_32_bits(addr));

	return 0;
}
/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the found session index or -EINVAL
 * if we don't have another free session index.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
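/*
 * The parsers below walk the IB as a sequence of packets of the form
 * [len][cmd][payload...], with len counted in bytes including the two
 * header dwords; hence the cursor advances by len / 4 dwords per packet
 * and any len below 8 or not a multiple of 4 is rejected.
 */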
/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	unsigned fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t *size = &tmp;
	unsigned idx;
	int i, r = 0;

	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);

	/* first pass: only validate buffer placement */
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 10,
						   idx + 9, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 12,
						   idx + 11, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 3,
						   idx + 2, 0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib_idx, idx + 8,
						   idx + 7, 0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

	/* second pass: patch relocations and track session handles */
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
			bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			*size = amdgpu_get_ib_value(p, ib_idx, idx + 8) *
				amdgpu_get_ib_value(p, ib_idx, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
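/*
 * In VM mode the addresses in the stream are virtual addresses of the
 * submitting process, so relocation and placement validation are not
 * possible; the parser below therefore only tracks session create and
 * destroy commands to keep the handle bookkeeping consistent.
 */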
/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @ib_idx: indirect buffer to parse
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx)
{
	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -EINVAL;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
		amdgpu_ib_free(p->adev, ib, NULL);
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}
/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: owning job (unused here)
 * @ib: the IB to execute
 * @flags: IB flags (unused here)
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address to write the fence value to
 * @seq: sequence number to signal
 * @flags: fence flags; AMDGPU_FENCE_FLAG_64BIT is not supported
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}
/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr = amdgpu_ring_get_rptr(ring);
	unsigned i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}
/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}