/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>

#include "radeon.h"
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000
/* Firmware Names */
#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_uvd.bin"
MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
static void radeon_uvd_idle_work_handler(struct work_struct *work);
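
/**
 * radeon_uvd_init - UVD one time initialization
 *
 * @rdev: radeon_device pointer
 *
 * Pick and load the UVD firmware for the chip family, allocate and pin
 * the VCPU buffer object in VRAM (firmware image plus stack and heap)
 * and reset the per-handle bookkeeping.
 */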
int radeon_uvd_init(struct radeon_device *rdev)
{
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
	switch (rdev->family) {
	case CHIP_RV710:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
		fw_name = FIRMWARE_TAHITI;
		break;

	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;

	default:
		return -EINVAL;
	}
	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
		rdev->uvd.img_size[i] = 0;
	}

	return 0;
}
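
/**
 * radeon_uvd_fini - UVD teardown
 *
 * @rdev: radeon_device pointer
 *
 * Unmap, unpin and free the VCPU buffer object, tear down the UVD ring
 * and release the firmware image.
 */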
void radeon_uvd_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	radeon_bo_unref(&rdev->uvd.vcpu_bo);

	radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);

	release_firmware(rdev->uvd_fw);
}
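
/**
 * radeon_uvd_suspend - save UVD state before suspend
 *
 * @rdev: radeon_device pointer
 *
 * If any stream handles are still open, save everything behind the
 * firmware image (stack and heap) so it can be restored on resume.
 */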
int radeon_uvd_suspend(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;
	int i;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&rdev->uvd.handles[i]))
			break;

	if (i == RADEON_MAX_UVD_HANDLES)
		return 0;

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	memcpy(rdev->uvd.saved_bo, ptr, size);

	return 0;
}
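
/**
 * radeon_uvd_resume - restore UVD state on resume
 *
 * @rdev: radeon_device pointer
 *
 * Re-upload the firmware image and either restore the saved stack/heap
 * contents or clear them if nothing was saved.
 */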
int radeon_uvd_resume(struct radeon_device *rdev)
{
	unsigned size;
	void *ptr;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

	size = radeon_bo_size(rdev->uvd.vcpu_bo);
	size -= rdev->uvd_fw->size;

	ptr = rdev->uvd.cpu_addr;
	ptr += rdev->uvd_fw->size;

	if (rdev->uvd.saved_bo != NULL) {
		memcpy(ptr, rdev->uvd.saved_bo, size);
		kfree(rdev->uvd.saved_bo);
		rdev->uvd.saved_bo = NULL;
	} else
		memset(ptr, 0, size);

	return 0;
}
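
/**
 * radeon_uvd_force_into_uvd_segment - restrict BO placement
 *
 * @rbo: buffer object to place
 *
 * Limit the placement of the buffer object to the first 256MB of VRAM,
 * the segment the UVD VCPU can address.
 */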
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
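
/**
 * radeon_uvd_free_handles - free all handles owned by a file
 *
 * @rdev: radeon_device pointer
 * @filp: file the handles belong to
 *
 * Send a destroy message for every open handle owned by @filp, wait for
 * the resulting fence and clear the handle slot.
 */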
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
		if (handle != 0 && rdev->uvd.filp[i] == filp) {
			struct radeon_fence *fence;

			radeon_uvd_note_usage(rdev);

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}
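
/**
 * radeon_uvd_cs_msg_decode - check a UVD decode message
 *
 * @msg: pointer to the mapped message
 * @buf_sizes: minimum buffer sizes to fill in
 *
 * Calculate the minimum DPB size for the codec named in the message,
 * validate the pitch and DPB size and record the required buffer sizes.
 */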
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
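
/**
 * radeon_uvd_cs_msg - handle a UVD message during CS
 *
 * @p: parser context
 * @bo: buffer object containing the message
 * @offset: offset of the message inside the BO
 * @buf_sizes: minimum buffer sizes to fill in
 *
 * Map and inspect the message, then allocate, validate or free the
 * session handle depending on whether it is a create, decode or
 * destroy message.
 */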
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	unsigned img_size = 0;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	if (bo->tbo.sync_obj) {
		r = radeon_fence_wait(bo->tbo.sync_obj, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		/* calc image size (width * height) */
		img_size = msg[6] * msg[7];
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg, calc image size (width * height) */
		img_size = msg[7] * msg[8];
		radeon_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
			p->rdev->uvd.filp[i] = p->filp;
			p->rdev->uvd.img_size[i] = img_size;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}
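
/**
 * radeon_uvd_cs_reloc - patch and validate a UVD relocation
 *
 * @p: parser context
 * @data0: index of the lower address dword
 * @data1: index of the upper address dword
 * @buf_sizes: minimum buffer sizes
 * @has_msg_cmd: whether a message command has already been seen
 *
 * Patch the buffer address into the IB and enforce the minimum buffer
 * size and 256MB segment restrictions.
 */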
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[], bool *has_msg_cmd)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d!\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->lobj.gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if (end <= start) {
			DRM_ERROR("invalid reloc offset %X!\n", offset);
			return -EINVAL;
		}
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != ((end - 1) >> 28)) {
		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		if (*has_msg_cmd) {
			DRM_ERROR("More than one message in a UVD-IB!\n");
			return -EINVAL;
		}
		*has_msg_cmd = true;
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	} else if (!*has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}
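
/**
 * radeon_uvd_cs_reg - parse register writes in a UVD IB
 *
 * @p: parser context
 * @pkt: packet to parse
 * @data0: index of the lower address dword
 * @data1: index of the upper address dword
 * @buf_sizes: minimum buffer sizes
 * @has_msg_cmd: whether a message command has already been seen
 *
 * Track the GPCOM data registers and kick off relocation handling when
 * the command register is written; reject writes to any other register.
 */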
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[],
			     bool *has_msg_cmd)
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1,
						buf_sizes, has_msg_cmd);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}
	return 0;
}
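
/**
 * radeon_uvd_cs_parse - UVD command stream parser
 *
 * @p: parser context
 *
 * Validate the alignment and relocation chunk of the IB, then walk all
 * packets, rejecting everything that is not a register write or padding,
 * and require that the IB contains a message command.
 */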
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* does the IB have a msg command */
	bool has_msg_cmd = false;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000]	=	2048,
		[0x00000001]	=	32 * 1024 * 1024,
		[0x00000002]	=	2048 * 1152 * 3,
		[0x00000003]	=	2048,
	};

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk!\n");
		return -EINVAL;
	}

	p->idx = 0;
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	if (!has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	return 0;
}
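
/**
 * radeon_uvd_send_msg - submit a message buffer to the UVD ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to use
 * @bo: buffer object containing the message
 * @fence: optional resulting fence
 *
 * Pin the message buffer into the UVD segment, build a small IB that
 * points the VCPU at it and schedule the IB; the BO reference is
 * dropped on success.
 */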
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, struct radeon_bo *bo,
			       struct radeon_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
	radeon_uvd_force_into_uvd_segment(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
	if (r)
		goto err;

	addr = radeon_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto err;
	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	radeon_bo_unref(&bo);
	return 0;

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}
/* Multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this. */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
/**
 * radeon_uvd_count_handles - count number of open streams
 *
 * @rdev: radeon_device pointer
 * @sd: number of SD streams
 * @hd: number of HD streams
 *
 * Count the number of open SD/HD streams as a hint for power management
 */
static void radeon_uvd_count_handles(struct radeon_device *rdev,
				     unsigned *sd, unsigned *hd)
{
	unsigned i;

	*sd = 0;
	*hd = 0;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_read(&rdev->uvd.handles[i]))
			continue;

		if (rdev->uvd.img_size[i] >= 720*576)
			++(*hd);
		else
			++(*sd);
	}
}
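
/**
 * radeon_uvd_idle_work_handler - power down UVD when idle
 *
 * @work: idle work item
 *
 * If no UVD fences are outstanding, power down the block (or drop the
 * UVD clocks to zero); otherwise re-arm the delayed work.
 */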
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, false);
		} else {
			radeon_set_uvd_clocks(rdev, 0, 0);
		}
	} else {
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}
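
/**
 * radeon_uvd_note_usage - bump UVD activity
 *
 * @rdev: radeon_device pointer
 *
 * Called before UVD is used; postpones the idle work, re-enables the
 * clocks or power state when the block was idle and updates the SD/HD
 * stream counts used as a DPM hint.
 */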
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool streams_changed = false;
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		unsigned hd = 0, sd = 0;
		radeon_uvd_count_handles(rdev, &sd, &hd);
		if ((rdev->pm.dpm.sd != sd) ||
		    (rdev->pm.dpm.hd != hd)) {
			rdev->pm.dpm.sd = sd;
			rdev->pm.dpm.hd = hd;
			/* disable this for now */
			/*streams_changed = true;*/
		}
	}

	if (set_clocks || streams_changed) {
		if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
			radeon_dpm_enable_uvd(rdev, true);
		} else {
			radeon_set_uvd_clocks(rdev, 53300, 40000);
		}
	}
}
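
/**
 * radeon_uvd_calc_upll_post_div - calc UPLL post divider
 *
 * @vco_freq: VCO frequency
 * @target_freq: frequency the divider should produce
 * @pd_min: post divider minimum
 * @pd_even: post divider must be even above this value
 *
 * Pick the smallest post divider that keeps the resulting frequency at
 * or below the target.
 */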
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}
/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}
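
/**
 * radeon_uvd_send_upll_ctlreq - emit UPLL control request
 *
 * @rdev: radeon_device pointer
 * @cg_upll_func_cntl: offset of the CG_UPLL_FUNC_CNTL register
 *
 * Toggle UPLL_CTLREQ and wait for both acknowledge bits to be asserted.
 */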
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}