/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
/* This is based on bucket sort, with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
        struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
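
/*
 * Worked example (illustrative only, hypothetical relocs): priorities
 * {3, 1, 3, 0} land in bucket[3] = {r0, r2}, bucket[1] = {r1} and
 * bucket[0] = {r3}; concatenating the buckets in descending order
 * yields the validation list r0, r2, r1, r3.
 */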
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
        unsigned i;

        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
                INIT_LIST_HEAD(&b->bucket[i]);
}
static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
                                  struct list_head *item, unsigned priority)
{
        /* Since buffers which appear sooner in the relocation list are
         * likely to be used more often than buffers which appear later
         * in the list, the sort mustn't change the ordering of buffers
         * with the same priority, i.e. it must be stable.
         */
        list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}
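
/*
 * Stability illustrated (hypothetical relocs): if r0 and r2 both have
 * priority 3, list_add_tail() keeps r0 ahead of r2 inside bucket[3];
 * an unstable sort could swap them and change which buffer gets
 * validated (and placed) first.
 */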
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
                                       struct list_head *out_list)
{
        unsigned i;

        /* Connect the sorted buckets in the output list. */
        for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
                list_splice(&b->bucket[i], out_list);
        }
}
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
        struct radeon_cs_chunk *chunk;
        struct radeon_cs_buckets buckets;
        unsigned i;
        bool need_mmap_lock = false;
        int r;

        if (p->chunk_relocs == NULL) {
                return 0;
        }
        chunk = p->chunk_relocs;
        /* FIXME: we assume that each reloc uses 4 dwords */
        p->nrelocs = chunk->length_dw / 4;
        p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
        if (p->relocs == NULL) {
                return -ENOMEM;
        }

        radeon_cs_buckets_init(&buckets);

        for (i = 0; i < p->nrelocs; i++) {
                struct drm_radeon_cs_reloc *r;
                struct drm_gem_object *gobj;
                unsigned priority;

                r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
                gobj = drm_gem_object_lookup(p->filp, r->handle);
                if (gobj == NULL) {
                        DRM_ERROR("gem object lookup failed 0x%x\n",
                                  r->handle);
                        return -ENOENT;
                }
                p->relocs[i].robj = gem_to_radeon_bo(gobj);

                /* The userspace buffer priorities are from 0 to 15. A higher
                 * number means the buffer is more important.
                 * Also, the buffers used for write have a higher priority than
                 * the buffers used for read only, which doubles the range
                 * to 0 to 31. 32 is reserved for the kernel driver.
                 */
                priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
                           + !!r->write_domain;
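
                /*
                 * Example (illustrative): a priority-15 reloc that is
                 * written to gets 15 * 2 + 1 = 31; the same reloc used
                 * read-only gets 30. Only the kernel itself assigns 32
                 * (RADEON_CS_MAX_PRIORITY) below.
                 */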
                /* The first reloc of an UVD job is the msg and that must be in
                 * VRAM; also put everything into VRAM on AGP cards and older
                 * IGP chips to avoid image corruption.
                 */
                if (p->ring == R600_RING_TYPE_UVD_INDEX &&
                    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
                     p->rdev->family == CHIP_RS780 ||
                     p->rdev->family == CHIP_RS880)) {

                        /* TODO: is this still needed for NI+ ? */
                        p->relocs[i].prefered_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        p->relocs[i].allowed_domains =
                                RADEON_GEM_DOMAIN_VRAM;

                        /* prioritize this over any other relocation */
                        priority = RADEON_CS_MAX_PRIORITY;
                } else {
                        uint32_t domain = r->write_domain ?
                                r->write_domain : r->read_domains;

                        if (domain & RADEON_GEM_DOMAIN_CPU) {
                                DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
                                          "for command submission\n");
                                return -EINVAL;
                        }

                        p->relocs[i].prefered_domains = domain;
                        if (domain == RADEON_GEM_DOMAIN_VRAM)
                                domain |= RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].allowed_domains = domain;
                }

                if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
                        uint32_t domain = p->relocs[i].prefered_domains;
                        if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
                                DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
                                          "allowed for userptr BOs\n");
                                return -EINVAL;
                        }
                        need_mmap_lock = true;
                        domain = RADEON_GEM_DOMAIN_GTT;
                        p->relocs[i].prefered_domains = domain;
                        p->relocs[i].allowed_domains = domain;
                }

                p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
                p->relocs[i].tv.shared = !r->write_domain;

                radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
                                      priority);
        }

        radeon_cs_buckets_get_list(&buckets, &p->validated);

        if (p->cs_flags & RADEON_CS_USE_VM)
                p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
                                              &p->validated);
        if (need_mmap_lock)
                down_read(&current->mm->mmap_sem);

        r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

        if (need_mmap_lock)
                up_read(&current->mm->mmap_sem);

        return r;
}
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
        p->priority = priority;

        switch (ring) {
        default:
                DRM_ERROR("unknown ring id: %d\n", ring);
                return -EINVAL;
        case RADEON_CS_RING_GFX:
                p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_COMPUTE:
                if (p->rdev->family >= CHIP_TAHITI) {
                        if (p->priority > 0)
                                p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
                } else
                        p->ring = RADEON_RING_TYPE_GFX_INDEX;
                break;
        case RADEON_CS_RING_DMA:
                if (p->rdev->family >= CHIP_CAYMAN) {
                        if (p->priority > 0)
                                p->ring = R600_RING_TYPE_DMA_INDEX;
                        else
                                p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
                } else if (p->rdev->family >= CHIP_RV770) {
                        p->ring = R600_RING_TYPE_DMA_INDEX;
                } else {
                        return -EINVAL;
                }
                break;
        case RADEON_CS_RING_UVD:
                p->ring = R600_RING_TYPE_UVD_INDEX;
                break;
        case RADEON_CS_RING_VCE:
                /* TODO: only use the low priority ring for now */
                p->ring = TN_RING_TYPE_VCE1_INDEX;
                break;
        }
        return 0;
}
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
        struct radeon_bo_list *reloc;
        int r;

        list_for_each_entry(reloc, &p->validated, tv.head) {
                struct reservation_object *resv;

                resv = reloc->robj->tbo.resv;
                r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
                                     reloc->tv.shared);
                if (r)
                        return r;
        }
        return 0;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
        struct drm_radeon_cs *cs = data;
        uint64_t *chunk_array_ptr;
        unsigned size, i;
        u32 ring = RADEON_CS_RING_GFX;
        s32 priority = 0;

        INIT_LIST_HEAD(&p->validated);

        if (!cs->num_chunks) {
                return 0;
        }

        /* get chunks */
        p->idx = 0;
        p->ib.sa_bo = NULL;
        p->const_ib.sa_bo = NULL;
        p->chunk_ib = NULL;
        p->chunk_relocs = NULL;
        p->chunk_flags = NULL;
        p->chunk_const_ib = NULL;
        p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
        if (p->chunks_array == NULL) {
                return -ENOMEM;
        }
        chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
        if (copy_from_user(p->chunks_array, chunk_array_ptr,
                           sizeof(uint64_t)*cs->num_chunks)) {
                return -EFAULT;
        }
        p->cs_flags = 0;
        p->nchunks = cs->num_chunks;
        p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
        if (p->chunks == NULL) {
                return -ENOMEM;
        }
        for (i = 0; i < p->nchunks; i++) {
                struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
                struct drm_radeon_cs_chunk user_chunk;
                uint32_t __user *cdata;

                chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
                if (copy_from_user(&user_chunk, chunk_ptr,
                                   sizeof(struct drm_radeon_cs_chunk))) {
                        return -EFAULT;
                }
                p->chunks[i].length_dw = user_chunk.length_dw;
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
                        p->chunk_relocs = &p->chunks[i];
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        p->chunk_ib = &p->chunks[i];
                        /* zero length IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
                        p->chunk_const_ib = &p->chunks[i];
                        /* zero length CONST IB isn't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->chunk_flags = &p->chunks[i];
                        /* zero length flags aren't useful */
                        if (p->chunks[i].length_dw == 0)
                                return -EINVAL;
                }

                size = p->chunks[i].length_dw;
                cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
                p->chunks[i].user_ptr = cdata;
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
                        continue;

                if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
                        if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
                                continue;
                }

                p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
                size *= sizeof(uint32_t);
                if (p->chunks[i].kdata == NULL) {
                        return -ENOMEM;
                }
                if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
                        return -EFAULT;
                }
                if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
                        p->cs_flags = p->chunks[i].kdata[0];
                        if (p->chunks[i].length_dw > 1)
                                ring = p->chunks[i].kdata[1];
                        if (p->chunks[i].length_dw > 2)
                                priority = (s32)p->chunks[i].kdata[2];
                }
        }

        /* these are KMS only */
        if (p->rdev) {
                if ((p->cs_flags & RADEON_CS_USE_VM) &&
                    !p->rdev->vm_manager.enabled) {
                        DRM_ERROR("VM not active on asic!\n");
                        return -EINVAL;
                }

                if (radeon_cs_get_ring(p, ring, priority))
                        return -EINVAL;

                /* we only support VM on some SI+ rings */
                if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
                        if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
                                DRM_ERROR("Ring %d requires VM!\n", p->ring);
                                return -EINVAL;
                        }
                } else {
                        if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
                                DRM_ERROR("VM not supported on ring %d!\n",
                                          p->ring);
                                return -EINVAL;
                        }
                }
        }

        return 0;
}
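
/*
 * Illustrative flags chunk, as consumed above (values are examples only):
 * a three-dword RADEON_CHUNK_ID_FLAGS chunk of
 * { RADEON_CS_USE_VM, RADEON_CS_RING_COMPUTE, 0 } requests a VM
 * submission on the compute ring at default priority.
 */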
static int cmp_size_smaller_first(void *priv, struct list_head *a,
                                  struct list_head *b)
{
        struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
        struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

        /* Sort A before B if A is smaller. */
        return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
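
/*
 * Note (illustrative): list_sort() places @a before @b when the comparator
 * returns a negative value, so e.g. a 2-page BO sorts ahead of a 5-page
 * one; the int cast assumes BO sizes stay well below INT_MAX pages.
 */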
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator of whether to back off the reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
        unsigned i;

        if (!error) {
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
                 * to the LRU list, so they are likely to be later evicted
                 * first, instead of large buffers whose eviction is more
                 * expensive.
                 *
                 * This slightly lowers the number of bytes moved by TTM
                 * per frame under memory pressure.
                 */
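
                /*
                 * Example (illustrative): validated BOs of 4, 1 and 2 pages
                 * are reordered to 1, 2, 4, so under memory pressure TTM
                 * evicts the 1-page BO before touching the 4-page one.
                 */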
                list_sort(NULL, &parser->validated, cmp_size_smaller_first);

                ttm_eu_fence_buffer_objects(&parser->ticket,
                                            &parser->validated,
                                            &parser->ib.fence->base);
        } else if (backoff) {
                ttm_eu_backoff_reservation(&parser->ticket,
                                           &parser->validated);
        }

        if (parser->relocs != NULL) {
                for (i = 0; i < parser->nrelocs; i++) {
                        struct radeon_bo *bo = parser->relocs[i].robj;
                        if (bo == NULL)
                                continue;

                        drm_gem_object_unreference_unlocked(&bo->gem_base);
                }
        }
        kfree(parser->track);
        drm_free_large(parser->relocs);
        drm_free_large(parser->vm_bos);
        for (i = 0; i < parser->nchunks; i++)
                drm_free_large(parser->chunks[i].kdata);
        kfree(parser->chunks);
        kfree(parser->chunks_array);
        radeon_ib_free(parser->rdev, &parser->ib);
        radeon_ib_free(parser->rdev, &parser->const_ib);
}
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
                              struct radeon_cs_parser *parser)
{
        int r;

        if (parser->chunk_ib == NULL)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM)
                return 0;

        r = radeon_cs_parse(rdev, parser->ring, parser);
        if (r || parser->parser_error) {
                DRM_ERROR("Invalid command stream !\n");
                return r;
        }

        r = radeon_cs_sync_rings(parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);
        else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
                 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
                radeon_vce_note_usage(rdev);

        r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        if (r) {
                DRM_ERROR("Failed to schedule IB !\n");
        }
        return r;
}
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
{
        struct radeon_device *rdev = p->rdev;
        struct radeon_bo_va *bo_va;
        int i, r;

        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;

        r = radeon_vm_clear_freed(rdev, vm);
        if (r)
                return r;

        if (vm->ib_bo_va == NULL) {
                DRM_ERROR("Tmp BO not in VM!\n");
                return -EINVAL;
        }

        r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;

        for (i = 0; i < p->nrelocs; i++) {
                struct radeon_bo *bo;

                bo = p->relocs[i].robj;
                bo_va = radeon_vm_bo_find(vm, bo);
                if (bo_va == NULL) {
                        dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                        return -EINVAL;
                }

                r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;

                radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
        }

        return radeon_vm_clear_invalids(rdev, vm);
}
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
                                 struct radeon_cs_parser *parser)
{
        struct radeon_fpriv *fpriv = parser->filp->driver_priv;
        struct radeon_vm *vm = &fpriv->vm;
        int r;

        if (parser->chunk_ib == NULL)
                return 0;
        if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
                return 0;

        if (parser->const_ib.length_dw) {
                r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
                if (r) {
                        return r;
                }
        }

        r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
        if (r) {
                return r;
        }

        if (parser->ring == R600_RING_TYPE_UVD_INDEX)
                radeon_uvd_note_usage(rdev);

        mutex_lock(&vm->mutex);
        r = radeon_bo_vm_update_pte(parser, vm);
        if (r)
                goto out;

        r = radeon_cs_sync_rings(parser);
        if (r) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("Failed to sync rings: %i\n", r);
                goto out;
        }

        if ((rdev->family >= CHIP_TAHITI) &&
            (parser->chunk_const_ib != NULL)) {
                r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
        } else {
                r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
        }

out:
        mutex_unlock(&vm->mutex);
        return r;
}
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
        if (r == -EDEADLK) {
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
        }
        return r;
}
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_vm *vm = NULL;
        int r;

        if (parser->chunk_ib == NULL)
                return 0;

        if (parser->cs_flags & RADEON_CS_USE_VM) {
                struct radeon_fpriv *fpriv = parser->filp->driver_priv;
                vm = &fpriv->vm;

                if ((rdev->family >= CHIP_TAHITI) &&
                    (parser->chunk_const_ib != NULL)) {
                        ib_chunk = parser->chunk_const_ib;
                        if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                                DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
                                return -EINVAL;
                        }
                        r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
                                          vm, ib_chunk->length_dw * 4);
                        if (r) {
                                DRM_ERROR("Failed to get const ib !\n");
                                return r;
                        }
                        parser->const_ib.is_const_ib = true;
                        parser->const_ib.length_dw = ib_chunk->length_dw;
                        if (copy_from_user(parser->const_ib.ptr,
                                           ib_chunk->user_ptr,
                                           ib_chunk->length_dw * 4))
                                return -EFAULT;
                }

                ib_chunk = parser->chunk_ib;
                if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
                        DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
                        return -EINVAL;
                }
        }
        ib_chunk = parser->chunk_ib;

        r = radeon_ib_get(rdev, parser->ring, &parser->ib,
                          vm, ib_chunk->length_dw * 4);
        if (r) {
                DRM_ERROR("Failed to get ib !\n");
                return r;
        }
        parser->ib.length_dw = ib_chunk->length_dw;
        if (ib_chunk->kdata)
                memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
        else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
                return -EFAULT;
        return 0;
}
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
        struct radeon_device *rdev = dev->dev_private;
        struct radeon_cs_parser parser;
        int r;

        down_read(&rdev->exclusive_lock);
        if (!rdev->accel_working) {
                up_read(&rdev->exclusive_lock);
                return -EBUSY;
        }
        if (rdev->in_reset) {
                up_read(&rdev->exclusive_lock);
                r = radeon_gpu_reset(rdev);
                if (!r)
                        r = -EAGAIN;
                return r;
        }
        /* initialize parser */
        memset(&parser, 0, sizeof(struct radeon_cs_parser));
        parser.filp = filp;
        parser.rdev = rdev;
        parser.dev = rdev->dev;
        parser.family = rdev->family;
        r = radeon_cs_parser_init(&parser, data);
        if (r) {
                DRM_ERROR("Failed to initialize parser !\n");
                radeon_cs_parser_fini(&parser, r, false);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        r = radeon_cs_ib_fill(rdev, &parser);
        if (!r) {
                r = radeon_cs_parser_relocs(&parser);
                if (r && r != -ERESTARTSYS)
                        DRM_ERROR("Failed to parse relocation %d!\n", r);
        }

        if (r) {
                radeon_cs_parser_fini(&parser, r, false);
                up_read(&rdev->exclusive_lock);
                r = radeon_cs_handle_lockup(rdev, r);
                return r;
        }

        trace_radeon_cs(&parser);

        r = radeon_cs_ib_chunk(rdev, &parser);
        if (r)
                goto out;
        r = radeon_cs_ib_vm_chunk(rdev, &parser);
        if (r)
                goto out;
out:
        radeon_cs_parser_fini(&parser, r, true);
        up_read(&rdev->exclusive_lock);
        r = radeon_cs_handle_lockup(rdev, r);
        return r;
}
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the IB
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining IB size, or if the packet
 * type is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt,
                           unsigned idx)
{
        struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
        struct radeon_device *rdev = p->rdev;
        uint32_t header;
        int ret = 0, i;

        if (idx >= ib_chunk->length_dw) {
                DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
                          idx, ib_chunk->length_dw);
                return -EINVAL;
        }
        header = radeon_get_ib_value(p, idx);
        pkt->idx = idx;
        pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
        pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
        pkt->one_reg_wr = 0;
        switch (pkt->type) {
        case RADEON_PACKET_TYPE0:
                if (rdev->family < CHIP_R600) {
                        pkt->reg = R100_CP_PACKET0_GET_REG(header);
                        pkt->one_reg_wr =
                                RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
                } else
                        pkt->reg = R600_CP_PACKET0_GET_REG(header);
                break;
        case RADEON_PACKET_TYPE3:
                pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
                break;
        case RADEON_PACKET_TYPE2:
                pkt->count = -1;
                break;
        default:
                DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
                ret = -EINVAL;
                goto dump_ib;
        }
        if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
                DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
                          pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
                ret = -EINVAL;
                goto dump_ib;
        }
        return 0;

dump_ib:
        for (i = 0; i < ib_chunk->length_dw; i++) {
                if (i == idx)
                        printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
                else
                        printk("\t0x%08x\n", radeon_get_ib_value(p, i));
        }
        return ret;
}
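
/*
 * Illustrative decode (example value, not from the driver): header
 * 0xC0012300 has type 3 in bits 31:30, count 1 in bits 29:16 and
 * PACKET3 opcode 0x23 in bits 15:8, so the packet spans
 * count + 2 = 3 dwords of the IB.
 */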
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet p3reloc;
        int r;

        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return false;
        if (p3reloc.type != RADEON_PACKET_TYPE3)
                return false;
        if (p3reloc.opcode != RADEON_PACKET3_NOP)
                return false;
        return true;
}
/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
                           struct radeon_cs_packet *pkt)
{
        volatile uint32_t *ib;
        unsigned i;
        unsigned idx;

        ib = p->ib.ptr;
        idx = pkt->idx;
        for (i = 0; i <= (pkt->count + 1); i++, idx++)
                DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	reloc information
 * @nomm:	no memory management (legacy UMS path)
 *
 * Check if the next packet is a relocation packet3; do BO validation and
 * compute the GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
                                struct radeon_bo_list **cs_reloc,
                                int nomm)
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_packet p3reloc;
        unsigned idx;
        int r;

        if (p->chunk_relocs == NULL) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }
        *cs_reloc = NULL;
        relocs_chunk = p->chunk_relocs;
        r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
        if (r)
                return r;
        p->idx += p3reloc.count + 2;
        if (p3reloc.type != RADEON_PACKET_TYPE3 ||
            p3reloc.opcode != RADEON_PACKET3_NOP) {
                DRM_ERROR("No packet3 for relocation for packet at %d.\n",
                          p3reloc.idx);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        idx = radeon_get_ib_value(p, p3reloc.idx + 1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                radeon_cs_dump_packet(p, &p3reloc);
                return -EINVAL;
        }
        /* FIXME: we assume reloc size is 4 dwords */
        if (nomm) {
                *cs_reloc = p->relocs;
                (*cs_reloc)->gpu_offset =
                        (u64)relocs_chunk->kdata[idx + 3] << 32;
                (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
        } else
                *cs_reloc = &p->relocs[(idx / 4)];
        return 0;
}