/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}
static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
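/* radeon_cs_buckets_get_list() splices each bucket at the head of out_list,
 * so the highest-priority bucket (RADEON_CS_MAX_PRIORITY) ends up in front
 * and the resulting list is ordered by descending priority, as the comment
 * on struct radeon_cs_buckets promises.
 */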
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i;
	bool need_mmap_lock = false;
	int r;

	if (p->chunk_relocs == NULL) {
		return 0;
	}
	chunk = p->chunk_relocs;
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs = kvmalloc_array(p->nrelocs, sizeof(struct radeon_bo_list),
				   GFP_KERNEL | __GFP_ZERO);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		struct drm_gem_object *gobj;
		unsigned priority;

		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		gobj = drm_gem_object_lookup(p->filp, r->handle);
		if (gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs[i].robj = gem_to_radeon_bo(gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;
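		/* For example: a read-only reloc submitted with userspace
		 * priority 15 lands in bucket 30, the same reloc with a
		 * write domain in bucket 31, while bucket 32
		 * (RADEON_CS_MAX_PRIORITY) stays reserved for relocs the
		 * kernel promotes itself, like the UVD msg buffer below.
		 */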
		/* The first reloc of an UVD job is the msg and that must be in
		 * VRAM, the second reloc is the DPB and for WMV that must be in
		 * VRAM as well. Also put everything into VRAM on AGP cards and older
		 * IGP chips to avoid image corruptions
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i <= 0 || pci_find_capability(p->rdev->ddev->pdev,
						   PCI_CAP_ID_AGP) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].preferred_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].preferred_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].preferred_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].preferred_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		/* Objects shared as dma-bufs cannot be moved to VRAM */
		if (p->relocs[i].robj->prime_shared_count) {
			p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
			if (!p->relocs[i].allowed_domains) {
				DRM_ERROR("BO associated with dma-buf cannot "
					  "be moved to VRAM\n");
				return -EINVAL;
			}
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.num_shared = !r->write_domain;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}
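/* radeon_cs_get_ring() - map the ring id and priority supplied by userspace
 * in the FLAGS chunk onto one of the kernel's hardware ring indices.
 */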
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}
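/* radeon_cs_sync_rings() - make the IB wait for the fences of every
 * validated BO, so the work submitted here is ordered against earlier
 * users of those buffers on other rings.
 */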
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *reloc;
	int r;

	list_for_each_entry(reloc, &p->validated, tv.head) {
		struct reservation_object *resv;

		resv = reloc->robj->tbo.resv;
		r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
				     reloc->tv.num_shared);
		if (r)
			return r;
	}
	return 0;
}
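/* radeon_cs_parser_init() - copy the chunk array in from userspace, classify
 * each chunk (IB, RELOCS, CONST_IB, FLAGS) and validate the requested ring.
 */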
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	INIT_LIST_HEAD(&p->validated);

	if (!cs->num_chunks) {
		return 0;
	}

	/* get chunks */
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->const_ib.sa_bo = NULL;
	p->chunk_ib = NULL;
	p->chunk_relocs = NULL;
	p->chunk_flags = NULL;
	p->chunk_const_ib = NULL;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs = &p->chunks[i];
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib = &p->chunks[i];
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib = &p->chunks[i];
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags = &p->chunks[i];
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
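/*
 * For reference, the ioctl argument decoded above looks roughly like this
 * (see include/uapi/drm/radeon_drm.h for the authoritative definitions):
 *
 *	struct drm_radeon_cs_chunk {
 *		uint32_t chunk_id;	// RADEON_CHUNK_ID_*
 *		uint32_t length_dw;	// payload size in dwords
 *		uint64_t chunk_data;	// user pointer to the payload
 *	};
 *
 *	struct drm_radeon_cs {
 *		uint32_t num_chunks;
 *		uint32_t cs_id;
 *		uint64_t chunks;	// user pointer to an array of user
 *					// pointers to drm_radeon_cs_chunk
 *		uint64_t gart_limit;	// limit updates after this CS
 *		uint64_t vram_limit;
 *	};
 */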
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, then unreserve the buffers, otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			struct radeon_bo *bo = parser->relocs[i].robj;
			if (bo == NULL)
				continue;

			drm_gem_object_put_unlocked(&bo->gem_base);
		}
	}
	kfree(parser->track);
	kvfree(parser->relocs);
	kvfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
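/* radeon_cs_ib_chunk() - submission path for rings without a VM: run the
 * per-ASIC command stream checker, then sync and schedule the IB.
 */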
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r)
		DRM_ERROR("Failed to schedule IB !\n");
	return r;
}
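/* radeon_bo_vm_update_pte() - bring the page tables of the VM up to date for
 * every BO referenced by this CS, fencing the IB against the page table
 * updates.
 */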
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;

		radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
	}

	return radeon_vm_clear_invalids(rdev, vm);
}
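/* radeon_cs_ib_vm_chunk() - submission path for rings that use a GPU VM:
 * only the lighter per-ring ib_parse check is run here, since the VM
 * already isolates clients from each other.
 */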
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r)
			return r;
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r)
		return r;

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r)
		goto out;

	r = radeon_cs_sync_rings(parser);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to sync rings: %i\n", r);
		goto out;
	}

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib != NULL)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}
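/* radeon_cs_handle_lockup() - if submission failed because a GPU lockup was
 * detected, reset the GPU and ask userspace to retry with -EAGAIN.
 */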
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
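/* radeon_cs_ib_fill() - allocate the IB (and the CONST_IB on SI+) and copy
 * the command dwords either from kdata, if the chunk was already bounced
 * into the kernel (AGP), or directly from the user pointer.
 */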
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib == NULL)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib != NULL)) {
			ib_chunk = parser->chunk_const_ib;
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = parser->chunk_ib;
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = parser->chunk_ib;

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r)
		goto out;

	r = radeon_cs_ib_vm_chunk(rdev, &parser);

out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
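/* The helpers below are used by the per-ASIC CS checkers (r100_cs_parse()
 * and friends) to walk an IB packet by packet and to resolve relocation
 * NOP packets.
 */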
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	packet index
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
	struct radeon_device *rdev = p->rdev;
	uint32_t header;
	int ret = 0, i;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		ret = -EINVAL;
		goto dump_ib;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		ret = -EINVAL;
		goto dump_ib;
	}
	return 0;

dump_ib:
	for (i = 0; i < ib_chunk->length_dw; i++) {
		if (i == idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
	return ret;
}
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}
/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	reloc information
 * @nomm:	no memory management for debugging
 *
 * Check if next packet is relocation packet3, do bo validation and compute
 * GPU offset using the provided start.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_bo_list **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs == NULL) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = p->chunk_relocs;
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = &p->relocs[(idx / 4)];
	return 0;
}