/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};
static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}
static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space, if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer.
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one.
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		/* move this VRAM|GART buffer to VRAM to free up GART space */
		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush. */
	return false;
}
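/* Illustrative sketch, not part of the original file: the same greedy
 * accounting order as pushbuf_kref_fits() above, written against plain
 * integers so the decision sequence (VRAM-only first, then GART, then
 * promotion to VRAM) is easy to follow.  All names below are hypothetical.
 */
#if 0
static bool example_fits(uint64_t *vram_used, uint64_t *gart_used,
			 uint64_t vram_limit, uint64_t gart_limit,
			 uint64_t size, bool can_use_vram, bool can_use_gart)
{
	/* VRAM-only buffers must fit in VRAM, otherwise force a flush */
	if (can_use_vram && !can_use_gart) {
		if (*vram_used + size > vram_limit)
			return false;
		*vram_used += size;
		return true;
	}
	/* GART and VRAM|GART buffers are charged to GART first... */
	if (*gart_used + size <= gart_limit) {
		*gart_used += size;
		return true;
	}
	/* ...and fall back to VRAM when GART is exhausted */
	if (can_use_vram && *vram_used + size <= vram_limit) {
		*vram_used += size;
		return true;
	}
	return false;
}
#endif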
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;

			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains  |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel = &krec->reloc[krec->nr_reloc++];

	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}
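/* Illustrative sketch, not part of the original file: how a caller would
 * typically use the LOW/HIGH relocation flags (through the public
 * nouveau_pushbuf_reloc() wrapper defined later in this file) to emit a
 * 64-bit buffer address as two dwords that the kernel can patch if the
 * buffer moves.  The ordering and flag combination shown are only an
 * example, not taken from this file.
 */
#if 0
	/* high 32 bits of the presumed offset, then the low 32 bits */
	nouveau_pushbuf_reloc(push, bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
					   NOUVEAU_BO_HIGH, 0, 0);
	nouveau_pushbuf_reloc(push, bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD |
					   NOUVEAU_BO_LOW, 0, 0);
#endif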
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(uintptr_t)kref->user_priv;
		err("ch%d: buf %08x %08x %08x %08x %08x %p 0x%"PRIx64" 0x%"PRIx64"\n",
		    chid, i, kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains, bo->map,
		    bo->offset, bo->size);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + ((kpsh->length & 0x7fffff) / 4);

		err("ch%d: psh %s%08x %010llx %010llx\n", chid,
		    bo->map ? "" : "(unmapped) ", kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		if (!bo->map)
			continue;
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		if (dbg_on(1))
			req.vram_available |= NOUVEAU_GEM_PUSHBUF_SYNC;
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

		ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				   nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				   nouveau_device(dev)->gart_limit_percent) / 100;

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}
static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
				     bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}
drm_public int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_drm *drm = nouveau_drm(&client->device->object);
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type   = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type   = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}
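/* Illustrative sketch, not part of the original file: typical client usage of
 * the public pushbuf API, assuming "client" and "chan" were created elsewhere
 * (e.g. with nouveau_client_new() and a FIFO channel object).  The buffer
 * count, size and emitted dwords below are hypothetical.
 */
#if 0
	struct nouveau_pushbuf *push;
	int ret;

	/* two 64KiB command buffers, submitted immediately on flush */
	ret = nouveau_pushbuf_new(client, chan, 2, 64 * 1024, true, &push);
	if (ret == 0) {
		nouveau_pushbuf_space(push, 8, 0, 0);	/* reserve 8 dwords */
		*push->cur++ = 0x20010000;		/* hypothetical method header */
		*push->cur++ = 0x00000001;		/* hypothetical method data */
		nouveau_pushbuf_kick(push, chan);	/* submit to the kernel */
		nouveau_pushbuf_del(&push);
	}
#endif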
drm_public void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}
drm_public struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}
drm_public int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && (push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}
drm_public void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset   = offset;
		kpsh->length   = length;
	}
}
drm_public int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}
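/* Illustrative sketch, not part of the original file: referencing a set of
 * buffers up front with nouveau_pushbuf_refn() so that later command emission
 * for those buffers cannot trigger a flush mid-sequence.  The buffer objects
 * "vtxbuf" and "fb" are hypothetical and assumed to exist already.
 */
#if 0
	struct nouveau_pushbuf_refn refs[] = {
		{ vtxbuf, NOUVEAU_BO_GART | NOUVEAU_BO_RD },
		{ fb,     NOUVEAU_BO_VRAM | NOUVEAU_BO_WR },
	};

	if (nouveau_pushbuf_refn(push, refs, 2) == 0) {
		/* both buffers are now part of the pending submission */
	}
#endif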
drm_public void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}
drm_public int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}
drm_public uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}
drm_public int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);
	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}
drm_public bool
nouveau_check_dead_channel(struct nouveau_drm *drm, struct nouveau_object *chan)
{
	struct drm_nouveau_gem_pushbuf req = {};
	struct nouveau_fifo *fifo = chan->data;
	int ret;

	req.channel = fifo->channel;
	req.nr_push = 0;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	/* nouveau returns ENODEV once the channel was killed */
	return ret == -ENODEV;
}