/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <inttypes.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

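/* One kernel submission record: staging copies of the buffer, relocation
 * and push arrays handed to the DRM_NOUVEAU_GEM_PUSHBUF ioctl, plus running
 * totals of how much VRAM/GART the referenced buffers require.  Records are
 * chained via ->next when flushing a pushbuf that has no immediate channel.
 */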
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};

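/* Private state behind the public struct nouveau_pushbuf: the list of
 * submission records, the record currently being built, the bufctxs touched
 * since the last flush, the backing pushbuf BOs, and the suffix dwords the
 * kernel asks us to append on early chipsets.
 */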
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);

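/* Check whether a new reference to 'bo' still fits within the device's
 * VRAM/GART limits for the current submission, demoting VRAM|GART buffers
 * to VRAM-only where that frees up enough GART space.  Returns false when
 * a flush is needed before the buffer can be referenced.
 */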
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;
		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space, if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort;
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}

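/* Add (or update) the kernel buffer reference for 'bo' in the current
 * submission record.  Returns NULL when the requested domains conflict with
 * an existing reference, or the buffer simply doesn't fit, in which case the
 * caller is expected to flush and retry.
 */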
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (            domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;

			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains  |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}

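/* Record a relocation against 'bo' at the current position in the pushbuf
 * and return the value to write there now, computed from the buffer's
 * presumed offset/domain so the kernel only has to patch it if the buffer
 * moves.
 */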
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel  = &krec->reloc[krec->nr_reloc++];

	assert(pkref);
	assert(bkref);
	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}

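/* Debug helper: dump a submission record's buffer references, relocations
 * and push entries, including the raw dwords of any mapped pushbuf BOs.
 */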
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(uintptr_t)kref->user_priv;
		err("ch%d: buf %08x %08x %08x %08x %08x %p 0x%"PRIx64" 0x%"PRIx64"\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains, bo->map, bo->offset, bo->size);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + ((kpsh->length & 0x7fffff) / 4);

		err("ch%d: psh %s%08x %010llx %010llx\n", chid,
		    bo->map ? "" : "(unmapped) ", kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		if (!bo->map)
			continue;
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}

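/* Submit every queued submission record on this pushbuf to the kernel, then
 * update each referenced BO's placement/access state and the device's
 * VRAM/GART limits from the ioctl results.
 */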
static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		if (dbg_on(1))
			req.vram_available |= NOUVEAU_GEM_PUSHBUF_SYNC;
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

#ifndef SIMULATE
		ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				nouveau_device(dev)->gart_limit_percent) / 100;
#else
		if (dbg_on(31))
			ret = -EINVAL;
#endif

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}

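/* Flush the current submission record: submit it right away if the pushbuf
 * owns a channel, otherwise chain a fresh record to be submitted later.
 * The per-client buffer tracking is reset and any bufctx validated since
 * the last flush has its buffers moved back to the pending list.
 */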
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

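/* Undo the buffer references and relocations added since the snapshot taken
 * at the start of a failed pushbuf_refn()/pushbuf_validate() call.
 */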
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}

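/* Make space for the bufctx's pending relocations, then reference every
 * buffer on its pending list and emit the relocations.  On failure the
 * additions are rolled back and, if requested, the pushbuf is flushed and
 * validation retried once.
 */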
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
				     bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}

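/* Create a pushbuf for 'chan', backed by 'nr' internal buffers of 'size'
 * bytes each.  With 'immediate' set, flushes are submitted to the channel
 * straight away; otherwise records queue up until nouveau_pushbuf_kick().
 *
 * Illustrative usage only (a sketch; 'client', 'chan' and 'bo' are assumed
 * to exist already, error handling omitted, method header is hypothetical):
 *
 *	struct nouveau_pushbuf_refn ref = { bo, NOUVEAU_BO_VRAM | NOUVEAU_BO_RD };
 *	struct nouveau_pushbuf *push;
 *
 *	nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push);
 *	nouveau_pushbuf_refn(push, &ref, 1);		// reference bo
 *	nouveau_pushbuf_space(push, 8, 1, 0);		// reserve 8 dwords, 1 reloc
 *	*push->cur++ = 0x00000000;			// hypothetical method header
 *	nouveau_pushbuf_reloc(push, bo, 0, NOUVEAU_BO_VRAM | NOUVEAU_BO_LOW, 0, 0);
 *	nouveau_pushbuf_kick(push, chan);		// submit
 */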
drm_public int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_drm *drm = nouveau_drm(&client->device->object);
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type   = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type   = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}

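/* Destroy a pushbuf: drop every buffer reference still held by queued
 * submission records, free the records and release the backing BOs.
 */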
drm_public void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}

drm_public struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}

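/* Ensure at least 'dwords' of command space plus room for 'relocs'
 * relocations and 'pushes' push entries.  Switches to (or allocates) a new
 * pushbuf BO when the current one is full, flushing first if an immediate
 * channel or the kernel's reloc/push limits make that necessary.
 */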
drm_public int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && ( push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}

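/* Queue a push entry covering 'length' bytes of 'bo' starting at 'offset'.
 * Called with bo == NULL this just closes off the dwords emitted into the
 * internal pushbuf BO so far, appending the chipset's suffix dwords and
 * recording that segment as a push entry of its own.
 */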
drm_public void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset   = offset;
		kpsh->length   = length;
	}
}

drm_public int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

drm_public void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}

drm_public int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

drm_public uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

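/* Submit all queued commands on 'chan'.  For pushbufs created without an
 * immediate channel this is where the accumulated records reach the kernel;
 * immediate pushbufs are simply flushed and revalidated.
 */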
drm_public int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);
	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}

drm_public bool
nouveau_check_dead_channel(struct nouveau_drm *drm, struct nouveau_object *chan)
{
	struct drm_nouveau_gem_pushbuf req = {};
	struct nouveau_fifo *fifo = chan->data;
	int ret;

	req.channel = fifo->channel;
	req.nr_push = 0;

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	/* nouveau returns ENODEV once the channel was killed */
	return ret == -ENODEV;
}