netbsd-mini2440.git: sys/external/bsd/drm/dist/libdrm/nouveau/nouveau_bo.c
blob 66466e38ca0e8328d6b139538bce7658e49bef8c
/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h>	/* memcpy() */
#include <errno.h>
#include <assert.h>

#include <sys/mman.h>
#include <sys/ioctl.h>

#include "nouveau_private.h"
int
nouveau_bo_init(struct nouveau_device *dev)
{
	return 0;
}

void
nouveau_bo_takedown(struct nouveau_device *dev)
{
}

static int
nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
{
	if (nvbo->sysmem || nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
		return 1;
	return 0;
}
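
/* Allocate malloc()ed system-memory backing for a buffer that has none;
 * user buffers always arrive with their own storage. */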
static int
nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
{
	if (nvbo->user || nvbo->sysmem) {
		assert(nvbo->sysmem);
		return 0;
	}

	nvbo->sysmem = malloc(nvbo->size);
	if (!nvbo->sysmem)
		return -ENOMEM;

	return 0;
}

static void
nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
{
	if (nvbo->sysmem) {
		if (!nvbo->user)
			free(nvbo->sysmem);
		nvbo->sysmem = NULL;
	}
}
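
/* Free a kernel allocation made through the legacy (pre-GEM) interface,
 * dropping any drmMap() mapping of it first. */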
static void
nouveau_bo_kfree_nomm(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_mem_free req;

	if (nvbo->map) {
		drmUnmap(nvbo->map, nvbo->size);
		nvbo->map = NULL;
	}

	req.offset = nvbo->offset;
	if (nvbo->domain & NOUVEAU_BO_GART)
		req.flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI;
	else
	if (nvbo->domain & NOUVEAU_BO_VRAM)
		req.flags = NOUVEAU_MEM_FB;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE, &req, sizeof(req));

	nvbo->handle = 0;
}

static void
nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_gem_close req;

	if (!nvbo->handle)
		return;

	if (!nvdev->mm_enabled) {
		nouveau_bo_kfree_nomm(nvbo);
		return;
	}

	if (nvbo->map) {
		munmap(nvbo->map, nvbo->size);
		nvbo->map = NULL;
	}

	req.handle = nvbo->handle;
	nvbo->handle = 0;
	ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}

static int
nouveau_bo_kalloc_nomm(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_mem_alloc req;
	int ret;

	if (nvbo->handle)
		return 0;

	if (!(nvbo->flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART)))
		nvbo->flags |= (NOUVEAU_BO_GART | NOUVEAU_BO_VRAM);

	req.size = nvbo->size;
	req.alignment = nvbo->align;
	req.flags = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.flags |= NOUVEAU_MEM_FB;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.flags |= (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI);
	if (nvbo->flags & NOUVEAU_BO_TILED) {
		req.flags |= NOUVEAU_MEM_TILE;
		if (nvbo->flags & NOUVEAU_BO_ZTILE)
			req.flags |= NOUVEAU_MEM_TILE_ZETA;
	}
	req.flags |= NOUVEAU_MEM_MAPPED;

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->handle = req.map_handle;
	nvbo->size = req.size;
	nvbo->offset = req.offset;
	if (req.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
		nvbo->domain = NOUVEAU_BO_GART;
	else
	if (req.flags & NOUVEAU_MEM_FB)
		nvbo->domain = NOUVEAU_BO_VRAM;

	return 0;
}
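
/* Create the kernel-side object backing this bo: a GEM object when the
 * kernel memory manager is enabled, a legacy allocation otherwise. */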
static int
nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_gem_new req;
	int ret;

	if (nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
		return 0;

	if (!nvdev->mm_enabled)
		return nouveau_bo_kalloc_nomm(nvbo);

	req.channel_hint = chan ? chan->id : 0;

	req.size = nvbo->size;
	req.align = nvbo->align;

	req.domain = 0;

	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;

	if (nvbo->flags & NOUVEAU_BO_GART)
		req.domain |= NOUVEAU_GEM_DOMAIN_GART;

	if (nvbo->flags & NOUVEAU_BO_TILED) {
		req.domain |= NOUVEAU_GEM_DOMAIN_TILE;
		if (nvbo->flags & NOUVEAU_BO_ZTILE)
			req.domain |= NOUVEAU_GEM_DOMAIN_TILE_ZETA;
	}

	if (!req.domain) {
		req.domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
			       NOUVEAU_GEM_DOMAIN_GART);
	}

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret)
		return ret;
	nvbo->handle = nvbo->base.handle = req.handle;
	nvbo->size = req.size;
	nvbo->domain = req.domain;
	nvbo->offset = req.offset;

	return 0;
}

static int
nouveau_bo_kmap_nomm(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	int ret;

	ret = drmMap(nvdev->fd, nvbo->handle, nvbo->size, &nvbo->map);
	if (ret) {
		nvbo->map = NULL;
		return ret;
	}

	return 0;
}
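
/* Map the kernel object into the process' address space. */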
static int
nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_gem_mmap req;
	int ret;

	if (nvbo->map)
		return 0;

	if (!nvbo->handle)
		return -EINVAL;

	if (!nvdev->mm_enabled)
		return nouveau_bo_kmap_nomm(nvbo);

	req.handle = nvbo->handle;
	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_MMAP,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->map = (void *)(unsigned long)req.vaddr;
	return 0;
}
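
/* Public constructor: allocate the library-side bo, record its size,
 * alignment and placement flags, and pin it immediately if requested. */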
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
	       int size, struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	if (!dev || !bo || *bo)
		return -EINVAL;

	nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
	if (!nvbo)
		return -ENOMEM;
	nvbo->base.device = dev;
	nvbo->base.size = size;

	nvbo->refcount = 1;
	/* Don't set NOUVEAU_BO_PIN here, or nouveau_bo_allocated() will
	 * decide the buffer's already allocated when it's not.  The
	 * call to nouveau_bo_pin() later will set this flag.
	 */
	nvbo->flags = (flags & ~NOUVEAU_BO_PIN);
	nvbo->size = size;
	nvbo->align = align;

	/*XXX: murder me violently */
	if (flags & NOUVEAU_BO_TILED) {
		nvbo->base.tiled = 1;
		if (flags & NOUVEAU_BO_ZTILE)
			nvbo->base.tiled |= 2;
	}

	if (flags & NOUVEAU_BO_PIN) {
		ret = nouveau_bo_pin((void *)nvbo, nvbo->flags);
		if (ret) {
			nouveau_bo_ref(NULL, (void *)nvbo);
			return ret;
		}
	}

	*bo = &nvbo->base;
	return 0;
}

int
nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
		struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, 0, 0, size, bo);
	if (ret)
		return ret;
	nvbo = nouveau_bo(*bo);

	nvbo->sysmem = ptr;
	nvbo->user = 1;
	return 0;
}

int
nouveau_bo_fake(struct nouveau_device *dev, uint64_t offset, uint32_t flags,
		uint32_t size, void *map, struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, flags & ~NOUVEAU_BO_PIN, 0, size, bo);
	if (ret)
		return ret;
	nvbo = nouveau_bo(*bo);

	nvbo->flags = flags | NOUVEAU_BO_PIN;
	nvbo->domain = (flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));
	nvbo->offset = offset;
	nvbo->size = nvbo->base.size = size;
	nvbo->map = map;
	nvbo->base.flags = nvbo->flags;
	nvbo->base.offset = nvbo->offset;
	return 0;
}
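
/* Export a global (flink) name for the buffer so it can be shared with
 * other processes; the legacy path uses the buffer's offset as the name. */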
int
nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	if (!bo || !handle)
		return -EINVAL;

	if (!nvbo->global_handle) {
		struct drm_gem_flink req;

		ret = nouveau_bo_kalloc(nvbo, NULL);
		if (ret)
			return ret;

		if (nvdev->mm_enabled) {
			req.handle = nvbo->handle;
			ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
			if (ret) {
				nouveau_bo_kfree(nvbo);
				return ret;
			}

			nvbo->global_handle = req.name;
		} else {
			nvbo->global_handle = nvbo->offset;
		}
	}

	*handle = nvbo->global_handle;
	return 0;
}

int
nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
		      struct nouveau_bo **bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req;
	int ret;

	ret = nouveau_bo_new(dev, 0, 0, 0, bo);
	if (ret)
		return ret;
	nvbo = nouveau_bo(*bo);

	if (!nvdev->mm_enabled) {
		nvbo->handle = 0;
		nvbo->offset = handle;
		nvbo->domain = NOUVEAU_BO_VRAM;
		nvbo->flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_PIN;
		nvbo->base.offset = nvbo->offset;
		nvbo->base.flags = nvbo->flags;
	} else {
		req.name = handle;
		ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
		if (ret) {
			nouveau_bo_ref(NULL, bo);
			return ret;
		}

		nvbo->size = req.size;
		nvbo->handle = req.handle;
	}

	nvbo->base.handle = nvbo->handle;
	return 0;
}

static void
nouveau_bo_del_cb(void *priv)
{
	struct nouveau_bo_priv *nvbo = priv;

	nouveau_fence_ref(NULL, &nvbo->fence);
	nouveau_fence_ref(NULL, &nvbo->wr_fence);
	nouveau_bo_kfree(nvbo);
	free(nvbo);
}
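
/* Drop a reference.  Releasing the last reference flushes any pushbuf the
 * buffer is still pending in, frees its storage, and on the legacy path
 * defers the final free until the buffer's fence has signalled. */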
static void
nouveau_bo_del(struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;

	if (!bo || !*bo)
		return;
	nvbo = nouveau_bo(*bo);
	*bo = NULL;

	if (--nvbo->refcount)
		return;

	if (nvbo->pending) {
		nvbo->pending = NULL;
		nouveau_pushbuf_flush(nvbo->pending_channel, 0);
	}

	nouveau_bo_ufree(nvbo);

	if (!nouveau_device(nvbo->base.device)->mm_enabled && nvbo->fence) {
		nouveau_fence_flush(nvbo->fence->channel);
		if (nouveau_fence(nvbo->fence)->signalled) {
			nouveau_bo_del_cb(nvbo);
		} else {
			nouveau_fence_signal_cb(nvbo->fence,
						nouveau_bo_del_cb, nvbo);
		}
	} else {
		nouveau_bo_del_cb(nvbo);
	}
}

int
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
{
	if (!pbo)
		return -EINVAL;

	if (ref)
		nouveau_bo(ref)->refcount++;

	if (*pbo)
		nouveau_bo_del(pbo);

	*pbo = ref;
	return 0;
}

static int
nouveau_bo_wait_nomm(struct nouveau_bo *bo, int cpu_write)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret = 0;

	if (cpu_write)
		ret = nouveau_fence_wait(&nvbo->fence);
	else
		ret = nouveau_fence_wait(&nvbo->wr_fence);
	if (ret)
		return ret;

	nvbo->write_marker = 0;
	return 0;
}
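
/* Wait for the GPU to finish with the buffer before CPU access, flushing
 * any pending pushbuf that writes it first. */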
static int
nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	int ret;

	if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write)
		return 0;

	if (nvbo->pending &&
	    (nvbo->pending->write_domains || cpu_write)) {
		nvbo->pending = NULL;
		nouveau_pushbuf_flush(nvbo->pending_channel, 0);
	}

	if (!nvdev->mm_enabled)
		return nouveau_bo_wait_nomm(bo, cpu_write);

	req.handle = nvbo->handle;
	ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->write_marker = 0;
	return 0;
}
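
/* Public map: make sure backing storage exists, synchronise with the GPU,
 * and expose either the sysmem copy or the kernel mapping via bo->map. */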
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	if (!nvbo || bo->map)
		return -EINVAL;

	if (!nouveau_bo_allocated(nvbo)) {
		if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
			ret = nouveau_bo_kalloc(nvbo, NULL);
			if (ret)
				return ret;
		}

		if (!nouveau_bo_allocated(nvbo)) {
			ret = nouveau_bo_ualloc(nvbo);
			if (ret)
				return ret;
		}
	}

	if (nvbo->sysmem) {
		bo->map = nvbo->sysmem;
	} else {
		ret = nouveau_bo_kmap(nvbo);
		if (ret)
			return ret;

		ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR));
		if (ret)
			return ret;

		bo->map = nvbo->map;
	}

	return 0;
}

void
nouveau_bo_unmap(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (nvdev->mm_enabled && bo->map && !nvbo->sysmem) {
		struct drm_nouveau_gem_cpu_fini req;

		req.handle = nvbo->handle;
		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI,
				&req, sizeof(req));
	}

	bo->map = NULL;
}
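
/* Legacy-path migration: allocate a replacement buffer of the requested
 * memory type, copy the contents across, then swap the two buffers'
 * underlying storage. */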
static int
nouveau_bo_validate_nomm(struct nouveau_bo_priv *nvbo, uint32_t flags)
{
	struct nouveau_bo *new = NULL;
	uint32_t t_handle, t_domain, t_offset, t_size;
	void *t_map;
	int ret;

	if ((flags & NOUVEAU_BO_VRAM) && nvbo->domain == NOUVEAU_BO_VRAM)
		return 0;
	if ((flags & NOUVEAU_BO_GART) && nvbo->domain == NOUVEAU_BO_GART)
		return 0;
	assert(flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));

	/* Keep tiling info */
	flags |= (nvbo->flags & (NOUVEAU_BO_TILED|NOUVEAU_BO_ZTILE));

	ret = nouveau_bo_new(nvbo->base.device, flags, 0, nvbo->size, &new);
	if (ret)
		return ret;

	ret = nouveau_bo_kalloc(nouveau_bo(new), NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &new);
		return ret;
	}

	if (nvbo->handle || nvbo->sysmem) {
		nouveau_bo_kmap(nouveau_bo(new));

		if (!nvbo->base.map) {
			nouveau_bo_map(&nvbo->base, NOUVEAU_BO_RD);
			memcpy(nouveau_bo(new)->map, nvbo->base.map, nvbo->base.size);
			nouveau_bo_unmap(&nvbo->base);
		} else {
			memcpy(nouveau_bo(new)->map, nvbo->base.map, nvbo->base.size);
		}
	}

	t_handle = nvbo->handle;
	t_domain = nvbo->domain;
	t_offset = nvbo->offset;
	t_size = nvbo->size;
	t_map = nvbo->map;

	nvbo->handle = nouveau_bo(new)->handle;
	nvbo->domain = nouveau_bo(new)->domain;
	nvbo->offset = nouveau_bo(new)->offset;
	nvbo->size = nouveau_bo(new)->size;
	nvbo->map = nouveau_bo(new)->map;

	nouveau_bo(new)->handle = t_handle;
	nouveau_bo(new)->domain = t_domain;
	nouveau_bo(new)->offset = t_offset;
	nouveau_bo(new)->size = t_size;
	nouveau_bo(new)->map = t_map;

	nouveau_bo_ref(NULL, &new);

	return 0;
}

static int
nouveau_bo_pin_nomm(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	if (!nvbo->handle) {
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;

		ret = nouveau_bo_validate_nomm(nvbo, flags & ~NOUVEAU_BO_PIN);
		if (ret)
			return ret;
	}

	nvbo->pinned = 1;

	/* Fill in public nouveau_bo members */
	bo->flags = nvbo->domain;
	bo->offset = nvbo->offset;

	return 0;
}
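
/* Pin the buffer at a fixed VRAM/GART offset and publish the resulting
 * placement and offset through the public structure. */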
int
nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pin req;
	int ret;

	if (nvbo->pinned)
		return 0;

	if (!nvdev->mm_enabled)
		return nouveau_bo_pin_nomm(bo, flags);

	/* Ensure we have a kernel object... */
	if (!nvbo->handle) {
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;
		nvbo->flags = flags;

		ret = nouveau_bo_kalloc(nvbo, NULL);
		if (ret)
			return ret;
	}

	/* Now force it to stay put :) */
	req.handle = nvbo->handle;
	req.domain = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.domain |= NOUVEAU_GEM_DOMAIN_GART;

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req,
				  sizeof(struct drm_nouveau_gem_pin));
	if (ret)
		return ret;
	nvbo->offset = req.offset;
	nvbo->domain = req.domain;
	nvbo->pinned = 1;
	nvbo->flags |= NOUVEAU_BO_PIN;

	/* Fill in public nouveau_bo members */
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags = NOUVEAU_BO_VRAM;
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags = NOUVEAU_BO_GART;
	bo->offset = nvbo->offset;

	return 0;
}

void
nouveau_bo_unpin(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_unpin req;

	if (!nvbo->pinned)
		return;

	if (nvdev->mm_enabled) {
		req.handle = nvbo->handle;
		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_UNPIN,
				&req, sizeof(req));
	}

	nvbo->pinned = bo->offset = bo->flags = 0;
}

int
nouveau_bo_tile(struct nouveau_bo *bo, uint32_t flags, uint32_t delta,
		uint32_t size)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	uint32_t kern_flags = 0;
	int ret = 0;

	if (flags & NOUVEAU_BO_TILED) {
		kern_flags |= NOUVEAU_MEM_TILE;
		if (flags & NOUVEAU_BO_ZTILE)
			kern_flags |= NOUVEAU_MEM_TILE_ZETA;
	}

	if (nvdev->mm_enabled) {
		struct drm_nouveau_gem_tile req;

		req.handle = nvbo->handle;
		req.delta = delta;
		req.size = size;
		req.flags = kern_flags;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_TILE,
				      &req, sizeof(req));
	} else {
		struct drm_nouveau_mem_tile req;

		req.offset = nvbo->offset;
		req.delta = delta;
		req.size = size;
		req.flags = kern_flags;

		if (flags & NOUVEAU_BO_VRAM)
			req.flags |= NOUVEAU_MEM_FB;
		if (flags & NOUVEAU_BO_GART)
			req.flags |= NOUVEAU_MEM_AGP;

		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_TILE,
				      &req, sizeof(req));
	}

	return 0;
}

int
nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (!nvdev->mm_enabled) {
		struct nouveau_fence *fence;

		if (nvbo->pending && (nvbo->pending->write_domains ||
				      (access & NOUVEAU_BO_WR)))
			return 1;

		if (access & NOUVEAU_BO_WR)
			fence = nvbo->fence;
		else
			fence = nvbo->wr_fence;
		return !nouveau_fence(fence)->signalled;
	}

	return 1;
}
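
/* Add the buffer to the current pushbuf's validation list, first copying
 * any malloc()ed shadow contents into the freshly created kernel object. */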
struct drm_nouveau_gem_pushbuf_bo *
nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	struct nouveau_bo *ref = NULL;
	int ret;

	if (nvbo->pending)
		return nvbo->pending;

	if (!nvbo->handle) {
		ret = nouveau_bo_kalloc(nvbo, chan);
		if (ret)
			return NULL;

		if (nvbo->sysmem) {
			void *sysmem_tmp = nvbo->sysmem;

			nvbo->sysmem = NULL;
			ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
			if (ret)
				return NULL;
			nvbo->sysmem = sysmem_tmp;

			memcpy(bo->map, nvbo->sysmem, nvbo->base.size);
			nouveau_bo_unmap(bo);
			nouveau_bo_ufree(nvbo);
		}
	}

	if (nvpb->nr_buffers >= NOUVEAU_PUSHBUF_MAX_BUFFERS)
		return NULL;
	pbbo = nvpb->buffers + nvpb->nr_buffers++;
	nvbo->pending = pbbo;
	nvbo->pending_channel = chan;

	nouveau_bo_ref(bo, &ref);
	pbbo->user_priv = (uint64_t)(unsigned long)ref;
	pbbo->handle = nvbo->handle;
	pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
	pbbo->read_domains = 0;
	pbbo->write_domains = 0;
	pbbo->presumed_domain = nvbo->domain;
	pbbo->presumed_offset = nvbo->offset;
	pbbo->presumed_ok = 1;
	return pbbo;
}