/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>
#include <errno.h>
#include <pthread.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#include "nvif/class.h"
#include "nvif/cl0080.h"
#include "nvif/ioctl.h"
#include "nvif/unpack.h"
drm_private FILE *nouveau_out = NULL;
drm_private uint32_t nouveau_debug = 0;
static void
debug_init(void)
{
	static bool once = false;
	char *debug, *out;

	if (once)
		return;
	once = true;

	debug = getenv("NOUVEAU_LIBDRM_DEBUG");
	if (debug) {
		int n = strtol(debug, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}

	nouveau_out = stderr;
	out = getenv("NOUVEAU_LIBDRM_OUT");
	if (out) {
		FILE *fout = fopen(out, "w");
		if (fout)
			nouveau_out = fout;
	}
}
static int
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	uint32_t argc = size;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		if (!obj->length) {
			if (obj != &drm->client)
				args->v0.object = (unsigned long)(void *)obj;
			else
				args->v0.object = 0;
			args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
			args->v0.route = 0x00;
		} else {
			args->v0.route = 0xff;
			args->v0.token = obj->handle;
		}
	} else
		return ret;

	return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
}
int
nouveau_object_mthd(struct nouveau_object *obj,
		    uint32_t mthd, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_mthd_v0 mthd;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	uint8_t stack[128];
	int ret;

	if (!drm->nvif)
		return -ENOSYS;

	if (argc > sizeof(stack)) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
	args->mthd.version = 0;
	args->mthd.method = mthd;

	memcpy(args->mthd.data, data, size);
	ret = nouveau_object_ioctl(obj, args, argc);
	memcpy(data, args->mthd.data, size);
	if (args != (void *)stack)
		free(args);
	return ret;
}
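/* Usage sketch, mirroring the NV_DEVICE_V0_INFO call made later in this
 * file: method arguments are copied in and back out, so the caller simply
 * passes a stack structure:
 *
 *	struct nv_device_info_v0 info = { .version = 0 };
 *	int ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
 *				      &info, sizeof(info));
 *	if (ret == 0)
 *		// info.chipset now holds the chipset id
 */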
void
nouveau_object_sclass_put(struct nouveau_sclass **psclass)
{
	free(*psclass);
	*psclass = NULL;
}
int
nouveau_object_sclass_get(struct nouveau_object *obj,
			  struct nouveau_sclass **psclass)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args = NULL;
	struct nouveau_sclass *sclass;
	int ret, cnt = 0, i;
	uint32_t size;

	if (!drm->nvif)
		return abi16_sclass(obj, psclass);

	while (1) {
		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
		if (!(args = malloc(size)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
		args->sclass.version = 0;
		args->sclass.count = cnt;

		ret = nouveau_object_ioctl(obj, args, size);
		if (ret == 0 && args->sclass.count <= cnt)
			break;
		cnt = args->sclass.count;
		free(args);
		if (ret != 0)
			return ret;
	}

	if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
		for (i = 0; i < args->sclass.count; i++) {
			sclass[i].oclass = args->sclass.oclass[i].oclass;
			sclass[i].minver = args->sclass.oclass[i].minver;
			sclass[i].maxver = args->sclass.oclass[i].maxver;
		}
		*psclass = sclass;
		ret = args->sclass.count;
	} else {
		ret = -ENOMEM;
	}

	free(args);
	return ret;
}
int
nouveau_object_mclass(struct nouveau_object *obj,
		      const struct nouveau_mclass *mclass)
{
	struct nouveau_sclass *sclass;
	int ret = -ENODEV;
	int cnt, i, j;

	cnt = nouveau_object_sclass_get(obj, &sclass);
	if (cnt < 0)
		return cnt;

	for (i = 0; ret < 0 && mclass[i].oclass; i++) {
		for (j = 0; j < cnt; j++) {
			if (mclass[i].oclass  == sclass[j].oclass &&
			    mclass[i].version >= sclass[j].minver &&
			    mclass[i].version <= sclass[j].maxver) {
				ret = i;
				break;
			}
		}
	}

	nouveau_object_sclass_put(&sclass);
	return ret;
}
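/* Usage sketch (the class numbers below are hypothetical placeholders; the
 * zero-terminated array and the returned index are the contract of
 * nouveau_object_mclass() above):
 *
 *	static const struct nouveau_mclass mclass[] = {
 *		{ 0x9297, 0 },	// preferred class, if exposed
 *		{ 0x8297, 0 },	// fallback
 *		{}
 *	};
 *	int idx = nouveau_object_mclass(chan, mclass);
 *	if (idx >= 0)
 *		// create an object of mclass[idx].oclass
 */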
static void
nouveau_object_fini(struct nouveau_object *obj)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_del del;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_DEL,
	};

	if (obj->data) {
		abi16_delete(obj);
		free(obj->data);
		obj->data = NULL;
		return;
	}

	nouveau_object_ioctl(obj, &args, sizeof(args));
}
static int
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
		    int32_t oclass, void *data, uint32_t size,
		    struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	int (*func)(struct nouveau_object *);
	int ret = -ENOSYS;

	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = 0;
	obj->data = NULL;

	if (!abi16_object(obj, &func) && drm->nvif) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_NEW;
		args->new.version = 0;
		args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		args->new.token = (unsigned long)(void *)obj;
		args->new.object = (unsigned long)(void *)obj;
		args->new.handle = handle;
		args->new.oclass = oclass;
		memcpy(args->new.data, data, size);
		ret = nouveau_object_ioctl(parent, args, argc);
		memcpy(data, args->new.data, size);
		free(args);
	} else
	if (func) {
		obj->length = size ? size : sizeof(struct nouveau_object *);
		if (!(obj->data = malloc(obj->length)))
			return -ENOMEM;
		if (data)
			memcpy(obj->data, data, obj->length);
		*(struct nouveau_object **)obj->data = obj;

		ret = func(obj);
	}

	if (ret) {
		nouveau_object_fini(obj);
		return ret;
	}

	return 0;
}
int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_object *obj;
	int ret;

	if (!(obj = malloc(sizeof(*obj))))
		return -ENOMEM;

	ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}
void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	if (obj) {
		nouveau_object_fini(obj);
		free(obj);
		*pobj = NULL;
	}
}
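/* Usage sketch (hypothetical parent/oclass/args values): the two calls
 * above pair up, and *pobj is only written on success:
 *
 *	struct nouveau_object *obj;
 *	int ret = nouveau_object_new(parent, 0xbeef0000, oclass,
 *				     &args, sizeof(args), &obj);
 *	if (ret == 0) {
 *		// ... use obj ...
 *		nouveau_object_del(&obj);	// destroys and NULLs obj
 *	}
 */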
void
nouveau_drm_del(struct nouveau_drm **pdrm)
{
	free(*pdrm);
	*pdrm = NULL;
}
int
nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
{
	struct nouveau_drm *drm;
	drmVersionPtr ver;

	debug_init();

	if (!(drm = calloc(1, sizeof(*drm))))
		return -ENOMEM;
	drm->fd = fd;

	if (!(ver = drmGetVersion(fd))) {
		nouveau_drm_del(&drm);
		return -EINVAL;
	}
	*pdrm = drm;

	drm->version = (ver->version_major << 24) |
		       (ver->version_minor << 8) |
			ver->version_patchlevel;
	drm->nvif = (drm->version >= 0x01000301);
	drmFreeVersion(ver);
	return 0;
}
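/* For example, a kernel module version of 1.3.1 packs to
 * (1 << 24) | (3 << 8) | 1 = 0x01000301, which the comparison above treats
 * as the first version to expose the NVIF interface.
 */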
/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old
 */
int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}
int
nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
		   void *data, uint32_t size, struct nouveau_device **pdev)
{
	struct nv_device_info_v0 info = {};
	union {
		struct nv_device_v0 v0;
	} *args = data;
	uint32_t argc = size;
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct nouveau_device_priv *nvdev;
	struct nouveau_device *dev;
	uint64_t v;
	char *tmp;
	int ret = -ENOSYS;

	if (oclass != NV_DEVICE ||
	    nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
		return ret;

	if (!(nvdev = calloc(1, sizeof(*nvdev))))
		return -ENOMEM;
	dev = *pdev = &nvdev->base;
	dev->fd = -1;

	if (drm->nvif) {
		ret = nouveau_object_init(parent, 0, oclass, args, argc,
					  &dev->object);
		if (ret)
			goto done;

		info.version = 0;

		ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
					  &info, sizeof(info));
		if (ret)
			goto done;

		nvdev->base.chipset = info.chipset;
		nvdev->have_bo_usage = true;
	} else
	if (args->v0.device == ~0ULL) {
		nvdev->base.object.parent = &drm->client;
		nvdev->base.object.handle = ~0ULL;
		nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
		nvdev->base.object.length = ~0;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
		if (ret)
			goto done;
		nvdev->base.chipset = v;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
		if (ret == 0)
			nvdev->have_bo_usage = (v != 0);
		ret = 0;
	} else
		goto done;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.vram_size = v;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.gart_size = v;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;

	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;

	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;

	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	ret = pthread_mutex_init(&nvdev->lock, NULL);
	DRMINITLISTHEAD(&nvdev->bo_list);

done:
	if (ret)
		nouveau_device_del(pdev);
	return ret;
}
int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_drm *drm;
	struct nouveau_device_priv *nvdev;
	int ret;

	ret = nouveau_drm_new(fd, &drm);
	if (ret)
		return ret;

	ret = nouveau_device_new(&drm->client, NV_DEVICE,
				 &(struct nv_device_v0) {
					.device = ~0ULL,
				 }, sizeof(struct nv_device_v0), pdev);
	if (ret) {
		nouveau_drm_del(&drm);
		return ret;
	}

	nvdev = nouveau_device(*pdev);
	nvdev->base.fd = drm->fd;
	nvdev->base.drm_version = drm->version;
	nvdev->close = close;
	return 0;
}
int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}
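/* Usage sketch (a NULL busid is an assumption based on drmOpen() opening
 * the first device matching the driver name): the typical bring-up sequence
 * pairs this with nouveau_client_new(), defined below:
 *
 *	struct nouveau_device *dev;
 *	struct nouveau_client *client;
 *	if (!nouveau_device_open(NULL, &dev) &&
 *	    !nouveau_client_new(dev, &client)) {
 *		// ... allocate buffers, channels, ...
 *		nouveau_client_del(&client);
 *		nouveau_device_del(&dev);
 *	}
 */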
void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);
	if (nvdev) {
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		if (nvdev->base.fd >= 0) {
			struct nouveau_drm *drm =
				nouveau_drm(&nvdev->base.object);
			nouveau_drm_del(&drm);
			if (nvdev->close)
				drmClose(nvdev->base.fd);
		}
		free(nvdev);
		*pdev = NULL;
	}
}
int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_getparam r = { .param = param };
	int fd = drm->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}
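/* Usage sketch: any NOUVEAU_GETPARAM_* token from nouveau_drm.h works the
 * same way, e.g. reading the chipset id as nouveau_device_new() does:
 *
 *	uint64_t chipset;
 *	if (!nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &chipset))
 *		printf("NV%02X\n", (unsigned)chipset);
 */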
int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_setparam r = { .param = param, .value = value };
	return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}
int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);

	for (i = 0; i < nvdev->nr_client; i++) {
		id = ffs(nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}

	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;

out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		ret = 0;
	}

	*pclient = &pcli->base;

unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}
void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
	}
}
static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_gem_close req = { .handle = bo->handle };

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo = &nvbo->base;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	*pbo = bo;
	return 0;
}
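/* Usage sketch (size and flags are illustrative): allocating a mappable
 * 64KiB buffer in VRAM, then dropping the reference again:
 *
 *	struct nouveau_bo *bo = NULL;
 *	int ret = nouveau_bo_new(dev, NOUVEAU_BO_VRAM | NOUVEAU_BO_MAP,
 *				 0, 65536, NULL, &bo);
 *	if (ret == 0)
 *		nouveau_bo_ref(NULL, &bo);	// unreference, frees the bo
 */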
static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				if (!name)
					name = nvbo->name;
				break;
			}

			*pbo = &nvbo->base;
			return 0;
		}
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}
static void
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}
int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;
	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}
int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
						     pbo, name);
			pthread_mutex_unlock(&nvdev->lock);
			return ret;
		}
	}

	ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0)
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}
int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name) {
		int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);

		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;

		nouveau_bo_make_global(nvbo);
	}
	return 0;
}
void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo)
		atomic_inc(&nouveau_bo(bo)->refcnt);
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}
int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	uint32_t handle;
	int ret;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
	if (ret == 0)
		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}
int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC,
				 prime_fd);
	if (ret)
		return ret;
	nouveau_bo_make_global(nvbo);
	return 0;
}
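/* Usage sketch: exporting a bo as a dma-buf fd and re-importing it (e.g. in
 * another process, or on another device) with the function above:
 *
 *	int fd;
 *	if (!nouveau_bo_set_prime(bo, &fd)) {
 *		struct nouveau_bo *imported = NULL;
 *		nouveau_bo_prime_handle_ref(dev, fd, &imported);
 *		close(fd);
 *	}
 */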
int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
				!(access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, drm->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}
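/* Usage sketch: mapping a bo for CPU writes; the embedded wait ensures the
 * GPU is done with the buffer before the pointer is used:
 *
 *	if (!nouveau_bo_map(bo, NOUVEAU_BO_WR, client)) {
 *		memset(bo->map, 0, bo->size);
 *		// ... fill in data ...
 *	}
 */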