drivers/gpu/drm/nouveau/nouveau_gem.c

/*
 * Copyright (C) 2008 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <subdev/fb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nouveau_abi16.h"

#include "nouveau_ttm.h"
#include "nouveau_gem.h"

int
nouveau_gem_object_new(struct drm_gem_object *gem)
{
	return 0;
}

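/*
 * Final GEM object destructor: drops the prime attachment (if any), releases
 * the backing TTM buffer object, and frees the GEM object itself.
 */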
void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;

	if (!nvbo)
		return;
	nvbo->gem = NULL;

	if (gem->import_attach)
		drm_prime_gem_destroy(gem, nvbo->bo.sg);

	ttm_bo_unref(&bo);

	drm_gem_object_release(gem);
	kfree(gem);
}

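/*
 * Called when a client opens a handle to this object: on chipsets with a
 * per-client VM, look up (or create) the object's mapping in that VM and
 * take a reference on it.
 */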
int
nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return 0;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (!vma) {
		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
		if (!vma) {
			ret = -ENOMEM;
			goto out;
		}

		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
		if (ret) {
			kfree(vma);
			goto out;
		}
	} else {
		vma->refcount++;
	}

out:
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

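/* Fence-work callback: unmap and free a VMA once the GPU has finished with it. */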
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}

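/*
 * Drop a VMA from the buffer.  If the buffer is still mapped and busy, defer
 * the unmap/free to fence completion; otherwise tear it down immediately.
 */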
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		if (nvbo->bo.sync_obj)
			fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}

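/*
 * Called when a client closes its handle: drop its reference on the per-VM
 * mapping and unmap it once the refcount hits zero.
 */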
void
nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;
	int ret;

	if (!cli->base.vm)
		return;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return;

	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
	if (vma) {
		if (--vma->refcount == 0)
			nouveau_gem_object_unmap(nvbo, vma);
	}
	ttm_bo_unreserve(&nvbo->bo);
}

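/*
 * Allocate a new buffer object in the requested memory domain(s) and wrap it
 * in a GEM object.
 */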
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}

static int
nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
		 struct drm_nouveau_gem_info *rep)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
	struct nouveau_vma *vma;

	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
	else
		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;

	rep->offset = nvbo->bo.offset;
	if (cli->base.vm) {
		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
		if (!vma)
			return -EINVAL;

		rep->offset = vma->offset;
	}

	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
	rep->tile_mode = nvbo->tile_mode;
	rep->tile_flags = nvbo->tile_flags;
	return 0;
}

int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}

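/*
 * Work out the TTM placement for a buffer from the domains requested by the
 * pushbuf, preferring to leave it in the memory type it already occupies.
 */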
static int
nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
		       uint32_t write_domains, uint32_t valid_domains)
{
	struct nouveau_bo *nvbo = gem->driver_private;
	struct ttm_buffer_object *bo = &nvbo->bo;
	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
	uint32_t pref_flags = 0, valid_flags = 0;

	if (!domains)
		return -EINVAL;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
		valid_flags |= TTM_PL_FLAG_VRAM;

	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
		valid_flags |= TTM_PL_FLAG_TT;

	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    bo->mem.mem_type == TTM_PL_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
		 bo->mem.mem_type == TTM_PL_TT)
		pref_flags |= TTM_PL_FLAG_TT;

	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
		pref_flags |= TTM_PL_FLAG_VRAM;

	else
		pref_flags |= TTM_PL_FLAG_TT;

	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);

	return 0;
}

struct validate_op {
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
	struct ww_acquire_ctx ticket;
};

static void
validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
		   struct ww_acquire_ctx *ticket)
{
	struct list_head *entry, *tmp;
	struct nouveau_bo *nvbo;

	list_for_each_safe(entry, tmp, list) {
		nvbo = list_entry(entry, struct nouveau_bo, entry);

		if (likely(fence))
			nouveau_bo_fence(nvbo, fence);

		if (unlikely(nvbo->validate_mapped)) {
			ttm_bo_kunmap(&nvbo->kmap);
			nvbo->validate_mapped = false;
		}

		list_del(&nvbo->entry);
		nvbo->reserved_by = NULL;
		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
		drm_gem_object_unreference_unlocked(nvbo->gem);
	}
}

static void
validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_list(&op->vram_list, fence, &op->ticket);
	validate_fini_list(&op->gart_list, fence, &op->ticket);
	validate_fini_list(&op->both_list, fence, &op->ticket);
}

static void
validate_fini(struct validate_op *op, struct nouveau_fence *fence)
{
	validate_fini_no_ticket(op, fence);
	ww_acquire_fini(&op->ticket);
}

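/*
 * Look up and reserve every buffer on the pushbuf's validation list, sorting
 * them onto the VRAM/GART/both lists.  Reservation conflicts are resolved via
 * the ww_mutex slowpath and a retry of the whole list.
 */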
static int
validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      int nr_buffers, struct validate_op *op)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_device *dev = chan->drm->dev;
	int trycnt = 0;
	int ret, i;
	struct nouveau_bo *res_bo = NULL;

	ww_acquire_init(&op->ticket, &reservation_ww_class);
retry:
	if (++trycnt > 100000) {
		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
		return -EINVAL;
	}

	for (i = 0; i < nr_buffers; i++) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[i];
		struct drm_gem_object *gem;
		struct nouveau_bo *nvbo;

		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
		if (!gem) {
			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -ENOENT;
		}
		nvbo = gem->driver_private;
		if (nvbo == res_bo) {
			res_bo = NULL;
			drm_gem_object_unreference_unlocked(gem);
			continue;
		}

		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
			NV_ERROR(cli, "multiple instances of buffer %d on "
				      "validation list\n", b->handle);
			drm_gem_object_unreference_unlocked(gem);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}

		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
		if (ret) {
			validate_fini_no_ticket(op, NULL);
			if (unlikely(ret == -EDEADLK)) {
				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
							      &op->ticket);
				if (!ret)
					res_bo = nvbo;
			}
			if (unlikely(ret)) {
				ww_acquire_done(&op->ticket);
				ww_acquire_fini(&op->ticket);
				drm_gem_object_unreference_unlocked(gem);
				if (ret != -ERESTARTSYS)
					NV_ERROR(cli, "fail reserve\n");
				return ret;
			}
		}

		b->user_priv = (uint64_t)(unsigned long)nvbo;
		nvbo->reserved_by = file_priv;
		nvbo->pbbo_index = i;
		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
		    (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			list_add_tail(&nvbo->entry, &op->both_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
			list_add_tail(&nvbo->entry, &op->vram_list);
		else
		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
			list_add_tail(&nvbo->entry, &op->gart_list);
		else {
			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
				 b->valid_domains);
			list_add_tail(&nvbo->entry, &op->both_list);
			ww_acquire_done(&op->ticket);
			validate_fini(op, NULL);
			return -EINVAL;
		}
		if (nvbo == res_bo)
			goto retry;
	}

	ww_acquire_done(&op->ticket);
	return 0;
}

static int
validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
{
	struct nouveau_fence *fence = NULL;
	int ret = 0;

	spin_lock(&nvbo->bo.bdev->fence_lock);
	if (nvbo->bo.sync_obj)
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	if (fence) {
		ret = nouveau_fence_sync(fence, chan);
		nouveau_fence_unref(&fence);
	}

	return ret;
}

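/*
 * Validate each reserved buffer into an allowed placement, synchronising with
 * any fence it is still bound to.  On pre-NV50 chips, report back how many
 * presumed offsets became stale and therefore need relocations.
 */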
static int
validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
	      uint64_t user_pbbo_ptr)
{
	struct nouveau_drm *drm = chan->drm;
	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
				(void __force __user *)(uintptr_t)user_pbbo_ptr;
	struct nouveau_bo *nvbo;
	int ret, relocs = 0;

	list_for_each_entry(nvbo, list, entry) {
		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail pre-validate sync\n");
			return ret;
		}

		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
					     b->write_domains,
					     b->valid_domains);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail set_domain\n");
			return ret;
		}

		ret = nouveau_bo_validate(nvbo, true, false);
		if (unlikely(ret)) {
			if (ret != -ERESTARTSYS)
				NV_ERROR(cli, "fail ttm_validate\n");
			return ret;
		}

		ret = validate_sync(chan, nvbo);
		if (unlikely(ret)) {
			NV_ERROR(cli, "fail post-validate sync\n");
			return ret;
		}

		if (nv_device(drm->device)->card_type < NV_50) {
			if (nvbo->bo.offset == b->presumed.offset &&
			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
				continue;

			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
			else
				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
			b->presumed.offset = nvbo->bo.offset;
			b->presumed.valid = 0;
			relocs++;

			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
					     &b->presumed, sizeof(b->presumed)))
				return -EFAULT;
		}
	}

	return relocs;
}

static int
nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
			     struct drm_file *file_priv,
			     struct drm_nouveau_gem_pushbuf_bo *pbbo,
			     uint64_t user_buffers, int nr_buffers,
			     struct validate_op *op, int *apply_relocs)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	int ret, relocs = 0;

	INIT_LIST_HEAD(&op->vram_list);
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	if (nr_buffers == 0)
		return 0;

	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
	if (unlikely(ret)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate_init\n");
		return ret;
	}

	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate vram_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate gart_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
	if (unlikely(ret < 0)) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate both_list\n");
		validate_fini(op, NULL);
		return ret;
	}
	relocs += ret;

	*apply_relocs = relocs;
	return 0;
}

static inline void
u_free(void *addr)
{
	if (!is_vmalloc_addr(addr))
		kfree(addr);
	else
		vfree(addr);
}

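/*
 * Copy a user-space array into a kernel buffer, falling back from kmalloc to
 * vmalloc for large allocations.  Free the result with u_free().
 */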
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
	void *mem;
	void __user *userptr = (void __force __user *)(uintptr_t)user;

	size *= nmemb;

	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!mem)
		mem = vmalloc(size);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	if (DRM_COPY_FROM_USER(mem, userptr, size)) {
		u_free(mem);
		return ERR_PTR(-EFAULT);
	}

	return mem;
}

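/*
 * Patch buffer offsets into the command stream for any userspace-supplied
 * relocations whose presumed offsets turned out to be wrong, waiting for each
 * target buffer to idle before writing to it.
 */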
static int
nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
				struct drm_nouveau_gem_pushbuf *req,
				struct drm_nouveau_gem_pushbuf_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
	int ret = 0;
	unsigned i;

	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
	if (IS_ERR(reloc))
		return PTR_ERR(reloc);

	for (i = 0; i < req->nr_relocs; i++) {
		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
		struct drm_nouveau_gem_pushbuf_bo *b;
		struct nouveau_bo *nvbo;
		uint32_t data;

		/* >= rather than >: bo[] has exactly nr_buffers entries */
		if (unlikely(r->bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc bo index invalid\n");
			ret = -EINVAL;
			break;
		}

		b = &bo[r->bo_index];
		if (b->presumed.valid)
			continue;

		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
			NV_ERROR(cli, "reloc container bo index invalid\n");
			ret = -EINVAL;
			break;
		}
		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;

		if (unlikely(r->reloc_bo_offset + 4 >
			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
			NV_ERROR(cli, "reloc outside of bo\n");
			ret = -EINVAL;
			break;
		}

		if (!nvbo->kmap.virtual) {
			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
					  &nvbo->kmap);
			if (ret) {
				NV_ERROR(cli, "failed kmap for reloc\n");
				break;
			}
			nvbo->validate_mapped = true;
		}

		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
			data = b->presumed.offset + r->data;
		else
		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
			data = (b->presumed.offset + r->data) >> 32;
		else
			data = r->data;

		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
				data |= r->tor;
			else
				data |= r->vor;
		}

		spin_lock(&nvbo->bo.bdev->fence_lock);
		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
		if (ret) {
			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
			break;
		}

		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
	}

	u_free(reloc);
	return ret;
}

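/*
 * DRM_NOUVEAU_GEM_PUSHBUF: validate the caller's buffer list, apply any
 * required relocations, submit the push buffers to the channel and fence the
 * buffers involved.
 */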
int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_abi16_chan *temp;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_nouveau_gem_pushbuf *req = data;
	struct drm_nouveau_gem_pushbuf_push *push;
	struct drm_nouveau_gem_pushbuf_bo *bo;
	struct nouveau_channel *chan = NULL;
	struct validate_op op;
	struct nouveau_fence *fence = NULL;
	int i, j, ret = 0, do_reloc = 0;

	if (unlikely(!abi16))
		return -ENOMEM;

	list_for_each_entry(temp, &abi16->channels, head) {
		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
			chan = temp->chan;
			break;
		}
	}

	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	req->vram_available = drm->gem.vram_available;
	req->gart_available = drm->gem.gart_available;
	if (unlikely(req->nr_push == 0))
		goto out_next;

	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
		return nouveau_abi16_put(abi16, -EINVAL);
	}

	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
	if (IS_ERR(push))
		return nouveau_abi16_put(abi16, PTR_ERR(push));

	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
	if (IS_ERR(bo)) {
		u_free(push);
		return nouveau_abi16_put(abi16, PTR_ERR(bo));
	}

	/* Ensure all push buffers are on validate list */
	for (i = 0; i < req->nr_push; i++) {
		if (push[i].bo_index >= req->nr_buffers) {
			NV_ERROR(cli, "push %d buffer not in list\n", i);
			ret = -EINVAL;
			goto out_prevalid;
		}
	}

	/* Validate buffer list */
	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
					   req->nr_buffers, &op, &do_reloc);
	if (ret) {
		if (ret != -ERESTARTSYS)
			NV_ERROR(cli, "validate: %d\n", ret);
		goto out_prevalid;
	}

	/* Apply any relocations that are required */
	if (do_reloc) {
		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
		if (ret) {
			NV_ERROR(cli, "reloc apply: %d\n", ret);
			goto out;
		}
	}

	if (chan->dma.ib_max) {
		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
		if (ret) {
			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			nv50_dma_push(chan, nvbo, push[i].offset,
				      push[i].length);
		}
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		ret = RING_SPACE(chan, req->nr_push * 2);
		if (ret) {
			NV_ERROR(cli, "cal_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;

			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
			OUT_RING(chan, 0);
		}
	} else {
		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
		if (ret) {
			NV_ERROR(cli, "jmp_space: %d\n", ret);
			goto out;
		}

		for (i = 0; i < req->nr_push; i++) {
			struct nouveau_bo *nvbo = (void *)(unsigned long)
				bo[push[i].bo_index].user_priv;
			uint32_t cmd;

			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
			cmd |= 0x20000000;
			if (unlikely(cmd != req->suffix0)) {
				if (!nvbo->kmap.virtual) {
					ret = ttm_bo_kmap(&nvbo->bo, 0,
							  nvbo->bo.mem.num_pages,
							  &nvbo->kmap);
					if (ret) {
						WIND_RING(chan);
						goto out;
					}
					nvbo->validate_mapped = true;
				}

				nouveau_bo_wr32(nvbo, (push[i].offset +
						push[i].length - 8) / 4, cmd);
			}

			OUT_RING(chan, 0x20000000 |
				      (nvbo->bo.offset + push[i].offset));
			OUT_RING(chan, 0);
			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
				OUT_RING(chan, 0);
		}
	}

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret) {
		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
		goto out;
	}

out:
	validate_fini(&op, fence);
	nouveau_fence_unref(&fence);

out_prevalid:
	u_free(bo);
	u_free(push);

out_next:
	if (chan->dma.ib_max) {
		req->suffix0 = 0x00000000;
		req->suffix1 = 0x00000000;
	} else
	if (nv_device(drm->device)->chipset >= 0x25) {
		req->suffix0 = 0x00020000;
		req->suffix1 = 0x00000000;
	} else {
		req->suffix0 = 0x20000000 |
			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
		req->suffix1 = 0x00000000;
	}

	return nouveau_abi16_put(abi16, ret);
}

static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
	uint32_t flags = 0;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;

	return flags;
}

int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_nouveau_gem_cpu_prep *req = data;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
	int ret = -EINVAL;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;
	nvbo = nouveau_gem_object(gem);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
	spin_unlock(&nvbo->bo.bdev->fence_lock);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}

int
nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	return 0;
}

int
nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_nouveau_gem_info *req = data;
	struct drm_gem_object *gem;
	int ret;

	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
	if (!gem)
		return -ENOENT;

	ret = nouveau_gem_info(file_priv, gem, req);
	drm_gem_object_unreference_unlocked(gem);
	return ret;
}