/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

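/**
 * radeon_gem_object_free - free a GEM object and its backing BO
 *
 * @gobj: GEM object to free
 *
 * Tears down any dma-buf import attachment, unregisters the MMU
 * notifier and drops the final reference on the backing radeon BO.
 */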
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

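/**
 * radeon_gem_object_create - allocate a radeon BO and wrap it in a GEM object
 *
 * @rdev: radeon_device pointer
 * @size: allocation size in bytes, limited to the unpinned GTT size
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement; a failed VRAM allocation retries
 *		    with VRAM|GTT
 * @flags: radeon BO creation flags
 * @kernel: true for kernel-internal objects
 * @obj: where the new GEM object is returned
 */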
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum bo size is the unpinned gtt size since we use the gtt to
	 * handle vram to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

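/**
 * radeon_gem_set_domain - handle a userspace domain-transition request
 *
 * @gobj: GEM object to transition
 * @rdomain: requested read domains
 * @wdomain: requested write domain, takes precedence over @rdomain
 *
 * For CPU access this only waits (up to 30 seconds) for the object to
 * go idle; migrating a prime-shared BO to VRAM is rejected with -EINVAL.
 */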
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain !\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create, which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

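/**
 * radeon_gem_handle_lockup - convert a GPU lockup into a retryable error
 *
 * @rdev: radeon_device pointer
 * @r: error code returned by the failed operation
 *
 * On -EDEADLK the GPU is reset; if the reset succeeds, the caller gets
 * -EAGAIN so userspace can retry the ioctl.
 */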
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

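/**
 * radeon_gem_userptr_ioctl - create a GEM object backed by user memory
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_radeon_gem_userptr)
 * @filp: drm file the new handle belongs to
 *
 * The address and size must be page aligned. Writable mappings require
 * anonymous memory and a registered MMU notifier;
 * RADEON_GEM_USERPTR_VALIDATE additionally binds the pages into GTT up
 * front.
 */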
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		   memory and install an MMU notifier */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			up_read(&current->mm->mmap_sem);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		radeon_bo_unreserve(bo);
		up_read(&current->mm->mmap_sem);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

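/**
 * radeon_gem_set_domain_ioctl - look up a BO by handle and set its domain
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_radeon_gem_set_domain)
 * @filp: drm file the handle belongs to
 *
 * Thin wrapper around radeon_gem_set_domain() that resolves the handle
 * and routes GPU lockups through radeon_gem_handle_lockup().
 */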
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}

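/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 *
 * @filp: drm file to resolve the handle in
 * @dev: drm device
 * @handle: GEM handle of the BO
 * @offset_p: where the mmap offset is returned
 *
 * Userptr BOs cannot be mapped through this path and yield -EPERM.
 */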
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

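/**
 * radeon_gem_busy_ioctl - poll whether a BO is still busy
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_radeon_gem_busy)
 * @filp: drm file the handle belongs to
 *
 * Returns -EBUSY if any fence on the BO's reservation object has not
 * signaled yet, 0 otherwise, and reports the BO's current domain.
 */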
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

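/**
 * radeon_gem_wait_idle_ioctl - block until a BO is idle
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_radeon_gem_wait_idle)
 * @filp: drm file the handle belongs to
 *
 * Waits up to 30 seconds for all fences on the BO, then flushes the HDP
 * cache through MMIO if the BO still lives in VRAM.
 */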
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

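/**
 * radeon_gem_va_ioctl - map/unmap a BO in a per-file virtual address space
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_radeon_gem_va)
 * @filp: drm file the handle belongs to
 *
 * Validates the requested offset, flags and operation, then maps or
 * unmaps the BO at the given GPU virtual address and updates the VM
 * page tables via radeon_gem_va_update_vm().
 */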
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet; to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value. Thus,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

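/**
 * radeon_gem_op_ioctl - get or set a BO's initial placement domain
 *
 * @dev: drm device
 * @data: ioctl arguments (struct drm_radeon_gem_op)
 * @filp: drm file the handle belongs to
 *
 * Not allowed on userptr BOs. RADEON_GEM_OP_SET_INITIAL_DOMAIN masks the
 * value to the VRAM, GTT and CPU domains.
 */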
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

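/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 *
 * @file_priv: drm file the new handle belongs to
 * @dev: drm device
 * @args: dumb-buffer arguments; pitch, size and handle are filled in
 *
 * Computes an aligned pitch and page-aligned size, then allocates the
 * backing BO in VRAM.
 */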
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}