/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
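
/*
 * radeon BOs are fully initialized in radeon_gem_object_create(), so the
 * generic GEM init hook should never be reached.
 */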
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG_ON(1);

	return 0;
}
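
/*
 * Free hook: drop the PRIME attachment, if any, then release the driver's
 * reference on the underlying radeon_bo.
 */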
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}
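
/*
 * Allocate a radeon_bo backed GEM object. The size is capped at the smaller
 * of visible VRAM and GTT; a failed VRAM allocation is retried with GTT
 * added to the allowed domains.
 */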
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* maximum bo size is the minimum of the visible vram and gtt sizes */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
		       __func__, __LINE__, size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
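
/*
 * Legacy set_domain: for a CPU domain request, simply wait for the BO to be
 * idle so the CPU can safely access it.
 */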
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access wait for object idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}
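
/*
 * Tear-down: force-delete any BOs still on the gem.objects list.
 */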
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}
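
/*
 * Mirror of the open hook: drop this file's reference on the per-VM mapping
 * and remove the mapping once the last reference is gone.
 */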
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
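
/*
 * -EDEADLK from the BO paths means the GPU is locked up; try a reset and
 * report -EAGAIN so userspace can retry the ioctl.
 */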
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
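
/*
 * Report VRAM/GTT sizes to userspace. The visible VRAM figure is reduced by
 * the stolen VGA memory and fbdev allocations; the GART figure excludes the
 * IB pool and the ring buffers.
 */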
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE * 64 * 1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
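
/*
 * GEM_CREATE ioctl: allocate a BO of the requested (page-aligned) size and
 * return a handle to it. Runs under the read side of exclusive_lock so it
 * cannot race with a GPU reset.
 */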
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
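
/*
 * GEM_SET_DOMAIN ioctl: look up the handle and validate the BO into the
 * requested domain (currently just an idle wait for CPU access).
 */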
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
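
/*
 * Translate a GEM handle into the fake mmap offset userspace passes to
 * mmap() on the DRM fd.
 */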
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
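
/*
 * GEM_MMAP ioctl: thin wrapper around radeon_mode_dumb_mmap().
 */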
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
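
/*
 * GEM_BUSY ioctl: non-blocking (no_wait) busy check that also reports the
 * BO's current placement back to userspace as a domain.
 */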
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
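
/*
 * GEM_WAIT_IDLE ioctl: block until the BO is idle, then give the ASIC code
 * a chance to run extra work via its optional ioctl_wait_idle callback.
 */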
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
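
/*
 * GEM_SET_TILING ioctl: record the tiling flags and pitch on the BO.
 */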
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
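
/*
 * GEM_GET_TILING ioctl: read back the tiling flags and pitch; the BO must
 * be reserved while they are sampled.
 */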
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
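
/*
 * GEM_VA ioctl: map or unmap a BO in this file's GPU virtual address space.
 * Requests are heavily validated first: the VM manager must be enabled,
 * vm_id must be zero, the offset must be outside the reserved area, and
 * only snooped mappings are accepted for now.
 */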
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way,
	 * moving forward, we can use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	}

	if (!r) {
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
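
/*
 * Dumb buffer allocation for fbdev/KMS clients: derive pitch and size from
 * width/height/bpp, round the size up to a page, and allocate in VRAM.
 */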
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
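
/*
 * debugfs: dump every GEM BO with its size, current placement and the pid
 * of the task that allocated it.
 */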
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif
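
/*
 * Register the GEM debugfs files; a no-op when debugfs is compiled out.
 */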
int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}