/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"

int radeon_gem_object_init(struct drm_gem_object *obj)
{
	/* radeon GEM objects are created through radeon_gem_object_create(),
	 * so this init callback should never be reached */
	BUG();

	return 0;
}
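/*
 * Free callback for a radeon GEM object: drop our reference on the
 * backing radeon_bo once the GEM object itself goes away.
 */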
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_bo_unref(&robj);
	}
}
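/*
 * Allocate a radeon_bo with the requested size/alignment/domain, expose
 * it through *obj as a GEM object and track it on rdev->gem.objects.
 */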
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}
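/*
 * Reserve the BO, pin it into pin_domain and return the resulting GPU
 * address through gpu_addr.
 */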
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr)
{
	struct radeon_bo *robj = gem_to_radeon_bo(obj);
	int r;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
	radeon_bo_unreserve(robj);
	return r;
}
void radeon_gem_object_unpin(struct drm_gem_object *obj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(obj);
	int r;

	r = radeon_bo_reserve(robj, false);
	if (likely(r == 0)) {
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
	}
}
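/*
 * Minimal domain handling: a CPU-domain request just waits for the BO to
 * go idle; requests for other domains are currently a no-op (see the
 * FIXME below).
 */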
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain)
		domain = rdomain;
	if (!domain) {
		/* nothing to do */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for cpu access, wait for object to be idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}
/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl cases.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}
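/*
 * On VM-capable chips (Cayman and later), remove any mappings of this BO
 * from the closing file's virtual address space.
 */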
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va, *tmp;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	if (radeon_bo_reserve(rbo, false)) {
		return;
	}
	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			/* remove from this vm address space */
			mutex_lock(&vm->mutex);
			list_del(&bo_va->vm_list);
			mutex_unlock(&vm->mutex);
			list_del(&bo_va->bo_list);
			kfree(bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}
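/*
 * GEM_INFO: report VRAM/GART sizes to userspace. The visible VRAM figure
 * subtracts the stolen VGA memory and fbdev allocations; the GART figure
 * subtracts the space reserved for the IB pool and the rings.
 */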
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
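/*
 * GEM_CREATE: allocate a BO of the page-rounded requested size and return
 * a handle for it; on success the handle holds the only reference.
 */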
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
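/*
 * GEM_BUSY: non-blocking check whether the BO is still busy, translating
 * its current TTM placement into a RADEON_GEM_DOMAIN_* value.
 */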
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
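/*
 * GEM_WAIT_IDLE: block until the BO is idle, then let the ASIC flush
 * caches through its optional ioctl_wait_idle hook.
 */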
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback hw specific functions if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
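/*
 * GEM_GET_TILING: read back the tiling flags and pitch stored on the BO.
 */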
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
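/*
 * GEM_VA: map or unmap a BO in the file's per-process virtual address
 * space. The arguments are strictly validated (vm_id must be 0, the
 * offset must lie outside the reserved area, only snooped mappings are
 * allowed) so the interface can be extended later without breaking
 * userspace.
 */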
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that way
	 * we can start using those fields later without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to force userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be
	 * able to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	switch (args->operation) {
	case RADEON_VA_MAP:
		bo_va = radeon_bo_va(rbo, &fpriv->vm);
		if (bo_va) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
				     args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
		break;
	default:
		break;
	}
	if (!r) {
		args->operation = RADEON_VA_RESULT_OK;
	} else {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
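/*
 * Dumb-buffer support for the generic KMS API: allocate a VRAM BO big
 * enough for a width x height x bpp scanout buffer and return a handle.
 */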
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	/* ttm_bo_type_device is 0, i.e. "kernel" is false here */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}