/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

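/*
 * Allocate the backing buffer object for a new GEM object. Requests are
 * aligned to at least a page, normal (non-GDS/GWS/OA) allocations are
 * checked against the unpinned GTT size, and a pure VRAM request falls
 * back to VRAM|GTT when the first allocation attempt fails.
 */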
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

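/*
 * Forcefully release GEM objects that user space clients still hold,
 * e.g. on driver teardown: walk every open file's object idr and drop
 * the remaining references.
 */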
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * the open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);

	return 0;
}

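/*
 * Per-BO validation callback: report an error when the BO (or its shadow)
 * is currently not accessible by the GPU, so the caller can bail out.
 */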
static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
{
	/* if anything is swapped out don't swap it in here,
	   just abort and wait for the next CS */
	if (!amdgpu_bo_gpu_accessible(bo))
		return -ERESTARTSYS;

	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
		return -ERESTARTSYS;

	return 0;
}

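/*
 * Check whether all BOs on the validation list and all page table BOs of
 * the VM are GPU accessible, i.e. whether the VM can be updated right away.
 */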
static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				struct list_head *list)
{
	struct ttm_validate_buffer *entry;

	list_for_each_entry(entry, list, head) {
		struct amdgpu_bo *bo =
			container_of(entry->bo, struct amdgpu_bo, tbo);

		if (amdgpu_gem_vm_check(NULL, bo))
			return false;
	}

	return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
}

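/*
 * Called when a GEM handle is closed: drop the bo_va reference for this
 * file's VM and, if it was the last one, remove the mapping and clear the
 * freed page table entries.
 */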
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_gem_vm_ready(adev, vm, &list)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (unlikely(r)) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

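/*
 * Translate a lockup (-EDEADLK) into a GPU reset and tell user space to
 * retry the ioctl; all other error codes pass through unchanged.
 */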
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

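/*
 * GEM create ioctl: validate the requested flags and domains, convert
 * GDS/GWS/OA sizes, allocate the buffer object and return a handle for it.
 */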
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
				      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
				      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
				      AMDGPU_GEM_CREATE_VRAM_CLEARED |
				      AMDGPU_GEM_CREATE_SHADOW |
				      AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		return -EINVAL;
	}

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA)) {
		return -EINVAL;
	}

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     false, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

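/*
 * GEM userptr ioctl: wrap an anonymous user memory range in a GTT buffer
 * object, optionally registering an MMU notifier and validating the pages
 * up front.
 */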
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

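/*
 * Look up a handle and return the fake mmap offset user space needs to map
 * the buffer; rejected for userptr BOs and BOs created without CPU access.
 */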
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

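/*
 * GEM wait-idle ioctl: wait (with timeout) for all fences on a BO's
 * reservation object and report back whether the BO is idle.
 */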
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

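/*
 * GEM metadata ioctl: get or set the tiling flags and the opaque metadata
 * blob attached to a BO.
 */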
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r = -ERESTARTSYS;

	if (!amdgpu_gem_vm_ready(adev, vm, list))
		goto error;

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

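/*
 * GEM VA ioctl: map, unmap, clear or replace mappings of a BO (or a PRT
 * range) in the GPU virtual address space of the calling process.
 */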
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	uint64_t va_flags;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

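/*
 * GEM op ioctl: query the creation parameters of a BO or restrict its
 * preferred placement domains.
 */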
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(uintptr_t)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

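/*
 * Dumb buffer support: allocate a CPU accessible VRAM BO sized for the
 * requested width/height/bpp and return a handle for it.
 */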
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

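/*
 * debugfs helpers: dump every GEM object of every open file with its size,
 * placement, GPU offset and pin count.
 */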
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}