/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
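
/**
 * amdgpu_gem_object_free - free a GEM object
 * @gobj: GEM object to free
 *
 * Tears down a PRIME import (if any), unregisters the MMU notifier and
 * drops the reference on the backing amdgpu buffer object.
 */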
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}
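
/**
 * amdgpu_gem_object_create - create a backing buffer object
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least one page
 * @initial_domain: initial placement domain
 * @flags: AMDGPU_GEM_CREATE_* flags
 * @kernel: whether this is a kernel-internal allocation
 * @resv: optional reservation object to share with the BO
 * @obj: returned GEM object
 *
 * Allocates the amdgpu_bo and returns its embedded GEM object in @obj.
 * On allocation failure the CPU-access requirement and, for VRAM, the
 * placement domain are relaxed before the error is reported.
 */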
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *bo;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, resv, 0, &bo);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
				goto retry;
			}

			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &bo->gem_base;

	return 0;
}
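
/**
 * amdgpu_gem_force_release - release all GEM objects of all clients
 * @adev: amdgpu device
 *
 * Walks every open DRM file, drops all of its GEM handles and destroys the
 * handle IDR. Still-active user space clients and allocations only trigger
 * warnings; the objects are released anyway.
 */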
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->filelist_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_put_unlocked(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->filelist_mutex);
}

/*
 * Called from drm_gem_handle_create(), which is reached from both the GEM
 * create and the GEM open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv)
{
	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	struct mm_struct *mm;
	int r;

	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
	if (mm && mm != current->mm)
		return -EPERM;

	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
		return -EPERM;

	r = amdgpu_bo_reserve(abo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, abo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, abo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(abo);
	return 0;
}
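
/**
 * amdgpu_gem_object_close - clean up the VM state when a handle is closed
 * @obj: GEM object being closed
 * @file_priv: DRM file of the client
 *
 * Drops the client's bo_va reference for @obj. When the last reference goes
 * away the mapping is removed, the freed page table entries are cleared and
 * the resulting fence is attached to the BO.
 */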
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because "
			"we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va && --bo_va->ref_count == 0) {
		amdgpu_vm_bo_rmv(adev, bo_va);

		if (amdgpu_vm_ready(vm)) {
			struct dma_fence *fence = NULL;

			r = amdgpu_vm_clear_freed(adev, vm, &fence);
			if (r) {
				dev_err(adev->dev, "failed to clear page "
					"tables on GEM object close (%d)\n", r);
			}

			if (fence) {
				amdgpu_bo_fence(bo, fence, true);
				dma_fence_put(fence);
			}
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}
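
/**
 * amdgpu_gem_create_ioctl - create a GEM buffer object
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_create)
 * @filp: DRM file of the caller
 *
 * Validates the requested flags and domains, scales GDS/GWS/OA sizes to
 * bytes, allocates the buffer object and returns a handle for it in
 * args->out.handle. Returns 0 on success, negative error code on failure.
 */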
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_gem_create *args = data;
	uint64_t flags = args->in.domain_flags;
	uint64_t size = args->in.bo_size;
	struct reservation_object *resv = NULL;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* reject invalid gem flags */
	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
		      AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
		return -EINVAL;

	/* reject invalid gem domains */
	if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
				 AMDGPU_GEM_DOMAIN_GTT |
				 AMDGPU_GEM_DOMAIN_VRAM |
				 AMDGPU_GEM_DOMAIN_GDS |
				 AMDGPU_GEM_DOMAIN_GWS |
				 AMDGPU_GEM_DOMAIN_OA))
		return -EINVAL;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else
			return -EINVAL;
	}
	size = roundup(size, PAGE_SIZE);

	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		r = amdgpu_bo_reserve(vm->root.base.bo, false);
		if (r)
			return r;

		resv = vm->root.base.bo->tbo.resv;
	}

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     flags, false, resv, &gobj);
	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
		if (!r) {
			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
		}
		amdgpu_bo_unreserve(vm->root.base.bo);
	}
	if (r)
		return r;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;
}
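
/**
 * amdgpu_gem_userptr_ioctl - create a GEM object backed by user memory
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_userptr)
 * @filp: DRM file of the caller
 *
 * Wraps a page-aligned user address range into a GTT buffer object.
 * Writable mappings must register an MMU notifier, and
 * AMDGPU_GEM_USERPTR_VALIDATE populates and validates the pages up front.
 */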
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install a MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
				     0, 0, NULL, &gobj);
	if (r)
		return r;

	bo = gem_to_amdgpu_bo(gobj);
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto release_object;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);

release_object:
	drm_gem_object_put_unlocked(gobj);

	return r;
}
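
/**
 * amdgpu_mode_dumb_mmap - look up the fake mmap offset of a BO
 * @filp: DRM file of the caller
 * @dev: DRM device
 * @handle: GEM handle of the buffer
 * @offset_p: returned mmap offset
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS cannot
 * be mapped and return -EPERM.
 */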
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_put_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
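
/**
 * amdgpu_gem_mmap_ioctl - return the mmap offset of a GEM object
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_mmap)
 * @filp: DRM file of the caller
 */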
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}
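
/**
 * amdgpu_gem_wait_idle_ioctl - wait for a GEM object to become idle
 * @dev: DRM device
 * @data: ioctl argument (union drm_amdgpu_gem_wait_idle)
 * @filp: DRM file of the caller
 *
 * Waits for all fences on the BO's reservation object, up to the timeout
 * requested by user space. args->out.status is non-zero if the BO is still
 * busy when the timeout expires.
 */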
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
						  timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_put_unlocked(gobj);
	return r;
}
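
/**
 * amdgpu_gem_metadata_ioctl - get or set buffer object metadata
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_metadata)
 * @filp: DRM file of the caller
 *
 * Depending on args->op, reads or updates the tiling flags and the opaque
 * metadata blob attached to a buffer object.
 */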
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d \n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @vm: vm to update
 * @bo_va: bo_va to update
 * @list: validation list
 * @operation: map, unmap or clear
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_bo_va *bo_va,
				    struct list_head *list,
				    uint32_t operation)
{
	int r;

	if (!amdgpu_vm_ready(vm))
		return;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		goto error;

	if (operation == AMDGPU_VA_OP_MAP ||
	    operation == AMDGPU_VA_OP_REPLACE)
		r = amdgpu_vm_bo_update(adev, bo_va, false);

	r = amdgpu_vm_update_directories(adev, vm);
	if (r)
		goto error;

error:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
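
/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the GPU virtual address space
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_va)
 * @filp: DRM file of the caller
 *
 * Handles AMDGPU_VA_OP_MAP, UNMAP, CLEAR and REPLACE on the per-client VM.
 * Unless AMDGPU_VM_DELAY_UPDATE is requested, the page tables are updated
 * immediately afterwards.
 */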
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	const uint32_t valid_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
		AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_MASK;
	const uint32_t prt_flags = AMDGPU_VM_DELAY_UPDATE |
		AMDGPU_VM_PAGE_PRT;

	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *abo;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo_list_entry vm_pd;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint64_t va_flags;
	int r = 0;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in reserved area 0x%LX\n",
			args->va_address, AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	if (args->va_address >= AMDGPU_VA_HOLE_START &&
	    args->va_address < AMDGPU_VA_HOLE_END) {
		dev_dbg(&dev->pdev->dev,
			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
			args->va_address, AMDGPU_VA_HOLE_START,
			AMDGPU_VA_HOLE_END);
		return -EINVAL;
	}

	args->va_address &= AMDGPU_VA_HOLE_MASK;

	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
			args->flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
	case AMDGPU_VA_OP_CLEAR:
	case AMDGPU_VA_OP_REPLACE:
		break;
	default:
		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
		gobj = drm_gem_object_lookup(filp, args->handle);
		if (gobj == NULL)
			return -ENOENT;
		abo = gem_to_amdgpu_bo(gobj);
		tv.bo = &abo->tbo;
		tv.shared = false;
		list_add(&tv.head, &list);
	} else {
		gobj = NULL;
		abo = NULL;
	}

	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_unref;

	if (abo) {
		bo_va = amdgpu_vm_bo_find(&fpriv->vm, abo);
		if (!bo_va) {
			r = -ENOENT;
			goto error_backoff;
		}
	} else if (args->operation != AMDGPU_VA_OP_CLEAR) {
		bo_va = fpriv->prt_va;
	} else {
		bo_va = NULL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;

	case AMDGPU_VA_OP_CLEAR:
		r = amdgpu_vm_bo_clear_mappings(adev, &fpriv->vm,
						args->va_address,
						args->map_size);
		break;
	case AMDGPU_VA_OP_REPLACE:
		r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
					args->map_size);
		if (r)
			goto error_backoff;

		va_flags = amdgpu_vm_get_pte_flags(adev, args->flags);
		r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
					     args->offset_in_bo, args->map_size,
					     va_flags);
		break;
	default:
		break;
	}
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) && !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, &fpriv->vm, bo_va, &list,
					args->operation);

error_backoff:
	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
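
/**
 * amdgpu_gem_op_ioctl - query or change buffer object properties
 * @dev: DRM device
 * @data: ioctl argument (struct drm_amdgpu_gem_op)
 * @filp: DRM file of the caller
 *
 * AMDGPU_GEM_OP_GET_GEM_CREATE_INFO copies the creation parameters back to
 * user space; AMDGPU_GEM_OP_SET_PLACEMENT changes the preferred domains.
 * VRAM placement is rejected for PRIME-shared BOs and userptr BOs cannot be
 * changed at all.
 */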
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = u64_to_user_ptr(args->value);

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->preferred_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
			r = -EINVAL;
			amdgpu_bo_unreserve(robj);
			break;
		}
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							 AMDGPU_GEM_DOMAIN_GTT |
							 AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->preferred_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
			amdgpu_vm_bo_invalidate(adev, robj, true);

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_put_unlocked(gobj);
	return r;
}
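
/**
 * amdgpu_mode_dumb_create - create a dumb buffer for display
 * @file_priv: DRM file of the caller
 * @dev: DRM device
 * @args: dumb buffer parameters
 *
 * Allocates a CPU-accessible VRAM buffer large enough for the requested
 * width, height and bpp and returns a GEM handle for it.
 */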
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     false, NULL, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(gobj);
	if (r)
		return r;

	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;
	uint64_t offset;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s",
		   id, amdgpu_bo_size(bo), placement);

	offset = READ_ONCE(bo->tbo.mem.start);
	if (offset != AMDGPU_BO_INVALID_OFFSET)
		seq_printf(m, " @ 0x%010Lx", offset);

	pin_count = READ_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->filelist_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->filelist_mutex);
	return 0;
}

static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}