// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_drv.h"
#include "ttm_object.h"

/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object.
 */
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};

/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
                goto out_unreserve;
        }

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;

        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}

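/*
 * Illustrative sketch only (not part of the driver): a typical pin/unpin
 * pairing built from the helpers above. The surrounding variables
 * (dev_priv, buf) and the calling context are hypothetical.
 *
 *	int ret = vmw_bo_pin_in_vram(dev_priv, buf, true);
 *	if (ret)
 *		return ret;
 *	... access the buffer while it is guaranteed to stay in VRAM ...
 *	ret = vmw_bo_unpin(dev_priv, buf, false);
 */
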
/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
            buf->pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        dma_resv_assert_held(bo->base.resv);

        if (pin) {
                if (vbo->pin_count++ > 0)
                        return;
        } else {
                WARN_ON(vbo->pin_count <= 0);
                if (--vbo->pin_count > 0)
                        return;
        }

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl.flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}

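/*
 * Illustrative sketch only: vmw_bo_pin_reserved() requires the buffer to be
 * reserved by the caller, so a minimal (hypothetical) pin/unpin cycle under
 * an explicitly taken reservation would look like:
 *
 *	ret = ttm_bo_reserve(&vbo->base, true, false, NULL);
 *	if (ret)
 *		return ret;
 *	vmw_bo_pin_reserved(vbo, true);
 *	... the buffer is now guaranteed not to be evicted or moved ...
 *	vmw_bo_pin_reserved(vbo, false);
 *	ttm_bo_unreserve(&vbo->base);
 */
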
/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}

/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}

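/*
 * Illustrative sketch only: the cached-map pair above is typically used as
 * follows while the buffer is pinned or reserved (variables hypothetical):
 *
 *	void *virtual = vmw_bo_map_and_cache(vbo);
 *	if (!virtual)
 *		return -ENOMEM;
 *	memcpy(virtual, data, size);
 *	... the map may be left cached; it is torn down automatically on
 *	    move, swapout or destruction, or explicitly with: ...
 *	vmw_bo_unmap(vbo);
 */
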
/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
                        TTM_OBJ_EXTRA_SIZE;
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}

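/*
 * Worked example (illustrative only, with assumed page size of 4 KiB): for a
 * 16 KiB request, num_pages = 4 and page_array_size starts as
 * ttm_round_pot(4 * sizeof(void *)). With coherent DMA mapping another
 * ttm_round_pot(4 * sizeof(dma_addr_t)) is added, and the cached
 * struct_size/user_struct_size terms account for the TTM backend plus the
 * (user) buffer object structs.
 */
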
/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        WARN_ON(vmw_bo->dirty);
        WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}

/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);
        struct vmw_buffer_object *vbo = &vmw_user_bo->vbo;

        WARN_ON(vbo->dirty);
        WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
        vmw_bo_unmap(vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptible.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));
        BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
        vmw_bo->base.priority = 3;
        vmw_bo->res_tree = RB_ROOT;

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}

/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_put(&vmw_user_bo->vbo.base);
}

/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                atomic_dec(&user_bo->vbo.cpu_writers);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}

/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * assigned, or NULL if no such pointer is needed.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        ttm_bo_get(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_put(&user_bo->vbo.base);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.handle;

out_no_base_object:
        return ret;
}

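/*
 * Illustrative sketch only: allocating a user-visible buffer object and
 * dropping the local reference once the handle has been handed back to
 * user-space. This mirrors how the ioctl paths further down use the helper;
 * the surrounding variables are hypothetical.
 *
 *	uint32_t handle;
 *	struct vmw_buffer_object *vbo;
 *
 *	ret = vmw_user_bo_alloc(dev_priv, tfile, size, false, &handle,
 *				&vbo, NULL);
 *	if (ret)
 *		return ret;
 *	... report @handle to user-space ...
 *	vmw_bo_unreference(&vbo);
 */
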
/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                long lret;

                lret = dma_resv_wait_timeout_rcu
                        (bo->base.resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_reserve(bo, true, nonblock, NULL);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_wait(bo, true, nonblock);
        if (likely(ret == 0))
                atomic_inc(&user_bo->vbo.cpu_writers);

        ttm_bo_unreserve(bo);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                atomic_dec(&user_bo->vbo.cpu_writers);

        return ret;
}

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

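/*
 * Illustrative sketch only: the two helpers above are used as a grab/release
 * pair on behalf of user-space (see the synccpu ioctl below); a blocking grab
 * that is never explicitly released is cleaned up when the object file is
 * closed. The variables here are hypothetical.
 *
 *	ret = vmw_user_bo_synccpu_grab(user_bo, tfile, flags);
 *	if (ret)
 *		return ret;
 *	... user-space accesses the buffer with the CPU ...
 *	ret = vmw_user_bo_synccpu_release(handle, tfile, flags);
 */
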
/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}

/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to a where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_get(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_bo and returns a pointer to the
 * struct vmw_buffer_object it derives from without refcounting the pointer.
 * The returned pointer is only valid until vmw_user_bo_noref_release() is
 * called, and the object pointed to by the returned pointer may be doomed.
 * Any persistent usage of the object requires a refcount to be taken using
 * ttm_bo_reference_unless_doomed(). If this function returns successfully it
 * needs to be paired with vmw_user_bo_noref_release(), and no sleeping or
 * scheduling functions may be called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_noref_release();
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        return &vmw_user_bo->vbo;
}

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;

        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
                dma_resv_add_excl_fence(bo->base.resv, &fence->base);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

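/*
 * Worked example (illustrative only): for a 1024x768 dumb buffer at 32 bpp
 * the callback above computes pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes before handing the request to
 * vmw_user_bo_alloc().
 */
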
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *vbo;

        if (mem == NULL)
                return;

        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}