drivers/gpu/drm/vmwgfx/vmwgfx_bo.c

// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include "ttm_object.h"

/**
 * struct vmw_user_buffer_object - User-space-visible buffer object
 *
 * @prime: The prime object providing user visibility.
 * @vbo: The struct vmw_buffer_object
 */
struct vmw_user_buffer_object {
        struct ttm_prime_object prime;
        struct vmw_buffer_object vbo;
};

/**
 * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_buffer_object embedding the
 * TTM buffer object.
 */
static struct vmw_buffer_object *
vmw_buffer_object(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_buffer_object, base);
}

/**
 * vmw_user_buffer_object - Convert a struct ttm_buffer_object to a struct
 * vmw_user_buffer_object.
 *
 * @bo: Pointer to the TTM buffer object.
 * Return: Pointer to the struct vmw_user_buffer_object embedding the TTM
 * buffer object.
 */
static struct vmw_user_buffer_object *
vmw_user_buffer_object(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        return container_of(vmw_bo, struct vmw_user_buffer_object, vbo);
}
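
/*
 * Note that this downcast is only valid when @bo is known to be embedded in
 * a struct vmw_user_buffer_object: callers elsewhere in this file first
 * check that bo->destroy == vmw_user_bo_destroy before converting.
 */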

/**
 * vmw_bo_pin_in_placement - Validate a buffer to placement.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @placement:  The placement to pin it in.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
                            struct vmw_buffer_object *buf,
                            struct ttm_placement *placement,
                            bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;
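
        /*
         * An already-pinned buffer must not be moved: if it is pinned, only
         * check that its current placement is compatible with the requested
         * one; otherwise validate it into the new placement.
         */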
        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, placement, &ctx);

        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);

err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram_or_gmr - Move a buffer to vram or gmr.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
                              struct vmw_buffer_object *buf,
                              bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        int ret;
        uint32_t new_flags;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        if (buf->pin_count > 0) {
                ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
                goto out_unreserve;
        }

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
        if (likely(ret == 0) || ret == -ERESTARTSYS)
                goto out_unreserve;
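
        /*
         * Validation into VRAM-or-GMR failed for a reason other than a
         * signal; fall back to plain VRAM placement.
         */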
        ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);

out_unreserve:
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err:
        ttm_write_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_pin_in_vram - Move a buffer to vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to move.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
                       struct vmw_buffer_object *buf,
                       bool interruptible)
{
        return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
                                       interruptible);
}

/**
 * vmw_bo_pin_in_start_of_vram - Move a buffer to start of vram.
 *
 * This function takes the reservation_sem in write mode.
 * Flushes and unpins the query bo to avoid failures.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                                struct vmw_buffer_object *buf,
                                bool interruptible)
{
        struct ttm_operation_ctx ctx = {interruptible, false };
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement placement;
        struct ttm_place place;
        int ret = 0;
        uint32_t new_flags;

        place = vmw_vram_placement.placement[0];
        place.lpfn = bo->num_pages;
        placement.num_placement = 1;
        placement.placement = &place;
        placement.num_busy_placement = 1;
        placement.busy_placement = &place;

        ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        vmw_execbuf_release_pinned_bo(dev_priv);
        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err_unlock;

        /*
         * Is this buffer already in vram but not at the start of it?
         * In that case, evict it first because TTM isn't good at handling
         * that situation.
         */
        if (bo->mem.mem_type == TTM_PL_VRAM &&
            bo->mem.start < bo->num_pages &&
            bo->mem.start > 0 &&
            buf->pin_count == 0) {
                ctx.interruptible = false;
                (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
        }

        if (buf->pin_count > 0)
                ret = ttm_bo_mem_compat(&placement, &bo->mem,
                                        &new_flags) == true ? 0 : -EINVAL;
        else
                ret = ttm_bo_validate(bo, &placement, &ctx);

        /* For some reason we didn't end up at the start of vram */
        WARN_ON(ret == 0 && bo->offset != 0);
        if (!ret)
                vmw_bo_pin_reserved(buf, true);

        ttm_bo_unreserve(bo);
err_unlock:
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unpin - Unpin the given buffer without moving it.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to unpin.
 * @interruptible:  Use interruptible wait.
 * Return: Zero on success, Negative error code on failure. In particular
 * -ERESTARTSYS if interrupted by a signal
 */
int vmw_bo_unpin(struct vmw_private *dev_priv,
                 struct vmw_buffer_object *buf,
                 bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
 * of a buffer.
 *
 * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
 * @ptr: SVGAGuestPtr returning the result.
 */
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
                          SVGAGuestPtr *ptr)
{
        if (bo->mem.mem_type == TTM_PL_VRAM) {
                ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
                ptr->offset = bo->offset;
        } else {
                ptr->gmrId = bo->mem.start;
                ptr->offset = 0;
        }
}

/**
 * vmw_bo_pin_reserved - Pin or unpin a buffer object without moving it.
 *
 * @vbo: The buffer object. Must be reserved.
 * @pin: Whether to pin or unpin.
 */
void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
{
        struct ttm_operation_ctx ctx = { false, true };
        struct ttm_place pl;
        struct ttm_placement placement;
        struct ttm_buffer_object *bo = &vbo->base;
        uint32_t old_mem_type = bo->mem.mem_type;
        int ret;

        lockdep_assert_held(&bo->resv->lock.base);

        if (pin) {
                if (vbo->pin_count++ > 0)
                        return;
        } else {
                WARN_ON(vbo->pin_count <= 0);
                if (--vbo->pin_count > 0)
                        return;
        }

        pl.fpfn = 0;
        pl.lpfn = 0;
        pl.flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | VMW_PL_FLAG_MOB
                | TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
        if (pin)
                pl.flags |= TTM_PL_FLAG_NO_EVICT;

        memset(&placement, 0, sizeof(placement));
        placement.num_placement = 1;
        placement.placement = &pl;

        ret = ttm_bo_validate(bo, &placement, &ctx);

        BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
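
/*
 * A minimal usage sketch: callers in this file pin with the buffer object
 * reserved, along the lines of
 *
 *      ret = ttm_bo_reserve(bo, interruptible, false, NULL);
 *      if (ret == 0) {
 *              vmw_bo_pin_reserved(buf, true);
 *              ttm_bo_unreserve(bo);
 *      }
 */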

/**
 * vmw_bo_map_and_cache - Map a buffer object and cache the map
 *
 * @vbo: The buffer object to map
 * Return: A kernel virtual address or NULL if mapping failed.
 *
 * This function maps a buffer object into the kernel address space, or
 * returns the virtual kernel address of an already existing map. The virtual
 * address remains valid as long as the buffer object is pinned or reserved.
 * The cached map is torn down on either
 * 1) Buffer object move
 * 2) Buffer object swapout
 * 3) Buffer object destruction
 */
void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
{
        struct ttm_buffer_object *bo = &vbo->base;
        bool not_used;
        void *virtual;
        int ret;

        virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
        if (virtual)
                return virtual;

        ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
        if (ret)
                DRM_ERROR("Buffer object map failed: %d.\n", ret);

        return ttm_kmap_obj_virtual(&vbo->map, &not_used);
}
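
/*
 * A hypothetical usage sketch, assuming the buffer is already pinned or
 * reserved so the returned address stays valid:
 *
 *      u32 *data = vmw_bo_map_and_cache(vbo);
 *      if (data)
 *              data[0] = value;   // CPU access through the cached map
 *
 * The map is torn down automatically on move, swapout or destruction, so no
 * vmw_bo_unmap() call is required after each use.
 */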

/**
 * vmw_bo_unmap - Tear down a cached buffer object map.
 *
 * @vbo: The buffer object whose map we are tearing down.
 *
 * This function tears down a cached map set up using
 * vmw_bo_map_and_cache().
 */
void vmw_bo_unmap(struct vmw_buffer_object *vbo)
{
        if (vbo->map.bo == NULL)
                return;

        ttm_bo_kunmap(&vbo->map);
}

/**
 * vmw_bo_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_bo_acc_size(struct vmw_private *dev_priv, size_t size,
                              bool user)
{
        static size_t struct_size, user_struct_size;
        size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

        if (unlikely(struct_size == 0)) {
                size_t backend_size = ttm_round_pot(vmw_tt_size);

                struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_buffer_object));
                user_struct_size = backend_size +
                        ttm_round_pot(sizeof(struct vmw_user_buffer_object)) +
                        TTM_OBJ_EXTRA_SIZE;
        }

        if (dev_priv->map_mode == vmw_dma_alloc_coherent)
                page_array_size +=
                        ttm_round_pot(num_pages * sizeof(dma_addr_t));

        return ((user) ? user_struct_size : struct_size) +
                page_array_size;
}
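
/*
 * A worked example, assuming 4 KiB pages and 8-byte pointers: a 2 MiB
 * request gives num_pages = 512, so the page array accounts for
 * ttm_round_pot(512 * 8) = 4096 bytes on top of the (user) struct size,
 * and twice that in coherent mode, where an extra dma_addr_t is kept per
 * page.
 */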

/**
 * vmw_bo_bo_free - vmw buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
void vmw_bo_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);

        vmw_bo_unmap(vmw_bo);
        kfree(vmw_bo);
}

/**
 * vmw_user_bo_destroy - vmw user buffer object destructor
 *
 * @bo: Pointer to the embedded struct ttm_buffer_object
 */
static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_buffer_object *vmw_user_bo = vmw_user_buffer_object(bo);

        vmw_bo_unmap(&vmw_user_bo->vbo);
        ttm_prime_object_kfree(vmw_user_bo, prime);
}

/**
 * vmw_bo_init - Initialize a vmw buffer object
 *
 * @dev_priv: Pointer to the device private struct
 * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
 * @size: Buffer object size in bytes.
 * @placement: Initial placement.
 * @interruptible: Whether waits should be performed interruptibly.
 * @bo_free: The buffer object destructor.
 * Returns: Zero on success, negative error code on error.
 *
 * Note that on error, the code will free the buffer object.
 */
int vmw_bo_init(struct vmw_private *dev_priv,
                struct vmw_buffer_object *vmw_bo,
                size_t size, struct ttm_placement *placement,
                bool interruptible,
                void (*bo_free)(struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        size_t acc_size;
        int ret;
        bool user = (bo_free == &vmw_user_bo_destroy);

        WARN_ON_ONCE(!bo_free && (!user && (bo_free != vmw_bo_bo_free)));

        acc_size = vmw_bo_acc_size(dev_priv, size, user);
        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->res_list);

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, interruptible, acc_size,
                          NULL, NULL, bo_free);
        return ret;
}

/**
 * vmw_user_bo_release - TTM reference base object release callback for
 * vmw user buffer objects
 *
 * @p_base: The TTM base object pointer about to be unreferenced.
 *
 * Clears the TTM base object pointer and drops the reference the
 * base object has on the underlying struct vmw_buffer_object.
 */
static void vmw_user_bo_release(struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base = *p_base;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_put(&vmw_user_bo->vbo.base);
}

/**
 * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
 * for vmw user buffer objects
 *
 * @base: Pointer to the TTM base object
 * @ref_type: Reference type of the reference reaching zero.
 *
 * Called when user-space drops its last synccpu reference on the buffer
 * object, either explicitly or as part of a cleanup file close.
 */
static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
                                        enum ttm_ref_type ref_type)
{
        struct vmw_user_buffer_object *user_bo;

        user_bo = container_of(base, struct vmw_user_buffer_object, prime.base);

        switch (ref_type) {
        case TTM_REF_SYNCCPU_WRITE:
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);
                break;
        default:
                WARN_ONCE(true, "Undefined buffer object reference release.\n");
        }
}

/**
 * vmw_user_bo_alloc - Allocate a user buffer object
 *
 * @dev_priv: Pointer to a struct vmw_private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the buffer object.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
 * should be assigned.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, negative error code on error.
 */
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t size,
                      bool shareable,
                      uint32_t *handle,
                      struct vmw_buffer_object **p_vbo,
                      struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *user_bo;
        int ret;

        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(!user_bo)) {
                DRM_ERROR("Failed to allocate a buffer.\n");
                return -ENOMEM;
        }

        ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
                          (dev_priv->has_mob) ?
                          &vmw_sys_placement :
                          &vmw_vram_sys_placement, true,
                          &vmw_user_bo_destroy);
        if (unlikely(ret != 0))
                return ret;

        ttm_bo_get(&user_bo->vbo.base);
        ret = ttm_prime_object_init(tfile,
                                    size,
                                    &user_bo->prime,
                                    shareable,
                                    ttm_buffer_type,
                                    &vmw_user_bo_release,
                                    &vmw_user_bo_ref_obj_release);
        if (unlikely(ret != 0)) {
                ttm_bo_put(&user_bo->vbo.base);
                goto out_no_base_object;
        }

        *p_vbo = &user_bo->vbo;
        if (p_base) {
                *p_base = &user_bo->prime.base;
                kref_get(&(*p_base)->refcount);
        }
        *handle = user_bo->prime.base.handle;

out_no_base_object:
        return ret;
}

/**
 * vmw_user_bo_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
                              struct ttm_object_file *tfile)
{
        struct vmw_user_buffer_object *vmw_user_bo;

        if (unlikely(bo->destroy != vmw_user_bo_destroy))
                return -EPERM;

        vmw_user_bo = vmw_user_buffer_object(bo);

        /* Check that the caller has opened the object. */
        if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
                return 0;

        DRM_ERROR("Could not grant buffer access.\n");
        return -EPERM;
}

/**
 * vmw_user_bo_synccpu_grab - Grab a struct vmw_user_buffer_object for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 * Return: Zero on success, Negative error code on error. In particular,
 * -EBUSY will be returned if a dontblock operation is requested and the
 * buffer object is busy, and -ERESTARTSYS will be returned if a wait is
 * interrupted by a signal.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
                                    struct ttm_object_file *tfile,
                                    uint32_t flags)
{
        struct ttm_buffer_object *bo = &user_bo->vbo.base;
        bool existed;
        int ret;

        if (flags & drm_vmw_synccpu_allow_cs) {
                bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
                long lret;

                lret = reservation_object_wait_timeout_rcu
                        (bo->resv, true, true,
                         nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
                if (!lret)
                        return -EBUSY;
                else if (lret < 0)
                        return lret;
                return 0;
        }

        ret = ttm_bo_synccpu_write_grab
                (bo, !!(flags & drm_vmw_synccpu_dontblock));
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
                                 TTM_REF_SYNCCPU_WRITE, &existed, false);
        if (ret != 0 || existed)
                ttm_bo_synccpu_write_release(&user_bo->vbo.base);

        return ret;
}
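
/*
 * Note that the grab is recorded as a TTM_REF_SYNCCPU_WRITE reference on
 * the base object, which is what ties its lifetime to @tfile: if
 * user-space never calls release, the reference (and with it the write
 * grab) is dropped in vmw_user_bo_ref_obj_release() on file close.
 */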

/**
 * vmw_user_bo_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_bo_synccpu_release(uint32_t handle,
                                       struct ttm_object_file *tfile,
                                       uint32_t flags)
{
        if (!(flags & drm_vmw_synccpu_allow_cs))
                return ttm_ref_object_base_unref(tfile, handle,
                                                 TTM_REF_SYNCCPU_WRITE);

        return 0;
}

/**
 * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_synccpu_arg *arg =
                (struct drm_vmw_synccpu_arg *) data;
        struct vmw_buffer_object *vbo;
        struct vmw_user_buffer_object *user_bo;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_base_object *buffer_base;
        int ret;

        if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
            || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
                               drm_vmw_synccpu_dontblock |
                               drm_vmw_synccpu_allow_cs)) != 0) {
                DRM_ERROR("Illegal synccpu flags.\n");
                return -EINVAL;
        }

        switch (arg->op) {
        case drm_vmw_synccpu_grab:
                ret = vmw_user_bo_lookup(tfile, arg->handle, &vbo,
                                         &buffer_base);
                if (unlikely(ret != 0))
                        return ret;

                user_bo = container_of(vbo, struct vmw_user_buffer_object,
                                       vbo);
                ret = vmw_user_bo_synccpu_grab(user_bo, tfile, arg->flags);
                vmw_bo_unreference(&vbo);
                ttm_base_object_unref(&buffer_base);
                if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
                             ret != -EBUSY)) {
                        DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        case drm_vmw_synccpu_release:
                ret = vmw_user_bo_synccpu_release(arg->handle, tfile,
                                                  arg->flags);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
                                  (unsigned int) arg->handle);
                        return ret;
                }
                break;
        default:
                DRM_ERROR("Invalid synccpu operation.\n");
                return -EINVAL;
        }

        return 0;
}
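
/*
 * From user-space, a hypothetical CPU-access sequence around this ioctl
 * could look like:
 *
 *      arg.handle = bo_handle;
 *      arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *      arg.op = drm_vmw_synccpu_grab;
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *      ... CPU reads/writes through an mmap of the buffer ...
 *      arg.op = drm_vmw_synccpu_release;
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 */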

/**
 * vmw_bo_alloc_ioctl - ioctl function implementing the buffer object
 * allocation functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and allocates a
 * struct vmw_user_buffer_object bo.
 */
int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_buffer_object *vbo;
        uint32_t handle;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                req->size, false, &handle, &vbo,
                                NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        rep->handle = handle;
        rep->map_handle = drm_vma_node_offset_addr(&vbo->base.vma_node);
        rep->cur_gmr_id = handle;
        rep->cur_gmr_offset = 0;

        vmw_bo_unreference(&vbo);

out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_bo_unref_ioctl - Generic handle close ioctl.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 * Return: Zero on success, negative error code on error.
 *
 * This function checks the ioctl arguments for validity and closes a
 * handle to a TTM base object, optionally freeing the object.
 */
int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_user_bo_lookup - Look up a vmw user buffer object from a handle.
 *
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
 * struct vmw_buffer_object should be placed.
 * @p_base: Pointer to where a pointer to the TTM base object should be
 * placed, or NULL if no such pointer is required.
 * Return: Zero on success, Negative error code on error.
 *
 * Both the output base object pointer and the vmw buffer object pointer
 * will be refcounted.
 */
int vmw_user_bo_lookup(struct ttm_object_file *tfile,
                       uint32_t handle, struct vmw_buffer_object **out,
                       struct ttm_base_object **p_base)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        ttm_bo_get(&vmw_user_bo->vbo.base);
        if (p_base)
                *p_base = base;
        else
                ttm_base_object_unref(&base);
        *out = &vmw_user_bo->vbo;

        return 0;
}

/**
 * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
 * @tfile: The TTM object file the handle is registered with.
 * @handle: The user buffer object handle.
 *
 * This function looks up a struct vmw_user_buffer_object and returns a
 * pointer to the struct vmw_buffer_object it derives from without
 * refcounting the pointer. The returned pointer is only valid until
 * vmw_user_bo_noref_release() is called, and the object pointed to by the
 * returned pointer may be doomed. Any persistent usage of the object requires
 * a refcount to be taken using ttm_bo_reference_unless_doomed(). Iff this
 * function returns successfully it needs to be paired with
 * vmw_user_bo_noref_release(), and no sleeping or scheduling functions may be
 * called in between these function calls.
 *
 * Return: A struct vmw_buffer_object pointer if successful or negative
 * error pointer on failure.
 */
struct vmw_buffer_object *
vmw_user_bo_noref_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct vmw_user_buffer_object *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-ESRCH);
        }

        if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_noref_release();
                DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
                          (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        vmw_user_bo = container_of(base, struct vmw_user_buffer_object,
                                   prime.base);
        return &vmw_user_bo->vbo;
}
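
/*
 * A minimal usage sketch of the noref contract described above:
 *
 *      vbo = vmw_user_bo_noref_lookup(tfile, handle);
 *      if (!IS_ERR(vbo)) {
 *              ... short, non-sleeping use of vbo ...
 *              vmw_user_bo_noref_release();
 *      }
 */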

/**
 * vmw_user_bo_reference - Open a handle to a vmw user buffer object.
 *
 * @tfile: The TTM object file to register the handle with.
 * @vbo: The embedded vmw buffer object.
 * @handle: Pointer to where the new handle should be placed.
 * Return: Zero on success, Negative error code on error.
 */
int vmw_user_bo_reference(struct ttm_object_file *tfile,
                          struct vmw_buffer_object *vbo,
                          uint32_t *handle)
{
        struct vmw_user_buffer_object *user_bo;

        if (vbo->base.destroy != vmw_user_bo_destroy)
                return -EINVAL;

        user_bo = container_of(vbo, struct vmw_user_buffer_object, vbo);

        *handle = user_bo->prime.base.handle;
        return ttm_ref_object_add(tfile, &user_bo->prime.base,
                                  TTM_REF_USAGE, NULL, false);
}

/**
 * vmw_bo_fence_single - Utility function to fence a single TTM buffer
 * object without unreserving it.
 *
 * @bo: Pointer to the struct ttm_buffer_object to fence.
 * @fence: Pointer to the fence. If NULL, this function will
 * insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
                         struct vmw_fence_obj *fence)
{
        struct ttm_bo_device *bdev = bo->bdev;

        struct vmw_private *dev_priv =
                container_of(bdev, struct vmw_private, bdev);

        if (fence == NULL) {
                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                reservation_object_add_excl_fence(bo->resv, &fence->base);
                dma_fence_put(&fence->base);
        } else
                reservation_object_add_excl_fence(bo->resv, &fence->base);
}

/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_bo_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_buffer_object *vbo;
        int ret;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
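
        /*
         * Example: width = 1024, bpp = 32, height = 768 gives
         * pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 = 3 MiB.
         * The (bpp + 7) / 8 rounding also handles color depths that are
         * not a multiple of 8 bits.
         */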

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
                                args->size, false, &args->handle,
                                &vbo, NULL);
        if (unlikely(ret != 0))
                goto out_no_bo;

        vmw_bo_unreference(&vbo);
out_no_bo:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
                        struct drm_device *dev, uint32_t handle,
                        uint64_t *offset)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_buffer_object *out_buf;
        int ret;

        ret = vmw_user_bo_lookup(tfile, handle, &out_buf, NULL);
        if (ret != 0)
                return -EINVAL;

        *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
        vmw_bo_unreference(&out_buf);
        return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * Return: Zero on success, negative error code on failure.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
                     struct drm_device *dev,
                     uint32_t handle)
{
        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         handle, TTM_REF_USAGE);
}

/**
 * vmw_bo_swap_notify - swapout notify callback.
 *
 * @bo: The buffer object to be swapped out.
 */
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        /* Kill any cached kernel maps before swapout */
        vmw_bo_unmap(vmw_buffer_object(bo));
}

/**
 * vmw_bo_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The struct ttm_mem_reg indicating to what memory
 *       region the move is taking place.
 *
 * Detaches cached maps and device bindings that require that the
 * buffer doesn't move.
 */
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                        struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *vbo;

        if (mem == NULL)
                return;

        /* Is @bo embedded in a struct vmw_buffer_object? */
        if (bo->destroy != vmw_bo_bo_free &&
            bo->destroy != vmw_user_bo_destroy)
                return;

        vbo = container_of(bo, struct vmw_buffer_object, base);

        /*
         * Kill any cached kernel maps before move to or from VRAM.
         * With other types of moves, the underlying pages stay the same,
         * and the map can be kept.
         */
        if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
                vmw_bo_unmap(vbo);

        /*
         * If we're moving a backup MOB out of MOB placement, then make sure we
         * read back all resource content first, and unbind the MOB from
         * the resource.
         */
        if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
                vmw_resource_unbind_list(vbo);
}