// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
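
/*
 * Seqnos are compared with unsigned 32-bit wrap-around arithmetic
 * throughout this file: "a - b < VMW_FENCE_WRAP" is true iff b lies at
 * most half the 2^32 ring behind a. For example, with a == 2 and
 * b == 0xffffffff the subtraction wraps to 3, which is < 2^31, so b
 * still counts as passed even though it numerically exceeds a. This
 * keeps the comparisons correct across seqno counter wrap.
 */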

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};
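
/*
 * Locking summary for the manager above: @lock protects the fence and
 * cleanup lists and doubles as the dma_fence lock of every fence created
 * here, while @goal_irq_mutex serializes fence-goal irq on/off
 * transitions against the cleanup worker, per the member comments.
 */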

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
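
/*
 * The container_of() above works because every fence created by a
 * manager is initialized with &fman->lock as its dma_fence lock (see
 * vmw_fence_obj_init() below), so a fence's lock pointer always points
 * into its owning manager.
 */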

/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}
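
/*
 * Per the dma_fence contract, returning false from enable_signaling
 * tells the core that the fence has already signaled, which is the case
 * here when the device's passed seqno has caught up with this fence.
 * Otherwise the host is pinged so the fifo gets processed and a fence
 * irq is eventually raised.
 */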

struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock(f->lock);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
		goto out;

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	for (;;) {
		__vmw_fences_update(fman);

		/*
		 * We can use the barrier free __set_current_state() since
		 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
		 * fence spinlock.
		 */
		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
			if (ret == 0 && timeout > 0)
				ret = 1;
			break;
		}

		if (intr && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (ret == 0)
			break;

		spin_unlock(f->lock);

		ret = schedule_timeout(ret);

		spin_lock(f->lock);
	}
	__set_current_state(TASK_RUNNING);
	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);

out:
	spin_unlock(f->lock);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}
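
/*
 * The wait above open-codes what dma_fence_default_wait() would do
 * because, as the irq note earlier in this file explains, the device
 * signals fences lazily; each wakeup therefore calls
 * __vmw_fences_update() to pick up seqnos the hardware has passed
 * without raising an irq.
 */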

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};
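
/*
 * Note that .release does not simply free the fence: vmw_fence_obj_destroy()
 * first unlinks the fence from its manager's list and then calls the
 * fence's own destroy callback (dma_fence_free() for kernel fences,
 * ttm_base_object_kfree() for user fences).
 */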

/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) +
		TTM_OBJ_EXTRA_SIZE;
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}

/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}

/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}

/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not a
 * problem.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}

int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
		(wait_timeout >> 26);
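
	/*
	 * Why those shifts approximate a divide by 10^6:
	 * 2^-20 + 2^-24 - 2^-26 = (64 + 4 - 1) / 2^26 = 67 / 67108864,
	 * which is roughly 1 / 1001624, i.e. within about 0.2% of
	 * 1 / 1000000. That is more than accurate enough for a jiffies
	 * timeout.
	 */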

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}

int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence - The fence object.
 * @action - The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-NULL, the address the signal time tv_sec value is
 * written to.
 * @tv_usec: Must be set if @tv_sec is set; the address the signal time
 * tv_usec value is written to.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
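
/*
 * The wrapper above pairs the kernel-side bookkeeping (base) with the
 * userspace-visible payload (event); drm_event_reserve_init() below
 * points base.event at event.base, so the two halves travel together
 * until the event is sent or canceled.
 */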

static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}