/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)
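
/*
 * Seqnos are 32-bit and wrap around, so "has seqno B passed by the time the
 * device reports seqno A" is decided with unsigned subtraction:
 * (A - B) < VMW_FENCE_WRAP means B is at most 2^31 - 1 steps behind A and is
 * therefore considered passed. For example, with A = 2 and B = 0xfffffffe
 * the difference wraps to 4, so B counts as passed even though it is
 * numerically larger.
 */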
struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	u64 ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};
/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};
static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
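
/*
 * fman_from_fence() works because vmw_fence_obj_init() below passes
 * &fman->lock to dma_fence_init(), so every fence's base.lock points into
 * its owning struct vmw_fence_manager and container_of() can recover that
 * manager.
 */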
/*
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */
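
/*
 * In this file the two irq paths show up as helper pairs: sleeping waiters
 * use vmw_seqno_waiter_add()/vmw_seqno_waiter_remove() for the ANY_FENCE
 * irq, while attached actions drive vmw_goal_waiter_add()/
 * vmw_goal_waiter_remove() for the FENCE_GOAL irq (see
 * vmw_fence_obj_add_action() and vmw_fence_work_func()).
 */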
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);

	spin_lock(&fman->lock);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock(&fman->lock);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	u32 *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	return true;
}
struct vmwgfx_wait_cb {
	struct dma_fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);
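
/*
 * vmw_fence_wait() below implements a driver-specific .wait callback rather
 * than relying on the default dma_fence wait: the ANY_FENCE irq is only
 * unmasked while seqno waiters are registered, so the waiter adds itself
 * with vmw_seqno_waiter_add() and re-checks the device seqno through
 * __vmw_fences_update() each time it is woken up.
 */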
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}
static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};
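
/*
 * Every vmw fence is bound to this ops table by the dma_fence_init() call in
 * vmw_fence_obj_init() below, which also supplies the shared &fman->lock and
 * the fence context allocated in vmw_fence_manager_init().
 */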
/*
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}
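
/*
 * A minimal usage sketch (the dev_priv->fman field name is illustrative, not
 * dictated by this file): the driver calls vmw_fence_manager_init() once at
 * load time, e.g. dev_priv->fman = vmw_fence_manager_init(dev_priv), and
 * pairs it with vmw_fence_manager_takedown(dev_priv->fman) at unload, after
 * the fifo has been brought down so the fence and cleanup lists are empty.
 */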
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list) &&
		list_empty(&fman->cleanup_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}
/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	u32 *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			vmw_mmio_write(fence->base.seqno,
				       fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}
/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	u32 *fifo_mem;

	if (dma_fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	u32 *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			dma_fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}
void vmw_fences_update(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	__vmw_fences_update(fman);
	spin_unlock(&fman->lock);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}
void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}
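
/*
 * vmw_fence_create() below is the kernel-internal constructor: callers such
 * as the command-submission path (vmw_execbuf_fence_commands(), used later
 * in this file) pass in the seqno they obtained for the fence and get back a
 * fence whose destructor simply frees the dma_fence. User-visible fences go
 * through vmw_user_fence_create() instead.
 */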
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}
static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false
	};
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   &ctx);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}
/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
		       struct dma_fence *fence)
{
	struct dma_fence_array *fence_array;
	int ret = 0;
	int i;

	if (dma_fence_is_signaled(fence))
		return 0;

	if (!dma_fence_is_array(fence))
		return dma_fence_wait(fence, true);

	/* From i915: Note that if the fence-array was created in
	 * signal-on-any mode, we should *not* decompose it into its individual
	 * fences. However, we don't currently store which mode the fence-array
	 * is operating in. Fortunately, the only user of signal-on-any is
	 * private to amdgpu and we should not see any incoming fence-array
	 * from sync-file being in signal-on-any mode.
	 */

	fence_array = to_dma_fence_array(fence);
	for (i = 0; i < fence_array->num_fences; i++) {
		struct dma_fence *child = fence_array->fences[i];

		ret = dma_fence_wait(child, true);

		if (ret < 0)
			return ret;
	}

	return 0;
}
/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}
/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
		(struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000).
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);
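
	/*
	 * The shift approximation works because 1/2^20 + 1/2^24 - 1/2^26
	 * = (64 + 4 - 1)/2^26 = 67/2^26, roughly 1/1001625, i.e. within
	 * about 0.2% of dividing by 1000000, which is plenty accurate for
	 * a jiffies-granularity timeout.
	 */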
	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
}
/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}
/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock(&fman->lock);

	fman->pending_actions[action->type]++;
	if (dma_fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock(&fman->lock);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);
}
/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */
int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}
struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};
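
/*
 * A rough sketch of the userspace side (not part of this file): the event is
 * delivered through the drm file descriptor, so after going through the
 * fence-event ioctl handled below a client read()s a struct drm_event header
 * with type DRM_VMW_EVENT_FENCE_SIGNALED followed by the drm_vmw_event_fence
 * payload declared above.
 */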
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1, NULL);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}