/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
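
/*
 * A marker records a submitted command-stream seqno together with the
 * raw-monotonic time at which it was pushed, so that FIFO lag can be
 * measured once the seqno signals.
 */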
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	struct timespec submitted;
};
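
/* Set up an empty marker queue with zero accumulated lag. */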
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = ns_to_timespec(0);
	getrawmonotonic(&queue->lag_time);
	spin_lock_init(&queue->lock);
}
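
/* Free every marker still on the queue. */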
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_unlock(&queue->lock);
}
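
/*
 * Append a new marker stamped with @seqno and the current raw-monotonic
 * time to the tail of the queue.
 */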
int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

	if (unlikely(!marker))
		return -ENOMEM;

	marker->seqno = seqno;
	getrawmonotonic(&marker->submitted);
	spin_lock(&queue->lock);
	list_add_tail(&marker->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}
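
/*
 * Retire all markers whose seqno has signaled and refresh the queue's
 * lag sample. Returns 0 if the lag was updated, -EBUSY if no marker had
 * signaled yet. The (1 << 30) window treats the 32-bit seqno space as a
 * ring: an unsigned difference larger than that means the marker's seqno
 * is still ahead of @signaled_seqno, so it stays on the queue.
 */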
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	struct timespec now;
	bool updated = false;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);

	if (list_empty(&queue->head)) {
		queue->lag = ns_to_timespec(0);
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = timespec_sub(now, marker->submitted);
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
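
/* Sum two timespecs, carrying nanosecond overflow into the seconds field. */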
static struct timespec vmw_timespec_add(struct timespec t1,
					struct timespec t2)
{
	t1.tv_sec += t2.tv_sec;
	t1.tv_nsec += t2.tv_nsec;
	if (t1.tv_nsec >= 1000000000L) {
		t1.tv_sec += 1;
		t1.tv_nsec -= 1000000000L;
	}

	return t1;
}
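
/*
 * Return the current FIFO lag: the accumulated lag plus the raw-monotonic
 * time elapsed since it was last sampled.
 */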
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	struct timespec now;

	spin_lock(&queue->lock);
	getrawmonotonic(&now);
	queue->lag = vmw_timespec_add(queue->lag,
				      timespec_sub(now, queue->lag_time));
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}
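
/* Check whether the current lag is below @us microseconds. */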
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	struct timespec lag, cond;

	cond = ns_to_timespec((s64) us * 1000);
	lag = vmw_fifo_lag(queue);
	return (timespec_compare(&lag, &cond) < 1);
}
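
/*
 * Block until the FIFO lag drops below @us microseconds, waiting on the
 * oldest outstanding marker (or on the most recently emitted seqno when
 * the queue is empty) and retiring markers as their seqnos signal.
 *
 * A throttling caller might use it as, e.g. (hypothetical call site):
 *	vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, 1000);
 */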
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}