/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
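
/*
 * A marker pairs the seqno of a submitted command stream with the time
 * it was submitted, so the driver can estimate how far the device lags
 * behind marker submission.
 */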
struct vmw_marker {
	struct list_head head;
	uint32_t seqno;
	u64 submitted;
};
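
/* Initialize an empty marker queue with no accumulated lag. */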
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
	INIT_LIST_HEAD(&queue->head);
	queue->lag = 0;
	queue->lag_time = ktime_get_raw_ns();
	spin_lock_init(&queue->lock);
}
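
/* Free any markers still left on the queue. */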
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
	struct vmw_marker *marker, *next;

	spin_lock(&queue->lock);
	list_for_each_entry_safe(marker, next, &queue->head, head) {
		kfree(marker);
	}
	spin_unlock(&queue->lock);
}
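
/* Add a marker for @seqno, timestamped now, to the tail of the queue. */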
int vmw_marker_push(struct vmw_marker_queue *queue,
		    uint32_t seqno)
{
	struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

	if (unlikely(!marker))
		return -ENOMEM;

	marker->seqno = seqno;
	marker->submitted = ktime_get_raw_ns();
	spin_lock(&queue->lock);
	list_add_tail(&marker->head, &queue->head);
	spin_unlock(&queue->lock);

	return 0;
}
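
/*
 * Retire all markers whose seqno has been signaled and refresh the lag
 * estimate from the newest of them. Returns -EBUSY if the queue was not
 * empty and no marker could be retired.
 */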
int vmw_marker_pull(struct vmw_marker_queue *queue,
		    uint32_t signaled_seqno)
{
	struct vmw_marker *marker, *next;
	bool updated = false;
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();

	if (list_empty(&queue->head)) {
		queue->lag = 0;
		queue->lag_time = now;
		updated = true;
		goto out_unlock;
	}

	list_for_each_entry_safe(marker, next, &queue->head, head) {
		if (signaled_seqno - marker->seqno > (1 << 30))
			continue;

		queue->lag = now - marker->submitted;
		queue->lag_time = now;
		updated = true;
		list_del(&marker->head);
		kfree(marker);
	}

out_unlock:
	spin_unlock(&queue->lock);

	return (updated) ? 0 : -EBUSY;
}
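
/* Add the time elapsed since the last update to the lag and return it. */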
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
	u64 now;

	spin_lock(&queue->lock);
	now = ktime_get_raw_ns();
	queue->lag += now - queue->lag_time;
	queue->lag_time = now;
	spin_unlock(&queue->lock);
	return queue->lag;
}
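
/* Return true if the accumulated lag is at most @us microseconds. */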
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
		       uint32_t us)
{
	u64 cond = (u64) us * NSEC_PER_USEC;

	return vmw_fifo_lag(queue) <= cond;
}
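
/*
 * Block until the lag drops below @us microseconds, repeatedly waiting
 * for the oldest outstanding marker (or the most recently issued seqno
 * if the queue is empty) to signal and then retiring it.
 */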
int vmw_wait_lag(struct vmw_private *dev_priv,
		 struct vmw_marker_queue *queue, uint32_t us)
{
	struct vmw_marker *marker;
	uint32_t seqno;
	int ret;

	while (!vmw_lag_lt(queue, us)) {
		spin_lock(&queue->lock);
		if (list_empty(&queue->head))
			seqno = atomic_read(&dev_priv->marker_seq);
		else {
			marker = list_first_entry(&queue->head,
						  struct vmw_marker, head);
			seqno = marker->seqno;
		}
		spin_unlock(&queue->lock);

		ret = vmw_wait_seqno(dev_priv, false, seqno, true,
				     3*HZ);

		if (unlikely(ret != 0))
			return ret;

		(void) vmw_marker_pull(queue, seqno);
	}
	return 0;
}