/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "priv.h"

s64
nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
{
        struct nvkm_subdev *subdev = &wait->tmr->subdev;
        u64 time = nvkm_timer_read(wait->tmr);

        if (wait->reads == 0) {
                wait->time0 = time;
                wait->time1 = time;
        }

        if (wait->time1 == time) {
                if (wait->reads++ == 16) {
                        nvkm_fatal(subdev, "stalled at %016llx\n", time);
                        return -ETIMEDOUT;
                }
        } else {
                wait->time1 = time;
                wait->reads = 1;
        }

        if (wait->time1 - wait->time0 > wait->limit)
                return -ETIMEDOUT;

        return wait->time1 - wait->time0;
}

void
nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
                     struct nvkm_timer_wait *wait)
{
        wait->tmr = device->timer;
        wait->limit = nsec;
        wait->reads = 0;
}

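/* Usage sketch (illustrative, not part of the original file): poll for a
 * condition with an upper time limit.  In-tree callers normally reach this
 * pair of functions through the nvkm_nsec()/nvkm_usec()/nvkm_msec() macros
 * in <subdev/timer.h>; the open-coded loop below only shows the calling
 * pattern, and condition_met() is a hypothetical helper.
 *
 *      struct nvkm_timer_wait wait;
 *      s64 taken;
 *
 *      nvkm_timer_wait_init(device, 2000000, &wait);   // 2ms budget, in ns
 *      do {
 *              if (condition_met(device))
 *                      break;
 *      } while ((taken = nvkm_timer_wait_test(&wait)) >= 0);
 *      // taken < 0 (-ETIMEDOUT) means the budget expired or time stalled
 */
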
u64
nvkm_timer_read(struct nvkm_timer *tmr)
{
        return tmr->func->read(tmr);
}

void
nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
{
        struct nvkm_alarm *alarm, *atemp;
        unsigned long flags;
        LIST_HEAD(exec);

        /* Process pending alarms. */
        spin_lock_irqsave(&tmr->lock, flags);
        list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
                /* Have we hit the earliest alarm that hasn't gone off? */
                if (alarm->timestamp > nvkm_timer_read(tmr)) {
                        /* Schedule it.  If we didn't race, we're done. */
                        tmr->func->alarm_init(tmr, alarm->timestamp);
                        if (alarm->timestamp > nvkm_timer_read(tmr))
                                break;
                }

                /* Move to completed list.  We'll drop the lock before
                 * executing the callback so it can reschedule itself.
                 */
                list_del_init(&alarm->head);
                list_add(&alarm->exec, &exec);
        }

        /* Shut down interrupt if no more pending alarms. */
        if (list_empty(&tmr->alarms))
                tmr->func->alarm_fini(tmr);
        spin_unlock_irqrestore(&tmr->lock, flags);

        /* Execute completed callbacks. */
        list_for_each_entry_safe(alarm, atemp, &exec, exec) {
                list_del(&alarm->exec);
                alarm->func(alarm);
        }
}

void
nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
{
        struct nvkm_alarm *list;
        unsigned long flags;

        /* Remove alarm from pending list.
         *
         * This both protects against the corruption of the list,
         * and implements alarm rescheduling/cancellation.
         */
        spin_lock_irqsave(&tmr->lock, flags);
        list_del_init(&alarm->head);

        if (nsec) {
                /* Insert into pending list, ordered earliest to latest. */
                alarm->timestamp = nvkm_timer_read(tmr) + nsec;
                list_for_each_entry(list, &tmr->alarms, head) {
                        if (list->timestamp > alarm->timestamp)
                                break;
                }

                list_add_tail(&alarm->head, &list->head);

                /* Update HW if this is now the earliest alarm. */
                list = list_first_entry(&tmr->alarms, typeof(*list), head);
                if (list == alarm) {
                        tmr->func->alarm_init(tmr, alarm->timestamp);
                        /* This shouldn't happen if callers aren't stupid.
                         *
                         * Worst case scenario is that it'll take roughly
                         * 4 seconds for the next alarm to trigger.
                         */
                        WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
                }
        }

        spin_unlock_irqrestore(&tmr->lock, flags);
}

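/* Usage sketch (illustrative, not part of the original file): schedule a
 * callback roughly 1ms from now, then cancel it.  nvkm_alarm_init() is
 * assumed to be the <subdev/timer.h> helper that sets up the list heads and
 * callback pointer; passing nsec == 0 to nvkm_timer_alarm() unlinks a
 * pending alarm without rescheduling it (see the if (nsec) branch above).
 *
 *      static void my_handler(struct nvkm_alarm *alarm)   // hypothetical
 *      {
 *              // runs outside tmr->lock, may reschedule itself
 *      }
 *
 *      struct nvkm_alarm alarm;
 *      nvkm_alarm_init(&alarm, my_handler);
 *      nvkm_timer_alarm(tmr, 1000000, &alarm);   // fire in ~1ms
 *      ...
 *      nvkm_timer_alarm(tmr, 0, &alarm);         // cancel if still pending
 */
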
static void
nvkm_timer_intr(struct nvkm_subdev *subdev)
{
        struct nvkm_timer *tmr = nvkm_timer(subdev);
        tmr->func->intr(tmr);
}

static int
nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
{
        struct nvkm_timer *tmr = nvkm_timer(subdev);
        tmr->func->alarm_fini(tmr);
        return 0;
}

static int
nvkm_timer_init(struct nvkm_subdev *subdev)
{
        struct nvkm_timer *tmr = nvkm_timer(subdev);
        if (tmr->func->init)
                tmr->func->init(tmr);
        tmr->func->time(tmr, ktime_to_ns(ktime_get()));
        nvkm_timer_alarm_trigger(tmr);
        return 0;
}

static void *
nvkm_timer_dtor(struct nvkm_subdev *subdev)
{
        return nvkm_timer(subdev);
}

static const struct nvkm_subdev_func
nvkm_timer = {
        .dtor = nvkm_timer_dtor,
        .init = nvkm_timer_init,
        .fini = nvkm_timer_fini,
        .intr = nvkm_timer_intr,
};

int
nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
                int index, struct nvkm_timer **ptmr)
{
        struct nvkm_timer *tmr;

        if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
                return -ENOMEM;

        nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
        tmr->func = func;
        INIT_LIST_HEAD(&tmr->alarms);
        spin_lock_init(&tmr->lock);
        return 0;
}

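/* Constructor sketch (illustrative, hypothetical chipset names): a
 * chipset-specific timer supplies an nvkm_timer_func with the read/time/
 * alarm_init/alarm_fini/intr hooks used above and forwards to
 * nvkm_timer_new_():
 *
 *      static const struct nvkm_timer_func
 *      nvXX_timer = {
 *              .init = nvXX_timer_init,
 *              .intr = nvXX_timer_intr,
 *              .read = nvXX_timer_read,
 *              .time = nvXX_timer_time,
 *              .alarm_init = nvXX_timer_alarm_init,
 *              .alarm_fini = nvXX_timer_alarm_fini,
 *      };
 *
 *      int
 *      nvXX_timer_new(struct nvkm_device *device, int index,
 *                     struct nvkm_timer **ptmr)
 *      {
 *              return nvkm_timer_new_(&nvXX_timer, device, index, ptmr);
 *      }
 */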