// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 */
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "intr.h"
18 /* Wait list management */
27 static void waiter_release(struct kref
*kref
)
29 kfree(container_of(kref
, struct host1x_waitlist
, refcount
));
33 * add a waiter to a waiter queue, sorted by threshold
34 * returns true if it was added at the head of the queue
36 static bool add_waiter_to_queue(struct host1x_waitlist
*waiter
,
37 struct list_head
*queue
)
39 struct host1x_waitlist
*pos
;
40 u32 thresh
= waiter
->thresh
;
42 list_for_each_entry_reverse(pos
, queue
, list
)
43 if ((s32
)(pos
->thresh
- thresh
) <= 0) {
44 list_add(&waiter
->list
, &pos
->list
);
48 list_add(&waiter
->list
, queue
);
53 * run through a waiter queue for a single sync point ID
54 * and gather all completed waiters into lists by actions
56 static void remove_completed_waiters(struct list_head
*head
, u32 sync
,
57 struct list_head completed
[HOST1X_INTR_ACTION_COUNT
])
59 struct list_head
*dest
;
60 struct host1x_waitlist
*waiter
, *next
, *prev
;
62 list_for_each_entry_safe(waiter
, next
, head
, list
) {
63 if ((s32
)(waiter
->thresh
- sync
) > 0)
66 dest
= completed
+ waiter
->action
;
68 /* consolidate submit cleanups */
69 if (waiter
->action
== HOST1X_INTR_ACTION_SUBMIT_COMPLETE
&&
71 prev
= list_entry(dest
->prev
,
72 struct host1x_waitlist
, list
);
73 if (prev
->data
== waiter
->data
) {
79 /* PENDING->REMOVED or CANCELLED->HANDLED */
80 if (atomic_inc_return(&waiter
->state
) == WLS_HANDLED
|| !dest
) {
81 list_del(&waiter
->list
);
82 kref_put(&waiter
->refcount
, waiter_release
);
84 list_move_tail(&waiter
->list
, dest
);
88 static void reset_threshold_interrupt(struct host1x
*host
,
89 struct list_head
*head
,
93 list_first_entry(head
, struct host1x_waitlist
, list
)->thresh
;
95 host1x_hw_intr_set_syncpt_threshold(host
, id
, thresh
);
96 host1x_hw_intr_enable_syncpt_intr(host
, id
);
99 static void action_submit_complete(struct host1x_waitlist
*waiter
)
101 struct host1x_channel
*channel
= waiter
->data
;
103 host1x_cdma_update(&channel
->cdma
);
105 /* Add nr_completed to trace */
106 trace_host1x_channel_submit_complete(dev_name(channel
->dev
),
107 waiter
->count
, waiter
->thresh
);
111 static void action_wakeup(struct host1x_waitlist
*waiter
)
113 wait_queue_head_t
*wq
= waiter
->data
;
118 static void action_wakeup_interruptible(struct host1x_waitlist
*waiter
)
120 wait_queue_head_t
*wq
= waiter
->data
;
122 wake_up_interruptible(wq
);
125 typedef void (*action_handler
)(struct host1x_waitlist
*waiter
);
127 static const action_handler action_handlers
[HOST1X_INTR_ACTION_COUNT
] = {
128 action_submit_complete
,
130 action_wakeup_interruptible
,
133 static void run_handlers(struct list_head completed
[HOST1X_INTR_ACTION_COUNT
])
135 struct list_head
*head
= completed
;
138 for (i
= 0; i
< HOST1X_INTR_ACTION_COUNT
; ++i
, ++head
) {
139 action_handler handler
= action_handlers
[i
];
140 struct host1x_waitlist
*waiter
, *next
;
142 list_for_each_entry_safe(waiter
, next
, head
, list
) {
143 list_del(&waiter
->list
);
145 WARN_ON(atomic_xchg(&waiter
->state
, WLS_HANDLED
) !=
147 kref_put(&waiter
->refcount
, waiter_release
);
153 * Remove & handle all waiters that have completed for the given syncpt
155 static int process_wait_list(struct host1x
*host
,
156 struct host1x_syncpt
*syncpt
,
159 struct list_head completed
[HOST1X_INTR_ACTION_COUNT
];
163 for (i
= 0; i
< HOST1X_INTR_ACTION_COUNT
; ++i
)
164 INIT_LIST_HEAD(completed
+ i
);
166 spin_lock(&syncpt
->intr
.lock
);
168 remove_completed_waiters(&syncpt
->intr
.wait_head
, threshold
,
171 empty
= list_empty(&syncpt
->intr
.wait_head
);
173 host1x_hw_intr_disable_syncpt_intr(host
, syncpt
->id
);
175 reset_threshold_interrupt(host
, &syncpt
->intr
.wait_head
,
178 spin_unlock(&syncpt
->intr
.lock
);
180 run_handlers(completed
);
186 * Sync point threshold interrupt service thread function
187 * Handles sync point threshold triggers, in thread context
190 static void syncpt_thresh_work(struct work_struct
*work
)
192 struct host1x_syncpt_intr
*syncpt_intr
=
193 container_of(work
, struct host1x_syncpt_intr
, work
);
194 struct host1x_syncpt
*syncpt
=
195 container_of(syncpt_intr
, struct host1x_syncpt
, intr
);
196 unsigned int id
= syncpt
->id
;
197 struct host1x
*host
= syncpt
->host
;
199 (void)process_wait_list(host
, syncpt
,
200 host1x_syncpt_load(host
->syncpt
+ id
));
203 int host1x_intr_add_action(struct host1x
*host
, struct host1x_syncpt
*syncpt
,
204 u32 thresh
, enum host1x_intr_action action
,
205 void *data
, struct host1x_waitlist
*waiter
,
210 if (waiter
== NULL
) {
211 pr_warn("%s: NULL waiter\n", __func__
);
215 /* initialize a new waiter */
216 INIT_LIST_HEAD(&waiter
->list
);
217 kref_init(&waiter
->refcount
);
219 kref_get(&waiter
->refcount
);
220 waiter
->thresh
= thresh
;
221 waiter
->action
= action
;
222 atomic_set(&waiter
->state
, WLS_PENDING
);
226 spin_lock(&syncpt
->intr
.lock
);
228 queue_was_empty
= list_empty(&syncpt
->intr
.wait_head
);
230 if (add_waiter_to_queue(waiter
, &syncpt
->intr
.wait_head
)) {
231 /* added at head of list - new threshold value */
232 host1x_hw_intr_set_syncpt_threshold(host
, syncpt
->id
, thresh
);
234 /* added as first waiter - enable interrupt */
236 host1x_hw_intr_enable_syncpt_intr(host
, syncpt
->id
);
239 spin_unlock(&syncpt
->intr
.lock
);
246 void host1x_intr_put_ref(struct host1x
*host
, unsigned int id
, void *ref
)
248 struct host1x_waitlist
*waiter
= ref
;
249 struct host1x_syncpt
*syncpt
;
251 while (atomic_cmpxchg(&waiter
->state
, WLS_PENDING
, WLS_CANCELLED
) ==
255 syncpt
= host
->syncpt
+ id
;
256 (void)process_wait_list(host
, syncpt
,
257 host1x_syncpt_load(host
->syncpt
+ id
));
259 kref_put(&waiter
->refcount
, waiter_release
);
262 int host1x_intr_init(struct host1x
*host
, unsigned int irq_sync
)
265 u32 nb_pts
= host1x_syncpt_nb_pts(host
);
267 mutex_init(&host
->intr_mutex
);
268 host
->intr_syncpt_irq
= irq_sync
;
270 for (id
= 0; id
< nb_pts
; ++id
) {
271 struct host1x_syncpt
*syncpt
= host
->syncpt
+ id
;
273 spin_lock_init(&syncpt
->intr
.lock
);
274 INIT_LIST_HEAD(&syncpt
->intr
.wait_head
);
275 snprintf(syncpt
->intr
.thresh_irq_name
,
276 sizeof(syncpt
->intr
.thresh_irq_name
),
277 "host1x_sp_%02u", id
);
280 host1x_intr_start(host
);
/* Tear down interrupt handling; counterpart of host1x_intr_init() */
void host1x_intr_deinit(struct host1x *host)
{
	host1x_intr_stop(host);
}
290 void host1x_intr_start(struct host1x
*host
)
292 u32 hz
= clk_get_rate(host
->clk
);
295 mutex_lock(&host
->intr_mutex
);
296 err
= host1x_hw_intr_init_host_sync(host
, DIV_ROUND_UP(hz
, 1000000),
299 mutex_unlock(&host
->intr_mutex
);
302 mutex_unlock(&host
->intr_mutex
);
305 void host1x_intr_stop(struct host1x
*host
)
308 struct host1x_syncpt
*syncpt
= host
->syncpt
;
309 u32 nb_pts
= host1x_syncpt_nb_pts(host
);
311 mutex_lock(&host
->intr_mutex
);
313 host1x_hw_intr_disable_all_syncpt_intrs(host
);
315 for (id
= 0; id
< nb_pts
; ++id
) {
316 struct host1x_waitlist
*waiter
, *next
;
318 list_for_each_entry_safe(waiter
, next
,
319 &syncpt
[id
].intr
.wait_head
, list
) {
320 if (atomic_cmpxchg(&waiter
->state
,
321 WLS_CANCELLED
, WLS_HANDLED
) == WLS_CANCELLED
) {
322 list_del(&waiter
->list
);
323 kref_put(&waiter
->refcount
, waiter_release
);
327 if (!list_empty(&syncpt
[id
].intr
.wait_head
)) {
328 /* output diagnostics */
329 mutex_unlock(&host
->intr_mutex
);
330 pr_warn("%s cannot stop syncpt intr id=%u\n",
336 host1x_hw_intr_free_syncpt_irq(host
);
338 mutex_unlock(&host
->intr_mutex
);