// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2021, NVIDIA Corporation.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include "dev.h"
#include "fence.h"
#include "intr.h"
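
/*
 * Pending fences for each syncpoint are kept in a per-syncpoint list sorted
 * by threshold. The hardware comparator for a syncpoint is always programmed
 * to the lowest pending threshold, so a single interrupt per syncpoint is
 * enough to signal every expired fence in order.
 */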

/* Insert the fence so the list stays sorted by threshold (signed difference handles wraparound). */
static void host1x_intr_add_fence_to_list(struct host1x_fence_list *list,
					   struct host1x_syncpt_fence *fence)
{
	struct host1x_syncpt_fence *fence_in_list;

	list_for_each_entry_reverse(fence_in_list, &list->list, list) {
		if ((s32)(fence_in_list->threshold - fence->threshold) <= 0) {
			/* Fence in list is before us, we can insert here */
			list_add(&fence->list, &fence_in_list->list);
			return;
		}
	}

	/* Add as first in list */
	list_add(&fence->list, &list->list);
}

/* Program the syncpoint interrupt for the earliest pending fence, if any. */
static void host1x_intr_update_hw_state(struct host1x *host, struct host1x_syncpt *sp)
{
	struct host1x_syncpt_fence *fence;

	if (!list_empty(&sp->fences.list)) {
		fence = list_first_entry(&sp->fences.list, struct host1x_syncpt_fence, list);

		host1x_hw_intr_set_syncpt_threshold(host, sp->id, fence->threshold);
		host1x_hw_intr_enable_syncpt_intr(host, sp->id);
	} else {
		host1x_hw_intr_disable_syncpt_intr(host, sp->id);
	}
}

/* Add a fence; the caller must hold the syncpoint's fence list lock. */
void host1x_intr_add_fence_locked(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;

	INIT_LIST_HEAD(&fence->list);

	host1x_intr_add_fence_to_list(fence_list, fence);
	host1x_intr_update_hw_state(host, fence->sp);
}

/* Remove a fence from its syncpoint's list; returns false if it was no longer queued. */
bool host1x_intr_remove_fence(struct host1x *host, struct host1x_syncpt_fence *fence)
{
	struct host1x_fence_list *fence_list = &fence->sp->fences;
	unsigned long irqflags;

	spin_lock_irqsave(&fence_list->lock, irqflags);

	if (list_empty(&fence->list)) {
		spin_unlock_irqrestore(&fence_list->lock, irqflags);
		return false;
	}

	list_del_init(&fence->list);
	host1x_intr_update_hw_state(host, fence->sp);

	spin_unlock_irqrestore(&fence_list->lock, irqflags);

	return true;
}

void host1x_intr_handle_interrupt(struct host1x *host, unsigned int id)
{
	struct host1x_syncpt *sp = &host->syncpt[id];
	struct host1x_syncpt_fence *fence, *tmp;
	unsigned int value;

	value = host1x_syncpt_load(sp);

	spin_lock(&sp->fences.lock);

	list_for_each_entry_safe(fence, tmp, &sp->fences.list, list) {
		/* Sign bit of (value - threshold) is set while the threshold is still ahead (wraparound-safe). */
		if (((value - fence->threshold) & 0x80000000U) != 0U) {
			/* Fence is not yet expired, we are done */
			break;
		}

		list_del_init(&fence->list);
		host1x_fence_signal(fence);
	}

	/* Re-enable interrupt if necessary */
	host1x_intr_update_hw_state(host, sp);

	spin_unlock(&sp->fences.lock);
}

int host1x_intr_init(struct host1x *host)
{
	struct host1x_intr_irq_data *irq_data;
	unsigned int id;
	int i, err;

	mutex_init(&host->intr_mutex);

	for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
		struct host1x_syncpt *syncpt = &host->syncpt[id];

		spin_lock_init(&syncpt->fences.lock);
		INIT_LIST_HEAD(&syncpt->fences.list);
	}

	irq_data = devm_kcalloc(host->dev, host->num_syncpt_irqs, sizeof(irq_data[0]), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	host1x_hw_intr_disable_all_syncpt_intrs(host);

	for (i = 0; i < host->num_syncpt_irqs; i++) {
		irq_data[i].host = host;
		irq_data[i].offset = i;

		err = devm_request_irq(host->dev, host->syncpt_irqs[i],
				       host->intr_op->isr, IRQF_SHARED,
				       "host1x_syncpt", &irq_data[i]);
		if (err < 0)
			return err;
	}

	return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
}

void host1x_intr_start(struct host1x *host)
{
	u32 hz = clk_get_rate(host->clk);
	int err;

	mutex_lock(&host->intr_mutex);
	/* DIV_ROUND_UP(hz, 1000000) is the clock rate in MHz, i.e. host1x clocks per microsecond. */
	err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000));
	if (err) {
		mutex_unlock(&host->intr_mutex);
		return;
	}
	mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
	host1x_hw_intr_disable_all_syncpt_intrs(host);
}