// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

/**
 * DOC: Interrupt management for the V3D engine
 *
 * When we take a bin, render, TFU done, or CSD done interrupt, we
 * need to signal the fence for that job so that the scheduler can
 * queue up the next one and unblock any waiters.
 *
 * When we take the binner out of memory interrupt, we need to
 * allocate some new memory and pass it to the binner so that the
 * current job can make progress.
 */

#include <linux/platform_device.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"
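
/* The sets of interrupts we handle on the core and the hub, used both
 * to unmask exactly these sources and to clear anything pending at
 * init, disable, and reset time.
 */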
#define V3D_CORE_IRQS ((u32)(V3D_INT_OUTOMEM |	\
			     V3D_INT_FLDONE |	\
			     V3D_INT_FRDONE |	\
			     V3D_INT_CSDDONE |	\
			     V3D_INT_GMPV))

#define V3D_HUB_IRQS ((u32)(V3D_HUB_INT_MMU_WRV |	\
			    V3D_HUB_INT_MMU_PTI |	\
			    V3D_HUB_INT_MMU_CAP |	\
			    V3D_HUB_INT_TFUC))

static irqreturn_t
v3d_hub_irq(int irq, void *arg);
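
/* Worker for the binner out-of-memory interrupt: runs in process
 * context so it can allocate a new 256KB overflow BO and hand it to
 * the stalled bin job.
 */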
static void
v3d_overflow_mem_work(struct work_struct *work)
{
	struct v3d_dev *v3d =
		container_of(work, struct v3d_dev, overflow_mem_work);
	struct drm_device *dev = &v3d->drm;
	struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
	struct drm_gem_object *obj;
	unsigned long irqflags;

	if (IS_ERR(bo)) {
		DRM_ERROR("Couldn't allocate binner overflow mem\n");
		return;
	}
	obj = &bo->base.base;

	/* We lost a race, and our work task came in after the bin job
	 * completed and exited.  This can happen because the HW
	 * signals OOM before it's fully OOM, so the binner might just
	 * barely complete.
	 *
	 * If we lose the race and our work task comes in after a new
	 * bin job got scheduled, that's fine.  We'll just give them
	 * some binner pool anyway.
	 */
	spin_lock_irqsave(&v3d->job_lock, irqflags);
	if (!v3d->bin_job) {
		spin_unlock_irqrestore(&v3d->job_lock, irqflags);
		goto out;
	}

	drm_gem_object_get(obj);
	list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list);
	spin_unlock_irqrestore(&v3d->job_lock, irqflags);
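
	/* Hand the new buffer to the binner: overflow base address and
	 * size, so the stalled bin job can make progress.
	 */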
	V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
	V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);

out:
	drm_gem_object_put(obj);
}
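
/* IRQ handler for the per-core interrupts: bin/render/CSD completion,
 * binner out-of-memory, and GMP violations.
 */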
static irqreturn_t
v3d_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_CORE_READ(0, V3D_CTL_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_CORE_WRITE(0, V3D_CTL_INT_CLR, intsts);

	if (intsts & V3D_INT_OUTOMEM) {
		/* Note that the OOM status is edge signaled, so the
		 * interrupt won't happen again until we actually
		 * add more memory.  Also, as of V3D 4.1, FLDONE won't
		 * be reported until any OOM state has been cleared.
		 */
		schedule_work(&v3d->overflow_mem_work);
		status = IRQ_HANDLED;
	}
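
	/* For each of the done interrupts below, signal the fence of
	 * the job that completed so the scheduler can queue up the
	 * next one (see the DOC comment at the top of the file).
	 */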
	if (intsts & V3D_INT_FLDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->bin_job->base.irq_fence);

		trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_FRDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->render_job->base.irq_fence);

		trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & V3D_INT_CSDDONE) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->csd_job->base.irq_fence);

		trace_v3d_csd_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	/* We shouldn't be triggering these if we have GMP in
	 * always-allowed mode.
	 */
	if (intsts & V3D_INT_GMPV)
		dev_err(v3d->drm.dev, "GMP violation\n");

	/* V3D 4.2 wires the hub and core IRQs together, so if we
	 * didn't see the common one then check hub for MMU IRQs.
	 */
	if (v3d->single_irq_line && status == IRQ_NONE)
		return v3d_hub_irq(irq, arg);

	return status;
}
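
/* IRQ handler for the hub, which deals with TFU completion and MMU
 * faults.
 */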
static irqreturn_t
v3d_hub_irq(int irq, void *arg)
{
	struct v3d_dev *v3d = arg;
	u32 intsts;
	irqreturn_t status = IRQ_NONE;

	intsts = V3D_READ(V3D_HUB_INT_STS);

	/* Acknowledge the interrupts we're handling here. */
	V3D_WRITE(V3D_HUB_INT_CLR, intsts);

	if (intsts & V3D_HUB_INT_TFUC) {
		struct v3d_fence *fence =
			to_v3d_fence(v3d->tfu_job->base.irq_fence);

		trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
		dma_fence_signal(&fence->base);
		status = IRQ_HANDLED;
	}

	if (intsts & (V3D_HUB_INT_MMU_WRV |
		      V3D_HUB_INT_MMU_PTI |
		      V3D_HUB_INT_MMU_CAP)) {
		u32 axi_id = V3D_READ(V3D_MMU_VIO_ID);
		u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) <<
				(v3d->va_width - 32));
		static const char *const v3d41_axi_ids[] = {
			"L2T",
			"PTB",
			"PSE",
			"TLB",
			"CLE",
			"TFU",
			"MMU",
			"GMP",
		};
		const char *client = "?";

		V3D_WRITE(V3D_MMU_CTL,
			  V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED |
						   V3D_MMU_CTL_PT_INVALID |
						   V3D_MMU_CTL_WRITE_VIOLATION));
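
		/* On V3D 4.1+, the upper bits of the AXI ID encode
		 * which client issued the faulting access; translate
		 * them to a name for the error message below.
		 */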
		if (v3d->ver >= 41) {
			axi_id = axi_id >> 5;
			if (axi_id < ARRAY_SIZE(v3d41_axi_ids))
				client = v3d41_axi_ids[axi_id];
		}

		dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
			client, axi_id, (long long)vio_addr,
			((intsts & V3D_HUB_INT_MMU_WRV) ?
			 ", write violation" : ""),
			((intsts & V3D_HUB_INT_MMU_PTI) ?
			 ", pte invalid" : ""),
			((intsts & V3D_HUB_INT_MMU_CAP) ?
			 ", cap exceeded" : ""));
		status = IRQ_HANDLED;
	}

	return status;
}
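
/* Requests the platform IRQ line(s) and unmasks the interrupts we
 * handle.  Returns 0 on success or a negative error code.
 */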
int
v3d_irq_init(struct v3d_dev *v3d)
{
	int irq1, ret, core;

	INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);

	/* Clear any pending interrupts someone might have left around
	 * for us.
	 */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
	irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
	if (irq1 == -EPROBE_DEFER)
		return irq1;
	if (irq1 > 0) {
		ret = devm_request_irq(v3d->drm.dev, irq1,
				       v3d_irq, IRQF_SHARED,
				       "v3d_core0", v3d);
		if (ret)
			goto fail;
		ret = devm_request_irq(v3d->drm.dev,
				       platform_get_irq(v3d_to_pdev(v3d), 0),
				       v3d_hub_irq, IRQF_SHARED,
				       "v3d_hub", v3d);
		if (ret)
			goto fail;
	} else {
		v3d->single_irq_line = true;

		ret = devm_request_irq(v3d->drm.dev,
				       platform_get_irq(v3d_to_pdev(v3d), 0),
				       v3d_irq, IRQF_SHARED,
				       "v3d", v3d);
		if (ret)
			goto fail;
	}

	v3d_irq_enable(v3d);
	return 0;

fail:
	if (ret != -EPROBE_DEFER)
		dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
	return ret;
}

void
v3d_irq_enable(struct v3d_dev *v3d)
{
	int core;

	/* Enable our set of interrupts, masking out any others. */
	for (core = 0; core < v3d->cores; core++) {
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~V3D_CORE_IRQS);
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_CLR, V3D_CORE_IRQS);
	}

	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~V3D_HUB_IRQS);
	V3D_WRITE(V3D_HUB_INT_MSK_CLR, V3D_HUB_IRQS);
}
void
v3d_irq_disable(struct v3d_dev *v3d)
{
	int core;

	/* Disable all interrupts. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0);
	V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0);

	/* Clear any pending interrupts we might have left. */
	for (core = 0; core < v3d->cores; core++)
		V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
	V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);

	cancel_work_sync(&v3d->overflow_mem_work);
}

/** Reinitializes interrupt registers when a GPU reset is performed. */
void v3d_irq_reset(struct v3d_dev *v3d)
{
	v3d_irq_enable(v3d);
}
)