// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-resv.h>
#include <drm/gpu_scheduler.h>
#include <drm/panfrost_drm.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_job.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gem.h"
#include "panfrost_regs.h"
#include "panfrost_gpu.h"
#include "panfrost_mmu.h"

#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
#define job_read(dev, reg) readl(dev->iomem + (reg))
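
/*
 * Each hardware job slot is driven by its own drm_gpu_scheduler instance and
 * emits fences on its own dma_fence timeline (fence_context/emit_seqno below).
 */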
struct panfrost_queue_state {
	struct drm_gpu_scheduler sched;

	u64 fence_context;
	u64 emit_seqno;
};

struct panfrost_job_slot {
	struct panfrost_queue_state queue[NUM_JOB_SLOTS];
	spinlock_t job_lock;
};
static struct panfrost_job *
to_panfrost_job(struct drm_sched_job *sched_job)
{
	return container_of(sched_job, struct panfrost_job, base);
}
struct panfrost_fence {
	struct dma_fence base;
	struct drm_device *dev;
	/* panfrost seqno for signaled() test */
	u64 seqno;
	int queue;
};
static inline struct panfrost_fence *
to_panfrost_fence(struct dma_fence *fence)
{
	return (struct panfrost_fence *)fence;
}
static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
{
	return "panfrost";
}
static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
{
	struct panfrost_fence *f = to_panfrost_fence(fence);

	switch (f->queue) {
	case 0:
		return "panfrost-js-0";
	case 1:
		return "panfrost-js-1";
	case 2:
		return "panfrost-js-2";
	default:
		return NULL;
	}
}
static const struct dma_fence_ops panfrost_fence_ops = {
	.get_driver_name = panfrost_fence_get_driver_name,
	.get_timeline_name = panfrost_fence_get_timeline_name,
};
static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
{
	struct panfrost_fence *fence;
	struct panfrost_job_slot *js = pfdev->js;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->dev = pfdev->ddev;
	fence->queue = js_num;
	fence->seqno = ++js->queue[js_num].emit_seqno;
	dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
		       js->queue[js_num].fence_context, fence->seqno);

	return &fence->base;
}
static int panfrost_job_get_slot(struct panfrost_job *job)
{
	/* JS0: fragment jobs.
	 * JS1: vertex/tiler jobs
	 * JS2: compute jobs
	 */
	if (job->requirements & PANFROST_JD_REQ_FS)
		return 0;

/* Not exposed to userspace yet */
#if 0
	if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
		if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
		    (job->pfdev->features.nr_core_groups == 2))
			return 2;
		if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
			return 2;
	}
#endif
	return 1;
}
static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
					u32 requirements,
					int js)
{
	u64 affinity;

	/*
	 * Use all cores for now.
	 * Eventually we may need to support tiler only jobs and h/w with
	 * multiple (2) coherent core groups
	 */
	affinity = pfdev->features.shader_present;

	job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
	job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
}
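
/*
 * Push one job to the hardware: take a runtime-PM reference, make sure the
 * slot's _NEXT registers are free, bind the file's address space, then write
 * the job chain head, affinity and config before kicking JS_COMMAND_START.
 */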
static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
{
	struct panfrost_device *pfdev = job->pfdev;
	u32 cfg;
	u64 jc_head = job->jc;
	int ret;

	ret = pm_runtime_get_sync(pfdev->dev);
	if (ret < 0)
		return;

	if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
		pm_runtime_put_sync_autosuspend(pfdev->dev);
		return;
	}

	cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
	panfrost_devfreq_record_busy(pfdev);

	job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
	job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);

	panfrost_job_write_affinity(pfdev, job->requirements, js);

	/* start MMU, medium priority, cache clean/flush on end, clean/flush on
	 * start */
	cfg |= JS_CONFIG_THREAD_PRI(8) |
		JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
		JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
		cfg |= JS_CONFIG_START_MMU;

	job_write(pfdev, JS_CONFIG_NEXT(js), cfg);

	if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
		job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);

	/* GO ! */
	dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
		job, js, jc_head);

	job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
}
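
/*
 * Implicit synchronization: collect the current exclusive fence of every BO
 * the job uses as an input dependency, and attach the job's render-done
 * fence as the new exclusive fence once the job has been pushed.
 */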
static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
					   int bo_count,
					   struct dma_fence **implicit_fences)
{
	int i;

	for (i = 0; i < bo_count; i++)
		implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv);
}
static void panfrost_attach_object_fences(struct drm_gem_object **bos,
					  int bo_count,
					  struct dma_fence *fence)
{
	int i;

	for (i = 0; i < bo_count; i++)
		dma_resv_add_excl_fence(bos[i]->resv, fence);
}
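
/*
 * Queue a job on the scheduler entity matching its slot. pfdev->sched_lock
 * serializes pushes so fences are emitted in seqno order; the BO reservations
 * are held while the job is initialized and pushed, and dropped on exit.
 */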
int panfrost_job_push(struct panfrost_job *job)
{
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	mutex_lock(&pfdev->sched_lock);

	ret = drm_gem_lock_reservations(job->bos, job->bo_count,
					&acquire_ctx);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		return ret;
	}

	ret = drm_sched_job_init(&job->base, entity, NULL);
	if (ret) {
		mutex_unlock(&pfdev->sched_lock);
		goto unlock;
	}

	job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);

	kref_get(&job->refcount); /* put by scheduler job completion */

	panfrost_acquire_object_fences(job->bos, job->bo_count,
				       job->implicit_fences);

	drm_sched_entity_push_job(&job->base, entity);

	mutex_unlock(&pfdev->sched_lock);

	panfrost_attach_object_fences(job->bos, job->bo_count,
				      job->render_done_fence);

unlock:
	drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);

	return ret;
}
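
/* Final kref release: drop every fence and BO reference the job holds. */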
static void panfrost_job_cleanup(struct kref *ref)
{
	struct panfrost_job *job = container_of(ref, struct panfrost_job,
						refcount);
	unsigned int i;

	if (job->in_fences) {
		for (i = 0; i < job->in_fence_count; i++)
			dma_fence_put(job->in_fences[i]);
		kvfree(job->in_fences);
	}
	if (job->implicit_fences) {
		for (i = 0; i < job->bo_count; i++)
			dma_fence_put(job->implicit_fences[i]);
		kvfree(job->implicit_fences);
	}
	dma_fence_put(job->done_fence);
	dma_fence_put(job->render_done_fence);

	if (job->mappings) {
		for (i = 0; i < job->bo_count; i++) {
			if (!job->mappings[i])
				break;

			atomic_dec(&job->mappings[i]->obj->gpu_usecount);
			panfrost_gem_mapping_put(job->mappings[i]);
		}
		kvfree(job->mappings);
	}

	if (job->bos) {
		for (i = 0; i < job->bo_count; i++)
			drm_gem_object_put_unlocked(job->bos[i]);

		kvfree(job->bos);
	}

	kfree(job);
}
void panfrost_job_put(struct panfrost_job *job)
{
	kref_put(&job->refcount, panfrost_job_cleanup);
}
static void panfrost_job_free(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);

	drm_sched_job_cleanup(sched_job);

	panfrost_job_put(job);
}
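
/*
 * drm_sched ->dependency callback: hand back one unsignaled fence at a time
 * (explicit in-fences first, then per-BO implicit fences), clearing each slot
 * so a fence is only returned once. NULL means the job is ready to run.
 */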
static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
						 struct drm_sched_entity *s_entity)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct dma_fence *fence;
	unsigned int i;

	/* Explicit fences */
	for (i = 0; i < job->in_fence_count; i++) {
		if (job->in_fences[i]) {
			fence = job->in_fences[i];
			job->in_fences[i] = NULL;
			return fence;
		}
	}

	/* Implicit fences, max. one per BO */
	for (i = 0; i < job->bo_count; i++) {
		if (job->implicit_fences[i]) {
			fence = job->implicit_fences[i];
			job->implicit_fences[i] = NULL;
			return fence;
		}
	}

	return NULL;
}
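
/*
 * drm_sched ->run_job callback: create the hardware "done" fence for the
 * chosen slot and submit the job chain. Returning NULL tells the scheduler
 * no hardware fence was produced for this job.
 */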
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int slot = panfrost_job_get_slot(job);
	struct dma_fence *fence = NULL;

	if (unlikely(job->base.s_fence->finished.error))
		return NULL;

	pfdev->jobs[slot] = job;

	fence = panfrost_fence_create(pfdev, slot);
	if (IS_ERR(fence))
		return NULL;

	if (job->done_fence)
		dma_fence_put(job->done_fence);
	job->done_fence = dma_fence_get(fence);

	panfrost_job_hw_submit(job, slot);

	return fence;
}
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
{
	int j;
	u32 irq_mask = 0;

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		irq_mask |= MK_JS_MASK(j);
	}

	job_write(pfdev, JOB_INT_CLEAR, irq_mask);
	job_write(pfdev, JOB_INT_MASK, irq_mask);
}
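
/*
 * drm_sched ->timedout_job callback: unless the job actually finished
 * (spurious timeout), stop all schedulers, drop the in-flight jobs' PM
 * references, reset the GPU, then resubmit and restart the schedulers.
 */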
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
	struct panfrost_job *job = to_panfrost_job(sched_job);
	struct panfrost_device *pfdev = job->pfdev;
	int js = panfrost_job_get_slot(job);
	unsigned long flags;
	int i;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(job->done_fence))
		return;

	dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
		js,
		job_read(pfdev, JS_CONFIG(js)),
		job_read(pfdev, JS_STATUS(js)),
		job_read(pfdev, JS_HEAD_LO(js)),
		job_read(pfdev, JS_TAIL_LO(js)),
		sched_job);

	if (!mutex_trylock(&pfdev->reset_lock))
		return;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;

		drm_sched_stop(sched, sched_job);
		if (js != i)
			/* Ensure any timeouts on other slots have finished */
			cancel_delayed_work_sync(&sched->work_tdr);
	}

	drm_sched_increase_karma(sched_job);

	spin_lock_irqsave(&pfdev->js->job_lock, flags);
	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		if (pfdev->jobs[i]) {
			pm_runtime_put_noidle(pfdev->dev);
			pfdev->jobs[i] = NULL;
		}
	}
	spin_unlock_irqrestore(&pfdev->js->job_lock, flags);

	panfrost_devfreq_record_idle(pfdev);
	panfrost_device_reset(pfdev);

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);

	/* restart scheduler after GPU is usable again */
	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_start(&pfdev->js->queue[i].sched, true);

	mutex_unlock(&pfdev->reset_lock);
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
	.dependency = panfrost_job_dependency,
	.run_job = panfrost_job_run,
	.timedout_job = panfrost_job_timedout,
	.free_job = panfrost_job_free
};
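
/*
 * A single "job" interrupt line covers all slots: JOB_INT_STAT carries a DONE
 * and an ERR bit per slot, which are handled and cleared independently.
 */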
static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = job_read(pfdev, JOB_INT_STAT);
	int j;

	dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);

	if (!status)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(pfdev->dev);

	for (j = 0; status; j++) {
		u32 mask = MK_JS_MASK(j);

		if (!(status & mask))
			continue;

		job_write(pfdev, JOB_INT_CLEAR, mask);

		if (status & JOB_INT_MASK_ERR(j)) {
			job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);

			dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
				j,
				panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
				job_read(pfdev, JS_HEAD_LO(j)),
				job_read(pfdev, JS_TAIL_LO(j)));

			drm_sched_fault(&pfdev->js->queue[j].sched);
		}

		if (status & JOB_INT_MASK_DONE(j)) {
			struct panfrost_job *job;

			spin_lock(&pfdev->js->job_lock);
			job = pfdev->jobs[j];
			/* Only NULL if job timeout occurred */
			if (job) {
				pfdev->jobs[j] = NULL;

				panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
				panfrost_devfreq_record_idle(pfdev);

				dma_fence_signal_locked(job->done_fence);
				pm_runtime_put_autosuspend(pfdev->dev);
			}
			spin_unlock(&pfdev->js->job_lock);
		}

		status &= ~mask;
	}

	return IRQ_HANDLED;
}
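
/*
 * Device-level setup: allocate the slot state, hook up the "job" interrupt
 * and create one drm_gpu_scheduler per slot with a 500 ms job timeout.
 */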
int panfrost_job_init(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js;
	int ret, j, irq;

	pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
	if (!js)
		return -ENOMEM;

	spin_lock_init(&js->job_lock);

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
	if (irq <= 0)
		return -ENODEV;

	ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
			       IRQF_SHARED, "job", pfdev);
	if (ret) {
		dev_err(pfdev->dev, "failed to request job irq");
		return ret;
	}

	for (j = 0; j < NUM_JOB_SLOTS; j++) {
		js->queue[j].fence_context = dma_fence_context_alloc(1);

		ret = drm_sched_init(&js->queue[j].sched,
				     &panfrost_sched_ops,
				     1, 0, msecs_to_jiffies(500),
				     "pan_js");
		if (ret) {
			dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
			goto err_sched;
		}
	}

	panfrost_job_enable_interrupts(pfdev);

	return 0;

err_sched:
	for (j--; j >= 0; j--)
		drm_sched_fini(&js->queue[j].sched);

	return ret;
}
void panfrost_job_fini(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int j;

	job_write(pfdev, JOB_INT_MASK, 0);

	for (j = 0; j < NUM_JOB_SLOTS; j++)
		drm_sched_fini(&js->queue[j].sched);
}
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
{
	struct panfrost_device *pfdev = panfrost_priv->pfdev;
	struct panfrost_job_slot *js = pfdev->js;
	struct drm_gpu_scheduler *sched;
	int ret, i;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		sched = &js->queue[i].sched;
		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
					    DRM_SCHED_PRIORITY_NORMAL, &sched,
					    1, NULL);
		if (WARN_ON(ret))
			return ret;
	}
	return 0;
}
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
	int i;

	for (i = 0; i < NUM_JOB_SLOTS; i++)
		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
}
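
/*
 * Idle means devfreq has no busy time accounted and no slot has a job in its
 * hardware run queue.
 */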
int panfrost_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	/* Check whether the hardware is idle */
	if (atomic_read(&pfdev->devfreq.busy_count))
		return false;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}