// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Etnaviv Project
 */

#include <linux/moduleparam.h>

#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"

static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
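
/*
 * Return the next fence this job still has to wait on before it may run:
 * first the explicit in-fence, then the reservation fences of each BO in
 * the submit. Fences that are already signaled are dropped along the way.
 */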
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
			 struct drm_sched_entity *entity)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence;
	int i;

	if (unlikely(submit->in_fence)) {
		fence = submit->in_fence;
		submit->in_fence = NULL;

		if (!dma_fence_is_signaled(fence))
			return fence;

		dma_fence_put(fence);
	}

	for (i = 0; i < submit->nr_bos; i++) {
		struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
		int j;

		if (bo->excl) {
			fence = bo->excl;
			bo->excl = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		for (j = 0; j < bo->nr_shared; j++) {
			if (!bo->shared[j])
				continue;

			fence = bo->shared[j];
			bo->shared[j] = NULL;

			if (!dma_fence_is_signaled(fence))
				return fence;

			dma_fence_put(fence);
		}

		kfree(bo->shared);
		bo->nr_shared = 0;
		bo->shared = NULL;
	}

	return NULL;
}
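
/*
 * Hand the job to the hardware. If the scheduler fence already carries an
 * error (e.g. the job was marked guilty during recovery), skip the submission.
 */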
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct dma_fence *fence = NULL;

	if (likely(!sched_job->s_fence->finished.error))
		fence = etnaviv_gpu_submit(submit);
	else
		dev_dbg(submit->gpu->dev, "skipping bad job\n");

	return fence;
}
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
	struct etnaviv_gpu *gpu = submit->gpu;
	u32 dma_addr;
	int change;

	/*
	 * If the GPU managed to complete this job's fence, the timeout is
	 * spurious. Bail out.
	 */
	if (dma_fence_is_signaled(submit->out_fence))
		return;

	/*
	 * If the GPU is still making forward progress on the front-end (which
	 * should never loop) we shift out the timeout to give it a chance to
	 * get work done.
	 */
	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
	change = dma_addr - gpu->hangcheck_dma_addr;
	if (change < 0 || change > 16) {
		gpu->hangcheck_dma_addr = dma_addr;
		return;
	}

	/* block scheduler */
	drm_sched_stop(&gpu->sched, sched_job);

	drm_sched_increase_karma(sched_job);

	/* get the GPU back into the init state */
	etnaviv_core_dump(submit);
	etnaviv_gpu_recover_hang(gpu);

	drm_sched_resubmit_jobs(&gpu->sched);

	/* restart scheduler after GPU is usable again */
	drm_sched_start(&gpu->sched, true);
}
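
/* Release the scheduler's reference on the submit once the job is retired. */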
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);

	drm_sched_job_cleanup(sched_job);

	etnaviv_submit_put(submit);
}

static const struct drm_sched_backend_ops etnaviv_sched_ops = {
	.dependency = etnaviv_sched_dependency,
	.run_job = etnaviv_sched_run_job,
	.timedout_job = etnaviv_sched_timedout_job,
	.free_job = etnaviv_sched_free_job,
};
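
/*
 * Queue a submit on the scheduler entity. Takes an extra reference on the
 * submit, which the scheduler drops again through etnaviv_sched_free_job().
 */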
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
			   struct etnaviv_gem_submit *submit)
{
	int ret = 0;

	/*
	 * Hold the fence lock across the whole operation to avoid jobs being
	 * pushed out of order with regard to their sched fence seqnos as
	 * allocated in drm_sched_job_init.
	 */
	mutex_lock(&submit->gpu->fence_lock);

	ret = drm_sched_job_init(&submit->sched_job, sched_entity,
				 submit->ctx);
	if (ret)
		goto out_unlock;

	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
						submit->out_fence, 0,
						INT_MAX, GFP_KERNEL);
	if (submit->out_fence_id < 0) {
		drm_sched_job_cleanup(&submit->sched_job);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* the scheduler holds on to the job now */
	kref_get(&submit->refcount);

	drm_sched_entity_push_job(&submit->sched_job, sched_entity);

out_unlock:
	mutex_unlock(&submit->gpu->fence_lock);

	return ret;
}
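
/*
 * Create the per-GPU scheduler instance, using the module-parameter limits
 * for in-flight jobs and hang tolerance, and a 500 ms job timeout.
 */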
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
	int ret;

	ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
			     etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
			     msecs_to_jiffies(500), dev_name(gpu->dev));
	if (ret)
		return ret;

	return 0;
}

void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
	drm_sched_fini(&gpu->sched);
}