// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <drm/drm_syncobj.h>
#include <uapi/drm/v3d_drm.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type.  If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG, V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

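/* v3d->reset is the optional reset controller looked up at probe
 * time; when the platform doesn't provide one we fall back to the
 * manual bridge reset sequence above.
 */
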
void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

	trace_v3d_reset_end(dev);
}

static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2C cache.  This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);
}

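/* Note the asymmetry with v3d_clean_caches() below: a flush here only
 * invalidates and can be fire-and-forget, while a clean must write
 * dirty lines back to memory and therefore has to be waited on
 * synchronously.
 */
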
/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion.  So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;
	int core = 0;

	trace_v3d_cache_clean_begin(dev);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
	}

	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T clean\n");
	}

	mutex_unlock(&v3d->cache_clean_lock);

	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches.  These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in.  That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

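/* This runs from the scheduler's run_job() hooks before a job is
 * kicked off, so each submitted job starts from a coherent view of
 * memory regardless of what the previous job left in the caches.
 */
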
/* Takes the reservation lock on all the BOs being referenced, so that
 * at queue submit time we can update the reservations.
 *
 * We don't lock the RCL, the tile alloc/state BOs, or overflow memory
 * (all of which are on exec->unref_list).  They're entirely private
 * to v3d, so we don't attach dma-buf fences to them.
 */
static int
v3d_lock_bo_reservations(struct v3d_job *job,
			 struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_gem_fence_array_add_implicit(&job->deps,
						       job->bo[i], true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    acquire_ctx);
			return ret;
		}
	}

	return 0;
}

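/* drm_gem_lock_reservations() takes every BO's dma_resv lock as one
 * ww_mutex transaction under acquire_ctx, backing off and retrying on
 * contention, so two submitters locking overlapping BO sets cannot
 * deadlock against each other.
 */
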
/**
 * v3d_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @job: V3D job being set up
 * @bo_handles: GEM handles
 * @bo_count: Number of GEM handles passed in
 *
 * The command validator needs to reference BOs by their index within
 * the submitted job's BO list.  This does the validation of the job's
 * BO list and reference counting for the lifetime of the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at v3d_exec_cleanup() time.
 */
static int
v3d_lookup_bos(struct drm_device *dev,
	       struct drm_file *file_priv,
	       struct v3d_job *job,
	       u64 bo_handles,
	       u32 bo_count)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->bo_count = bo_count;

	if (!job->bo_count) {
		/* See comment on bo_index for why we have to check
		 * this.
		 */
		DRM_DEBUG("Rendering requires BOs\n");
		return -EINVAL;
	}

	job->bo = kvmalloc_array(job->bo_count,
				 sizeof(struct drm_gem_cma_object *),
				 GFP_KERNEL | __GFP_ZERO);
	if (!job->bo) {
		DRM_DEBUG("Failed to allocate validated BO pointers\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->bo_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming GEM handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)bo_handles,
			   job->bo_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto fail;
	}

	spin_lock(&file_priv->table_lock);
	for (i = 0; i < job->bo_count; i++) {
		struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
						     handles[i]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  i, handles[i]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->bo[i] = bo;
	}
	spin_unlock(&file_priv->table_lock);

fail:
	kvfree(handles);
	return ret;
}

static void
v3d_job_free(struct kref *ref)
{
	struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
	unsigned long index;
	struct dma_fence *fence;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		if (job->bo[i])
			drm_gem_object_put_unlocked(job->bo[i]);
	}
	kvfree(job->bo);

	xa_for_each(&job->deps, index, fence) {
		dma_fence_put(fence);
	}
	xa_destroy(&job->deps);

	dma_fence_put(job->irq_fence);
	dma_fence_put(job->done_fence);

	pm_runtime_mark_last_busy(job->v3d->dev);
	pm_runtime_put_autosuspend(job->v3d->dev);

	kfree(job);
}

static void
v3d_render_job_free(struct kref *ref)
{
	struct v3d_render_job *job = container_of(ref, struct v3d_render_job,
						  base.refcount);
	struct v3d_bo *bo, *save;

	list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	v3d_job_free(ref);
}

void v3d_job_put(struct v3d_job *job)
{
	kref_put(&job->refcount, job->free);
}

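/* Jobs are reference counted: the submission path holds one reference
 * that v3d_job_put() drops, and the scheduler holds another (taken in
 * v3d_push_job()) that is dropped on job completion via job->free,
 * which points at v3d_job_free() or v3d_render_job_free() depending on
 * the job type.
 */
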
int
v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	int ret;
	struct drm_v3d_wait_bo *args = data;
	ktime_t start = ktime_get();
	u64 delta_ns;
	unsigned long timeout_jiffies =
		nsecs_to_jiffies_timeout(args->timeout_ns);

	if (args->pad != 0)
		return -EINVAL;

	ret = drm_gem_dma_resv_wait(file_priv, args->handle,
				    true, timeout_jiffies);

	/* Decrement the user's timeout, in case we got interrupted
	 * such that the ioctl will be restarted.
	 */
	delta_ns = ktime_to_ns(ktime_sub(ktime_get(), start));
	if (delta_ns < args->timeout_ns)
		args->timeout_ns -= delta_ns;
	else
		args->timeout_ns = 0;

	/* Asked to wait beyond the jiffie/scheduler precision? */
	if (ret == -ETIME && args->timeout_ns)
		ret = -EAGAIN;

	return ret;
}

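/* Sketch of the intended userspace call sequence (struct layout from
 * uapi/drm/v3d_drm.h; bo_handle and wait_budget_ns are placeholders):
 *
 *	struct drm_v3d_wait_bo wait = {
 *		.handle = bo_handle,
 *		.timeout_ns = wait_budget_ns,
 *	};
 *	int ret = ioctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 *
 * On -EAGAIN the call is expected to be retried with the decremented
 * wait.timeout_ns written back above.
 */
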
static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
	     struct v3d_job *job, void (*free)(struct kref *ref),
	     u32 in_sync)
{
	struct dma_fence *in_fence = NULL;
	int ret;

	job->v3d = v3d;
	job->free = free;

	ret = pm_runtime_get_sync(v3d->dev);
	if (ret < 0)
		return ret;

	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

	ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
	if (ret == -EINVAL)
		goto fail;

	ret = drm_gem_fence_array_add(&job->deps, in_fence);
	if (ret)
		goto fail;

	kref_init(&job->refcount);

	return 0;
fail:
	xa_destroy(&job->deps);
	pm_runtime_put_autosuspend(v3d->dev);
	return ret;
}

static int
v3d_push_job(struct v3d_file_priv *v3d_priv,
	     struct v3d_job *job, enum v3d_queue queue)
{
	int ret;

	ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
				 v3d_priv);
	if (ret)
		return ret;

	job->done_fence = dma_fence_get(&job->base.s_fence->finished);

	/* put by scheduler job completion */
	kref_get(&job->refcount);

	drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);

	return 0;
}

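/* All callers of v3d_push_job() hold v3d->sched_lock, so jobs reach
 * the scheduler entities in the same order their fence dependencies
 * were set up, keeping the in-fence ordering of done fences
 * consistent.
 */
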
static void
v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
					 struct v3d_job *job,
					 struct ww_acquire_ctx *acquire_ctx,
					 u32 out_sync,
					 struct dma_fence *done_fence)
{
	struct drm_syncobj *sync_out;
	int i;

	for (i = 0; i < job->bo_count; i++) {
		/* XXX: Use shared fences for read-only objects. */
		dma_resv_add_excl_fence(job->bo[i]->resv,
					job->done_fence);
	}

	drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);

	/* Update the return sync object for the job */
	sync_out = drm_syncobj_find(file_priv, out_sync);
	if (sync_out) {
		drm_syncobj_replace_fence(sync_out, done_fence);
		drm_syncobj_put(sync_out);
	}
}

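/* Attaching the done fence as each BO's exclusive dma_resv fence is
 * what gives other implicit-sync users (a display server scanning out
 * one of these BOs, for example) something to wait on before touching
 * the buffer.
 */
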
/**
 * v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * This is the main entrypoint for userspace to submit a 3D frame to
 * the GPU.  Userspace provides the binner command list (if
 * applicable), and the kernel sets up the render command list to draw
 * to the framebuffer described in the ioctl, using the command lists
 * that the 3D engine's binner will produce.
 */
int
v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_cl *args = data;
	struct v3d_bin_job *bin = NULL;
	struct v3d_render_job *render;
	struct v3d_job *clean_job = NULL;
	struct v3d_job *last_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);

	if (args->flags != 0 &&
	    args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		DRM_INFO("invalid flags: %d\n", args->flags);
		return -EINVAL;
	}

	render = kcalloc(1, sizeof(*render), GFP_KERNEL);
	if (!render)
		return -ENOMEM;

	render->start = args->rcl_start;
	render->end = args->rcl_end;
	INIT_LIST_HEAD(&render->unref_list);

	ret = v3d_job_init(v3d, file_priv, &render->base,
			   v3d_render_job_free, args->in_sync_rcl);
	if (ret) {
		kfree(render);
		return ret;
	}

	if (args->bcl_start != args->bcl_end) {
		bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
		if (!bin) {
			v3d_job_put(&render->base);
			return -ENOMEM;
		}

		ret = v3d_job_init(v3d, file_priv, &bin->base,
				   v3d_job_free, args->in_sync_bcl);
		if (ret) {
			v3d_job_put(&render->base);
			kfree(bin);
			return ret;
		}

		bin->start = args->bcl_start;
		bin->end = args->bcl_end;
		bin->qma = args->qma;
		bin->qms = args->qms;
		bin->qts = args->qts;
		bin->render = render;
	}

	if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
		clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
		if (!clean_job) {
			ret = -ENOMEM;
			goto fail;
		}

		ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
		if (ret) {
			kfree(clean_job);
			clean_job = NULL;
			goto fail;
		}

		last_job = clean_job;
	} else {
		last_job = &render->base;
	}

	ret = v3d_lookup_bos(dev, file_priv, last_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(last_job, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	if (bin) {
		ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
		if (ret)
			goto fail_unreserve;

		ret = drm_gem_fence_array_add(&render->base.deps,
					      dma_fence_get(bin->base.done_fence));
		if (ret)
			goto fail_unreserve;
	}

	ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
	if (ret)
		goto fail_unreserve;

	if (clean_job) {
		struct dma_fence *render_fence =
			dma_fence_get(render->base.done_fence);
		ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
		if (ret)
			goto fail_unreserve;
		ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
		if (ret)
			goto fail_unreserve;
	}

	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 last_job,
						 &acquire_ctx,
						 args->out_sync,
						 last_job->done_fence);

	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(last_job->bo,
				    last_job->bo_count, &acquire_ctx);
fail:
	if (bin)
		v3d_job_put(&bin->base);
	v3d_job_put(&render->base);
	if (clean_job)
		v3d_job_put(clean_job);

	return ret;
}

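/* The submission pattern above (allocate and init the jobs, look up
 * and lock the BOs, push everything under sched_lock, attach fences,
 * then drop the local job references) is shared by the TFU and CSD
 * ioctls below; only the set of queues and dependencies differs.
 */
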
/**
 * v3d_submit_tfu_ioctl() - Submits a TFU (texture formatting) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the TFU, which we don't
 * need to validate since the TFU is behind the MMU.
 */
int
v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_tfu *args = data;
	struct v3d_tfu_job *job;
	struct ww_acquire_ctx acquire_ctx;
	int ret = 0;

	trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync);
	if (ret) {
		kfree(job);
		return ret;
	}

	job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
			       sizeof(*job->base.bo), GFP_KERNEL);
	if (!job->base.bo) {
		v3d_job_put(&job->base);
		return -ENOMEM;
	}

	job->args = *args;

	spin_lock(&file_priv->table_lock);
	for (job->base.bo_count = 0;
	     job->base.bo_count < ARRAY_SIZE(args->bo_handles);
	     job->base.bo_count++) {
		struct drm_gem_object *bo;

		if (!args->bo_handles[job->base.bo_count])
			break;

		bo = idr_find(&file_priv->object_idr,
			      args->bo_handles[job->base.bo_count]);
		if (!bo) {
			DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
				  job->base.bo_count,
				  args->bo_handles[job->base.bo_count]);
			ret = -ENOENT;
			spin_unlock(&file_priv->table_lock);
			goto fail;
		}
		drm_gem_object_get(bo);
		job->base.bo[job->base.bo_count] = bo;
	}
	spin_unlock(&file_priv->table_lock);

	ret = v3d_lock_bo_reservations(&job->base, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
	if (ret)
		goto fail_unreserve;
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 &job->base, &acquire_ctx,
						 args->out_sync,
						 job->base.done_fence);

	v3d_job_put(&job->base);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
				    &acquire_ctx);
fail:
	v3d_job_put(&job->base);

	return ret;
}

/**
 * v3d_submit_csd_ioctl() - Submits a CSD (compute shader dispatch) job to the V3D.
 * @dev: DRM device
 * @data: ioctl argument
 * @file_priv: DRM file for this fd
 *
 * Userspace provides the register setup for the CSD, which we don't
 * need to validate since the CSD is behind the MMU.
 */
int
v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
	struct drm_v3d_submit_csd *args = data;
	struct v3d_csd_job *job;
	struct v3d_job *clean_job;
	struct ww_acquire_ctx acquire_ctx;
	int ret;

	trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);

	if (!v3d_has_csd(v3d)) {
		DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
		return -EINVAL;
	}

	job = kcalloc(1, sizeof(*job), GFP_KERNEL);
	if (!job)
		return -ENOMEM;

	ret = v3d_job_init(v3d, file_priv, &job->base,
			   v3d_job_free, args->in_sync);
	if (ret) {
		kfree(job);
		return ret;
	}

	clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
	if (!clean_job) {
		v3d_job_put(&job->base);
		return -ENOMEM;
	}

	ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
	if (ret) {
		v3d_job_put(&job->base);
		kfree(clean_job);
		return ret;
	}

	job->args = *args;

	ret = v3d_lookup_bos(dev, file_priv, clean_job,
			     args->bo_handles, args->bo_handle_count);
	if (ret)
		goto fail;

	ret = v3d_lock_bo_reservations(clean_job, &acquire_ctx);
	if (ret)
		goto fail;

	mutex_lock(&v3d->sched_lock);
	ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
	if (ret)
		goto fail_unreserve;

	ret = drm_gem_fence_array_add(&clean_job->deps,
				      dma_fence_get(job->base.done_fence));
	if (ret)
		goto fail_unreserve;

	ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
	if (ret)
		goto fail_unreserve;
	mutex_unlock(&v3d->sched_lock);

	v3d_attach_fences_and_unlock_reservation(file_priv,
						 clean_job,
						 &acquire_ctx,
						 args->out_sync,
						 clean_job->done_fence);

	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return 0;

fail_unreserve:
	mutex_unlock(&v3d->sched_lock);
	drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
				    &acquire_ctx);
fail:
	v3d_job_put(&job->base);
	v3d_job_put(clean_job);

	return ret;
}

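/* v3d_gem_init() below allocates a single 4MB page table up front:
 * 4096 * 1024 bytes is 1M u32 entries, and at one 4KB page per entry
 * that is enough to map the GPU's entire 4GB virtual address space
 * once, which is why it can be allocated once at init time.
 */
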
int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++)
		v3d->queue[i].fence_context = dma_fence_context_alloc(1);

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	mutex_init(&v3d->bo_lock);
	mutex_init(&v3d->reset_lock);
	mutex_init(&v3d->sched_lock);
	mutex_init(&v3d->cache_clean_lock);

	/* Note: We don't allocate address 0.  Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->dev,
			"Failed to allocate page tables. "
			"Please ensure you have CMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
}