// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#ifdef CONFIG_ARM_ARCH_TIMER
#include <asm/arch_timer.h>
#endif

#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/time64.h>

#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

/**
 * DOC: user <-> kernel object copy helpers.
 */

/**
 * panthor_set_uobj() - Copy a kernel object to a user object.
 * @usr_ptr: User pointer.
 * @usr_size: Size of the user object.
 * @min_size: Minimum size for this object.
 * @kern_size: Size of the kernel object.
 * @in: Address of the kernel object to copy.
 *
 * Helper automating kernel -> user object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_SET() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 kern_size, const void *in)
{
	/* User size shouldn't be smaller than the minimal object size. */
	if (usr_size < min_size)
		return -EINVAL;

	if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_size, kern_size)))
		return -EFAULT;

	/* When the kernel object is smaller than the user object, we fill the gap with
	 * zeros.
	 */
	if (usr_size > kern_size &&
	    clear_user(u64_to_user_ptr(usr_ptr + kern_size), usr_size - kern_size)) {
		return -EFAULT;
	}

	return 0;
}

/**
 * panthor_get_uobj_array() - Copy a user object array into a kernel accessible object array.
 * @in: The object array to copy.
 * @min_stride: Minimum array stride.
 * @obj_size: Kernel object size.
 *
 * Helper automating user -> kernel object copies.
 *
 * Don't use this function directly, use PANTHOR_UOBJ_GET_ARRAY() instead.
 *
 * Return: newly allocated object array or an ERR_PTR on error.
 */
static void *
panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride,
		       u32 obj_size)
{
	int ret = 0;
	void *out_alloc;

	if (!in->count)
		return NULL;

	/* User stride must be at least the minimum object size, otherwise it might
	 * lack useful information.
	 */
	if (in->stride < min_stride)
		return ERR_PTR(-EINVAL);

	out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL);
	if (!out_alloc)
		return ERR_PTR(-ENOMEM);

	if (obj_size == in->stride) {
		/* Fast path when user/kernel have the same uAPI header version. */
		if (copy_from_user(out_alloc, u64_to_user_ptr(in->array),
				   (unsigned long)obj_size * in->count))
			ret = -EFAULT;
	} else {
		void __user *in_ptr = u64_to_user_ptr(in->array);
		void *out_ptr = out_alloc;

		/* If the sizes differ, we need to copy elements one by one. */
		for (u32 i = 0; i < in->count; i++) {
			ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride);
			if (ret)
				break;

			out_ptr += obj_size;
			in_ptr += in->stride;
		}
	}

	if (ret) {
		kvfree(out_alloc);
		return ERR_PTR(ret);
	}

	return out_alloc;
}

/**
 * PANTHOR_UOBJ_MIN_SIZE_INTERNAL() - Get the minimum user object size
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Get the minimum user object size based on the last mandatory field name,
 * A.K.A, the name of the last field of the structure at the time this
 * structure was added to the uAPI.
 *
 * Don't use directly, use PANTHOR_UOBJ_DECL() instead.
 */
#define PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \
	(offsetof(_typename, _last_mandatory_field) + \
	 sizeof(((_typename *)NULL)->_last_mandatory_field))

/**
 * PANTHOR_UOBJ_DECL() - Declare a new uAPI object that is subject to
 * evolutions.
 * @_typename: Object type.
 * @_last_mandatory_field: Last mandatory field.
 *
 * Should be used to extend the PANTHOR_UOBJ_MIN_SIZE() list.
 */
#define PANTHOR_UOBJ_DECL(_typename, _last_mandatory_field) \
	_typename : PANTHOR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field)

/**
 * PANTHOR_UOBJ_MIN_SIZE() - Get the minimum size of a given uAPI object
 * @_obj_name: Object to get the minimum size of.
 *
 * Don't use this macro directly, it's automatically called by
 * PANTHOR_UOBJ_{SET,GET_ARRAY}().
 */
#define PANTHOR_UOBJ_MIN_SIZE(_obj_name) \
	_Generic(_obj_name, \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_group_priorities_info, pad), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \
		 PANTHOR_UOBJ_DECL(struct drm_panthor_vm_bind_op, syncs))

/**
 * PANTHOR_UOBJ_SET() - Copy a kernel object to a user object.
 * @_dest_usr_ptr: User pointer to copy to.
 * @_usr_size: Size of the user object.
 * @_src_obj: Kernel object to copy (not a pointer).
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \
	panthor_set_uobj(_dest_usr_ptr, _usr_size, \
			 PANTHOR_UOBJ_MIN_SIZE(_src_obj), \
			 sizeof(_src_obj), &(_src_obj))

/**
 * PANTHOR_UOBJ_GET_ARRAY() - Copy a user object array to a kernel accessible
 * object array.
 * @_dest_array: Local variable that will hold the newly allocated kernel
 * object array.
 * @_uobj_array: The drm_panthor_obj_array object describing the user object
 * array.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
#define PANTHOR_UOBJ_GET_ARRAY(_dest_array, _uobj_array) \
	({ \
		typeof(_dest_array) _tmp; \
		_tmp = panthor_get_uobj_array(_uobj_array, \
					      PANTHOR_UOBJ_MIN_SIZE((_dest_array)[0]), \
					      sizeof((_dest_array)[0])); \
		if (!IS_ERR(_tmp)) \
			_dest_array = _tmp; \
		PTR_ERR_OR_ZERO(_tmp); \
	})

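/*
 * A minimal usage sketch of the two helpers above, mirroring how the ioctl
 * handlers below use them (illustration only, not an additional API):
 *
 *	struct drm_panthor_queue_create *queue_args;
 *	int ret;
 *
 *	// Copy a user array, tolerating older/newer uAPI element strides.
 *	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
 *
 *	// Copy a kernel object back to userspace, zero-filling the tail if
 *	// the user-side struct is bigger than the kernel one.
 *	ret = PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);
 */
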
/**
 * struct panthor_sync_signal - Represent a synchronization object point to attach
 * our job fence to.
 *
 * This structure is here to keep track of fences that are currently bound to
 * a specific syncobj point.
 *
 * At the beginning of a job submission, the fence
 * is retrieved from the syncobj itself, and can be NULL if no fence was attached
 * to this point.
 *
 * At the end, it points to the fence of the last job that had a
 * %DRM_PANTHOR_SYNC_OP_SIGNAL on this syncobj.
 *
 * With jobs being submitted in batches, the fence might change several times during
 * the process, allowing one job to wait on a job that's part of the same submission
 * but appears earlier in the drm_panthor_group_submit::queue_submits array.
 */
struct panthor_sync_signal {
	/** @node: list_head to track signal ops within a submit operation */
	struct list_head node;

	/** @handle: The syncobj handle. */
	u32 handle;

	/**
	 * @point: The syncobj point.
	 *
	 * Zero for regular syncobjs, and non-zero for timeline syncobjs.
	 */
	u64 point;

	/**
	 * @syncobj: The sync object pointed by @handle.
	 */
	struct drm_syncobj *syncobj;

	/**
	 * @chain: Chain object used to link the new fence to an existing
	 * timeline syncobj.
	 *
	 * NULL for regular syncobjs, non-NULL for timeline syncobjs.
	 */
	struct dma_fence_chain *chain;

	/**
	 * @fence: The fence to assign to the syncobj or syncobj-point.
	 */
	struct dma_fence *fence;
};

/**
 * struct panthor_job_ctx - Job context
 */
struct panthor_job_ctx {
	/** @job: The job that is about to be submitted to drm_sched. */
	struct drm_sched_job *job;

	/** @syncops: Array of sync operations. */
	struct drm_panthor_sync_op *syncops;

	/** @syncop_count: Number of sync operations. */
	u32 syncop_count;
};

/**
 * struct panthor_submit_ctx - Submission context
 *
 * Anything that's related to a submission (%DRM_IOCTL_PANTHOR_VM_BIND or
 * %DRM_IOCTL_PANTHOR_GROUP_SUBMIT) is kept here, so we can automate the
 * initialization and cleanup steps.
 */
struct panthor_submit_ctx {
	/** @file: DRM file this submission happens on. */
	struct drm_file *file;

	/**
	 * @signals: List of struct panthor_sync_signal.
	 *
	 * %DRM_PANTHOR_SYNC_OP_SIGNAL operations will be recorded here,
	 * and %DRM_PANTHOR_SYNC_OP_WAIT will first check if an entry
	 * matching the syncobj+point exists before calling
	 * drm_syncobj_find_fence(). This allows us to describe dependencies
	 * existing between jobs that are part of the same batch.
	 */
	struct list_head signals;

	/** @jobs: Array of jobs. */
	struct panthor_job_ctx *jobs;

	/** @job_count: Number of entries in the @jobs array. */
	u32 job_count;

	/** @exec: drm_exec context used to acquire and prepare resv objects. */
	struct drm_exec exec;
};

#define PANTHOR_SYNC_OP_FLAGS_MASK \
	(DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK | DRM_PANTHOR_SYNC_OP_SIGNAL)

static bool sync_op_is_signal(const struct drm_panthor_sync_op *sync_op)
{
	return !!(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
}

static bool sync_op_is_wait(const struct drm_panthor_sync_op *sync_op)
{
	/* Note that DRM_PANTHOR_SYNC_OP_WAIT == 0 */
	return !(sync_op->flags & DRM_PANTHOR_SYNC_OP_SIGNAL);
}

/**
 * panthor_check_sync_op() - Check drm_panthor_sync_op fields
 * @sync_op: The sync operation to check.
 *
 * Return: 0 on success, -EINVAL otherwise.
 */
static int
panthor_check_sync_op(const struct drm_panthor_sync_op *sync_op)
{
	u8 handle_type;

	if (sync_op->flags & ~PANTHOR_SYNC_OP_FLAGS_MASK)
		return -EINVAL;

	handle_type = sync_op->flags & DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_MASK;
	if (handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
	    handle_type != DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_TIMELINE_SYNCOBJ)
		return -EINVAL;

	if (handle_type == DRM_PANTHOR_SYNC_OP_HANDLE_TYPE_SYNCOBJ &&
	    sync_op->timeline_value != 0)
		return -EINVAL;

	return 0;
}

/**
 * panthor_sync_signal_free() - Release resources and free a panthor_sync_signal object
 * @sig_sync: Signal object to free.
 */
static void
panthor_sync_signal_free(struct panthor_sync_signal *sig_sync)
{
	if (!sig_sync)
		return;

	drm_syncobj_put(sig_sync->syncobj);
	dma_fence_chain_free(sig_sync->chain);
	dma_fence_put(sig_sync->fence);
	kfree(sig_sync);
}

/**
 * panthor_submit_ctx_add_sync_signal() - Add a signal operation to a submit context
 * @ctx: Context to add the signal operation to.
 * @handle: Syncobj handle.
 * @point: Syncobj point.
 *
 * Return: 0 on success, otherwise negative error value.
 */
static int
panthor_submit_ctx_add_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;
	struct dma_fence *cur_fence;
	int ret;

	sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
	if (!sig_sync)
		return -ENOMEM;

	sig_sync->handle = handle;
	sig_sync->point = point;

	if (point > 0) {
		sig_sync->chain = dma_fence_chain_alloc();
		if (!sig_sync->chain) {
			ret = -ENOMEM;
			goto err_free_sig_sync;
		}
	}

	sig_sync->syncobj = drm_syncobj_find(ctx->file, handle);
	if (!sig_sync->syncobj) {
		ret = -EINVAL;
		goto err_free_sig_sync;
	}

	/* Retrieve the current fence attached to that point. It's
	 * perfectly fine to get a NULL fence here, it just means there's
	 * no fence attached to that point yet.
	 */
	if (!drm_syncobj_find_fence(ctx->file, handle, point, 0, &cur_fence))
		sig_sync->fence = cur_fence;

	list_add_tail(&sig_sync->node, &ctx->signals);

	return 0;

err_free_sig_sync:
	panthor_sync_signal_free(sig_sync);
	return ret;
}

/**
 * panthor_submit_ctx_search_sync_signal() - Search an existing signal operation in a
 * submit context.
 * @ctx: Context to search the signal operation in.
 * @handle: Syncobj handle.
 * @point: Syncobj point.
 *
 * Return: A valid panthor_sync_signal object if found, NULL otherwise.
 */
static struct panthor_sync_signal *
panthor_submit_ctx_search_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;

	list_for_each_entry(sig_sync, &ctx->signals, node) {
		if (handle == sig_sync->handle && point == sig_sync->point)
			return sig_sync;
	}

	return NULL;
}

/**
 * panthor_submit_ctx_add_job() - Add a job to a submit context
 * @ctx: Context to add the job to.
 * @idx: Index of the job in the context.
 * @job: Job to add.
 * @syncs: Sync operations provided by userspace.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_job(struct panthor_submit_ctx *ctx, u32 idx,
			   struct drm_sched_job *job,
			   const struct drm_panthor_obj_array *syncs)
{
	int ret;

	ctx->jobs[idx].job = job;

	ret = PANTHOR_UOBJ_GET_ARRAY(ctx->jobs[idx].syncops, syncs);
	if (ret)
		return ret;

	ctx->jobs[idx].syncop_count = syncs->count;
	return 0;
}

/**
 * panthor_submit_ctx_get_sync_signal() - Search a signal operation and add one if none was found.
 * @ctx: Context to search the signal operation in.
 * @handle: Syncobj handle.
 * @point: Syncobj point.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_get_sync_signal(struct panthor_submit_ctx *ctx, u32 handle, u64 point)
{
	struct panthor_sync_signal *sig_sync;

	sig_sync = panthor_submit_ctx_search_sync_signal(ctx, handle, point);
	if (sig_sync)
		return 0;

	return panthor_submit_ctx_add_sync_signal(ctx, handle, point);
}

/**
 * panthor_submit_ctx_update_job_sync_signal_fences() - Update fences
 * on the signal operations specified by a job.
 * @ctx: Context to search the signal operation in.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_update_job_sync_signal_fences(struct panthor_submit_ctx *ctx,
						 u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device,
						    base);
	struct dma_fence *done_fence = &ctx->jobs[job_idx].job->s_fence->finished;
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct dma_fence *old_fence;
		struct panthor_sync_signal *sig_sync;

		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (drm_WARN_ON(&ptdev->base, !sig_sync))
			return -EINVAL;

		old_fence = sig_sync->fence;
		sig_sync->fence = dma_fence_get(done_fence);
		dma_fence_put(old_fence);

		if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
			return -EINVAL;
	}

	return 0;
}

/**
 * panthor_submit_ctx_collect_job_signal_ops() - Iterate over all job signal operations
 * and add them to the context.
 * @ctx: Context to search the signal operation in.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_job_signal_ops(struct panthor_submit_ctx *ctx,
					  u32 job_idx)
{
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;

	for (u32 i = 0; i < sync_op_count; i++) {
		int ret;

		if (!sync_op_is_signal(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		ret = panthor_submit_ctx_get_sync_signal(ctx,
							 sync_ops[i].handle,
							 sync_ops[i].timeline_value);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_push_fences() - Iterate over the signal array, and for each entry, push
 * the currently assigned fence to the associated syncobj.
 * @ctx: Context to push fences on.
 *
 * This is the last step of a submission procedure, and is done once we know the submission
 * is effective and job fences are guaranteed to be signaled in finite time.
 */
static void
panthor_submit_ctx_push_fences(struct panthor_submit_ctx *ctx)
{
	struct panthor_sync_signal *sig_sync;

	list_for_each_entry(sig_sync, &ctx->signals, node) {
		if (sig_sync->chain) {
			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
					      sig_sync->fence, sig_sync->point);
			sig_sync->chain = NULL;
		} else {
			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
		}
	}
}

/**
 * panthor_submit_ctx_add_sync_deps_to_job() - Add sync wait operations as
 * job dependencies.
 * @ctx: Submit context.
 * @job_idx: Index of the job to operate on.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_sync_deps_to_job(struct panthor_submit_ctx *ctx,
					u32 job_idx)
{
	struct panthor_device *ptdev = container_of(ctx->file->minor->dev,
						    struct panthor_device,
						    base);
	const struct drm_panthor_sync_op *sync_ops = ctx->jobs[job_idx].syncops;
	struct drm_sched_job *job = ctx->jobs[job_idx].job;
	u32 sync_op_count = ctx->jobs[job_idx].syncop_count;
	int ret = 0;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct panthor_sync_signal *sig_sync;
		struct dma_fence *fence;

		if (!sync_op_is_wait(&sync_ops[i]))
			continue;

		ret = panthor_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		sig_sync = panthor_submit_ctx_search_sync_signal(ctx, sync_ops[i].handle,
								 sync_ops[i].timeline_value);
		if (sig_sync) {
			if (drm_WARN_ON(&ptdev->base, !sig_sync->fence))
				return -EINVAL;

			fence = dma_fence_get(sig_sync->fence);
		} else {
			ret = drm_syncobj_find_fence(ctx->file, sync_ops[i].handle,
						     sync_ops[i].timeline_value,
						     0, &fence);
			if (ret)
				return ret;
		}

		ret = drm_sched_job_add_dependency(job, fence);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_collect_jobs_signal_ops() - Collect all signal operations
 * and add them to the submit context.
 * @ctx: Submit context.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_collect_jobs_signal_ops(struct panthor_submit_ctx *ctx)
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		int ret;

		ret = panthor_submit_ctx_collect_job_signal_ops(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_add_deps_and_arm_jobs() - Add job dependencies and arm jobs
 * @ctx: Submit context.
 *
 * Must be called after the resv preparation has been taken care of.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int
panthor_submit_ctx_add_deps_and_arm_jobs(struct panthor_submit_ctx *ctx)
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		int ret;

		ret = panthor_submit_ctx_add_sync_deps_to_job(ctx, i);
		if (ret)
			return ret;

		drm_sched_job_arm(ctx->jobs[i].job);

		ret = panthor_submit_ctx_update_job_sync_signal_fences(ctx, i);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_submit_ctx_push_jobs() - Push jobs to their scheduling entities.
 * @ctx: Submit context.
 * @upd_resvs: Callback used to update reservation objects that were previously
 * prepared.
 */
static void
panthor_submit_ctx_push_jobs(struct panthor_submit_ctx *ctx,
			     void (*upd_resvs)(struct drm_exec *, struct drm_sched_job *))
{
	for (u32 i = 0; i < ctx->job_count; i++) {
		upd_resvs(&ctx->exec, ctx->jobs[i].job);
		drm_sched_entity_push_job(ctx->jobs[i].job);

		/* Job is owned by the scheduler now. */
		ctx->jobs[i].job = NULL;
	}

	panthor_submit_ctx_push_fences(ctx);
}

/**
 * panthor_submit_ctx_init() - Initializes a submission context
 * @ctx: Submit context to initialize.
 * @file: drm_file this submission happens on.
 * @job_count: Number of jobs that will be submitted.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
static int panthor_submit_ctx_init(struct panthor_submit_ctx *ctx,
				   struct drm_file *file, u32 job_count)
{
	ctx->jobs = kvmalloc_array(job_count, sizeof(*ctx->jobs),
				   GFP_KERNEL | __GFP_ZERO);
	if (!ctx->jobs)
		return -ENOMEM;

	ctx->file = file;
	ctx->job_count = job_count;
	INIT_LIST_HEAD(&ctx->signals);
	drm_exec_init(&ctx->exec,
		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES,
		      0);
	return 0;
}

/**
 * panthor_submit_ctx_cleanup() - Cleanup a submission context
 * @ctx: Submit context to cleanup.
 * @job_put: Job put callback.
 */
static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx,
				       void (*job_put)(struct drm_sched_job *))
{
	struct panthor_sync_signal *sig_sync, *tmp;
	unsigned long i;

	drm_exec_fini(&ctx->exec);

	list_for_each_entry_safe(sig_sync, tmp, &ctx->signals, node)
		panthor_sync_signal_free(sig_sync);

	for (i = 0; i < ctx->job_count; i++) {
		job_put(ctx->jobs[i].job);
		kvfree(ctx->jobs[i].syncops);
	}

	kvfree(ctx->jobs);
}

static int panthor_query_timestamp_info(struct panthor_device *ptdev,
					struct drm_panthor_timestamp_info *arg)
{
	int ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

#ifdef CONFIG_ARM_ARCH_TIMER
	arg->timestamp_frequency = arch_timer_get_cntfrq();
#else
	arg->timestamp_frequency = 0;
#endif
	arg->current_timestamp = panthor_gpu_read_timestamp(ptdev);
	arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev);

	pm_runtime_put(ptdev->base.dev);
	return 0;
}

static int group_priority_permit(struct drm_file *file,
				 u8 priority)
{
	/* Ensure that priority is valid */
	if (priority > PANTHOR_GROUP_PRIORITY_REALTIME)
		return -EINVAL;

	/* Medium priority and below are always allowed */
	if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
		return 0;

	/* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
	if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
		return 0;

	return -EACCES;
}

static void panthor_query_group_priorities_info(struct drm_file *file,
						struct drm_panthor_group_priorities_info *arg)
{
	int prio;

	for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
		if (!group_priority_permit(file, prio))
			arg->allowed_mask |= BIT(prio);
	}
}

static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct drm_panthor_dev_query *args = data;
	struct drm_panthor_timestamp_info timestamp_info;
	struct drm_panthor_group_priorities_info priorities_info;
	int ret;

	if (!args->pointer) {
		switch (args->type) {
		case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
			args->size = sizeof(ptdev->gpu_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
			args->size = sizeof(ptdev->csif_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
			args->size = sizeof(timestamp_info);
			return 0;

		case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
			args->size = sizeof(priorities_info);
			return 0;

		default:
			return -EINVAL;
		}
	}

	switch (args->type) {
	case DRM_PANTHOR_DEV_QUERY_GPU_INFO:
		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->gpu_info);

	case DRM_PANTHOR_DEV_QUERY_CSIF_INFO:
		return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info);

	case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO:
		ret = panthor_query_timestamp_info(ptdev, &timestamp_info);
		if (ret)
			return ret;

		return PANTHOR_UOBJ_SET(args->pointer, args->size, timestamp_info);

	case DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO:
		panthor_query_group_priorities_info(file, &priorities_info);
		return PANTHOR_UOBJ_SET(args->pointer, args->size, priorities_info);

	default:
		return -EINVAL;
	}
}

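/*
 * Userspace-side sketch of the two-step query protocol implemented above
 * (illustration only, not part of the driver): a first call with
 * pointer == 0 returns the object size, a second call fills the buffer.
 *
 *	struct drm_panthor_dev_query query = {
 *		.type = DRM_PANTHOR_DEV_QUERY_GPU_INFO,
 *	};
 *	struct drm_panthor_gpu_info info;
 *
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query); // query.size now set
 *	query.pointer = (uintptr_t)&info;
 *	query.size = sizeof(info);
 *	drmIoctl(fd, DRM_IOCTL_PANTHOR_DEV_QUERY, &query);
 */
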
#define PANTHOR_VM_CREATE_FLAGS 0

static int panthor_ioctl_vm_create(struct drm_device *ddev, void *data,
				   struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_create *args = data;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	ret = panthor_vm_pool_create_vm(ptdev, pfile->vms, args);
	if (ret >= 0) {
		args->id = ret;
		ret = 0;
	}

	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_vm_destroy(struct drm_device *ddev, void *data,
				    struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_destroy *args = data;

	if (args->pad)
		return -EINVAL;

	return panthor_vm_pool_destroy_vm(pfile->vms, args->id);
}

#define PANTHOR_BO_FLAGS		DRM_PANTHOR_BO_NO_MMAP

static int panthor_ioctl_bo_create(struct drm_device *ddev, void *data,
				   struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_bo_create *args = data;
	struct panthor_vm *vm = NULL;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	if (!args->size || args->pad ||
	    (args->flags & ~PANTHOR_BO_FLAGS)) {
		ret = -EINVAL;
		goto out_dev_exit;
	}

	if (args->exclusive_vm_id) {
		vm = panthor_vm_pool_get_vm(pfile->vms, args->exclusive_vm_id);
		if (!vm) {
			ret = -EINVAL;
			goto out_dev_exit;
		}
	}

	ret = panthor_gem_create_with_handle(file, ddev, vm, &args->size,
					     args->flags, &args->handle);

	panthor_vm_put(vm);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_bo_mmap_offset(struct drm_device *ddev, void *data,
					struct drm_file *file)
{
	struct drm_panthor_bo_mmap_offset *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	args->offset = drm_vma_node_offset_addr(&obj->vma_node);

out:
	drm_gem_object_put(obj);
	return ret;
}

static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_submit *args = data;
	struct drm_panthor_queue_submit *jobs_args;
	struct panthor_submit_ctx ctx;
	int ret = 0, cookie;

	if (args->pad)
		return -EINVAL;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->queue_submits);
	if (ret)
		goto out_dev_exit;

	ret = panthor_submit_ctx_init(&ctx, file, args->queue_submits.count);
	if (ret)
		goto out_free_jobs_args;

	/* Create jobs and attach sync operations */
	for (u32 i = 0; i < args->queue_submits.count; i++) {
		const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
		struct drm_sched_job *job;

		job = panthor_job_create(pfile, args->group_handle, qsubmit);
		if (IS_ERR(job)) {
			ret = PTR_ERR(job);
			goto out_cleanup_submit_ctx;
		}

		ret = panthor_submit_ctx_add_job(&ctx, i, job, &qsubmit->syncs);
		if (ret)
			goto out_cleanup_submit_ctx;
	}

	/*
	 * Collect signal operations on all jobs, such that each job can pick
	 * from it for its dependencies and update the fence to signal when the
	 * job is submitted.
	 */
	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/*
	 * We acquire/prepare resvs on all jobs before proceeding with the
	 * dependency registration.
	 *
	 * This is solving two problems:
	 * 1. drm_sched_job_arm() and drm_sched_entity_push_job() must be
	 *    protected by a lock to make sure no concurrent access to the same
	 *    entity gets interleaved, which would mess up with the fence seqno
	 *    ordering. Luckily, one of the resvs being acquired is the VM resv,
	 *    and a scheduling entity is only bound to a single VM. As soon as
	 *    we acquire the VM resv, we should be safe.
	 * 2. Jobs might depend on fences that were issued by previous jobs in
	 *    the same batch, so we can't add dependencies on all jobs before
	 *    arming previous jobs and registering the fence to the signal
	 *    array, otherwise we might miss dependencies, or point to an
	 *    outdated fence.
	 */
	if (args->queue_submits.count > 0) {
		/* All jobs target the same group, so they also point to the same VM. */
		struct panthor_vm *vm = panthor_job_vm(ctx.jobs[0].job);

		drm_exec_until_all_locked(&ctx.exec) {
			ret = panthor_vm_prepare_mapped_bos_resvs(&ctx.exec, vm,
								  args->queue_submits.count);
			drm_exec_retry_on_contention(&ctx.exec);
			if (ret)
				goto out_cleanup_submit_ctx;
		}
	}

	/*
	 * Now that resvs are locked/prepared, we can iterate over each job to
	 * add the dependencies, arm the job fence, register the job fence to
	 * the signal array.
	 */
	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Nothing can fail after that point, so we can make our job fences
	 * visible to the outside world. Push jobs and set the job fences to
	 * the resv slots we reserved. This also pushes the fences to the
	 * syncobjs that are part of the signal array.
	 */
	panthor_submit_ctx_push_jobs(&ctx, panthor_job_update_resvs);

out_cleanup_submit_ctx:
	panthor_submit_ctx_cleanup(&ctx, panthor_job_put);

out_free_jobs_args:
	kvfree(jobs_args);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
				       struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_destroy *args = data;

	if (args->pad)
		return -EINVAL;

	return panthor_group_destroy(pfile, args->group_handle);
}

static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_create *args = data;
	struct drm_panthor_queue_create *queue_args;
	int ret;

	if (!args->queues.count)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(queue_args, &args->queues);
	if (ret)
		return ret;

	ret = group_priority_permit(file, args->priority);
	if (ret)
		goto out_free_queue_args;

	ret = panthor_group_create(pfile, args, queue_args);
	if (ret >= 0) {
		args->group_handle = ret;
		ret = 0;
	}

out_free_queue_args:
	kvfree(queue_args);
	return ret;
}

static int panthor_ioctl_group_get_state(struct drm_device *ddev, void *data,
					 struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_group_get_state *args = data;

	return panthor_group_get_state(pfile, args);
}

static int panthor_ioctl_tiler_heap_create(struct drm_device *ddev, void *data,
					   struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_tiler_heap_create *args = data;
	struct panthor_heap_pool *pool;
	struct panthor_vm *vm;
	int ret;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	pool = panthor_vm_get_heap_pool(vm, true);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto out_put_vm;
	}

	ret = panthor_heap_create(pool,
				  args->initial_chunk_count,
				  args->chunk_size,
				  args->max_chunks,
				  args->target_in_flight,
				  &args->tiler_heap_ctx_gpu_va,
				  &args->first_heap_chunk_gpu_va);
	if (ret < 0)
		goto out_put_heap_pool;

	/* Heap pools are per-VM. We combine the VM and HEAP id to make
	 * a unique heap handle.
	 */
	args->handle = (args->vm_id << 16) | ret;
	ret = 0;

out_put_heap_pool:
	panthor_heap_pool_put(pool);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

static int panthor_ioctl_tiler_heap_destroy(struct drm_device *ddev, void *data,
					    struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_tiler_heap_destroy *args = data;
	struct panthor_heap_pool *pool;
	struct panthor_vm *vm;
	int ret;

	if (args->pad)
		return -EINVAL;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->handle >> 16);
	if (!vm)
		return -EINVAL;

	pool = panthor_vm_get_heap_pool(vm, false);
	if (IS_ERR(pool)) {
		ret = PTR_ERR(pool);
		goto out_put_vm;
	}

	ret = panthor_heap_destroy(pool, args->handle & GENMASK(15, 0));
	panthor_heap_pool_put(pool);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

static int panthor_ioctl_vm_bind_async(struct drm_device *ddev,
				       struct drm_panthor_vm_bind *args,
				       struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_bind_op *jobs_args;
	struct panthor_submit_ctx ctx;
	struct panthor_vm *vm;
	int ret = 0;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
	if (ret)
		goto out_put_vm;

	ret = panthor_submit_ctx_init(&ctx, file, args->ops.count);
	if (ret)
		goto out_free_jobs_args;

	for (u32 i = 0; i < args->ops.count; i++) {
		struct drm_panthor_vm_bind_op *op = &jobs_args[i];
		struct drm_sched_job *job;

		job = panthor_vm_bind_job_create(file, vm, op);
		if (IS_ERR(job)) {
			ret = PTR_ERR(job);
			goto out_cleanup_submit_ctx;
		}

		ret = panthor_submit_ctx_add_job(&ctx, i, job, &op->syncs);
		if (ret)
			goto out_cleanup_submit_ctx;
	}

	ret = panthor_submit_ctx_collect_jobs_signal_ops(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Prepare reservation objects for each VM_BIND job. */
	drm_exec_until_all_locked(&ctx.exec) {
		for (u32 i = 0; i < ctx.job_count; i++) {
			ret = panthor_vm_bind_job_prepare_resvs(&ctx.exec, ctx.jobs[i].job);
			drm_exec_retry_on_contention(&ctx.exec);
			if (ret)
				goto out_cleanup_submit_ctx;
		}
	}

	ret = panthor_submit_ctx_add_deps_and_arm_jobs(&ctx);
	if (ret)
		goto out_cleanup_submit_ctx;

	/* Nothing can fail after that point. */
	panthor_submit_ctx_push_jobs(&ctx, panthor_vm_bind_job_update_resvs);

out_cleanup_submit_ctx:
	panthor_submit_ctx_cleanup(&ctx, panthor_vm_bind_job_put);

out_free_jobs_args:
	kvfree(jobs_args);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

static int panthor_ioctl_vm_bind_sync(struct drm_device *ddev,
				      struct drm_panthor_vm_bind *args,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_bind_op *jobs_args;
	struct panthor_vm *vm;
	int ret;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	ret = PANTHOR_UOBJ_GET_ARRAY(jobs_args, &args->ops);
	if (ret)
		goto out_put_vm;

	for (u32 i = 0; i < args->ops.count; i++) {
		ret = panthor_vm_bind_exec_sync_op(file, vm, &jobs_args[i]);
		if (ret) {
			/* Update ops.count so the user knows where things failed. */
			args->ops.count = i;
			break;
		}
	}

	kvfree(jobs_args);

out_put_vm:
	panthor_vm_put(vm);
	return ret;
}

#define PANTHOR_VM_BIND_FLAGS DRM_PANTHOR_VM_BIND_ASYNC

static int panthor_ioctl_vm_bind(struct drm_device *ddev, void *data,
				 struct drm_file *file)
{
	struct drm_panthor_vm_bind *args = data;
	int cookie, ret;

	if (!drm_dev_enter(ddev, &cookie))
		return -ENODEV;

	if (args->flags & DRM_PANTHOR_VM_BIND_ASYNC)
		ret = panthor_ioctl_vm_bind_async(ddev, args, file);
	else
		ret = panthor_ioctl_vm_bind_sync(ddev, args, file);

	drm_dev_exit(cookie);
	return ret;
}

static int panthor_ioctl_vm_get_state(struct drm_device *ddev, void *data,
				      struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;
	struct drm_panthor_vm_get_state *args = data;
	struct panthor_vm *vm;

	vm = panthor_vm_pool_get_vm(pfile->vms, args->vm_id);
	if (!vm)
		return -EINVAL;

	if (panthor_vm_is_unusable(vm))
		args->state = DRM_PANTHOR_VM_STATE_UNUSABLE;
	else
		args->state = DRM_PANTHOR_VM_STATE_USABLE;

	panthor_vm_put(vm);
	return 0;
}

static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	struct panthor_file *pfile;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
	if (!pfile) {
		ret = -ENOMEM;
		goto err_put_mod;
	}

	pfile->ptdev = ptdev;

	ret = panthor_vm_pool_create(pfile);
	if (ret)
		goto err_free_file;

	ret = panthor_group_pool_create(pfile);
	if (ret)
		goto err_destroy_vm_pool;

	file->driver_priv = pfile;
	return 0;

err_destroy_vm_pool:
	panthor_vm_pool_destroy(pfile);

err_free_file:
	kfree(pfile);

err_put_mod:
	module_put(THIS_MODULE);
	return ret;
}

static void
panthor_postclose(struct drm_device *ddev, struct drm_file *file)
{
	struct panthor_file *pfile = file->driver_priv;

	panthor_group_pool_destroy(pfile);
	panthor_vm_pool_destroy(pfile);

	kfree(pfile);
	module_put(THIS_MODULE);
}

static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
#define PANTHOR_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANTHOR_##n, panthor_ioctl_##func, flags)

	PANTHOR_IOCTL(DEV_QUERY, dev_query, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_CREATE, vm_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_DESTROY, vm_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_BIND, vm_bind, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(VM_GET_STATE, vm_get_state, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_CREATE, bo_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(BO_MMAP_OFFSET, bo_mmap_offset, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_CREATE, group_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_DESTROY, group_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_GET_STATE, group_get_state, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(TILER_HEAP_CREATE, tiler_heap_create, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
	PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
};

static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file = filp->private_data;
	struct panthor_file *pfile = file->driver_priv;
	struct panthor_device *ptdev = pfile->ptdev;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	int ret, cookie;

	if (!drm_dev_enter(file->minor->dev, &cookie))
		return -ENODEV;

#ifdef CONFIG_ARM64
	/*
	 * With 32-bit systems being limited by the 32-bit representation of
	 * mmap2's pgoffset field, we need to make the MMIO offset arch
	 * specific. This converts a user MMIO offset into something the kernel
	 * driver understands.
	 */
	if (test_tsk_thread_flag(current, TIF_32BIT) &&
	    offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
		offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
			  DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
	}
#endif

	if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
		ret = panthor_device_mmap_io(ptdev, vma);
	else
		ret = drm_gem_mmap(filp, vma);

	drm_dev_exit(cookie);
	return ret;
}

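/*
 * Worked example of the compat MMIO offset fixup above (illustration only;
 * "delta" is just a name for the offset within the MMIO range):
 *
 *	user offset  = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT + delta
 *	fixed offset = user offset
 *		       + (DRM_PANTHOR_USER_MMIO_OFFSET_64BIT
 *			  - DRM_PANTHOR_USER_MMIO_OFFSET_32BIT)
 *		     = DRM_PANTHOR_USER_MMIO_OFFSET_64BIT + delta
 *
 * On a 64-bit kernel DRM_PANTHOR_USER_MMIO_OFFSET is the 64-bit value, so the
 * rebased offset takes the panthor_device_mmap_io() branch instead of being
 * treated as a GEM mmap offset.
 */
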
static void panthor_gpu_show_fdinfo(struct panthor_device *ptdev,
				    struct panthor_file *pfile,
				    struct drm_printer *p)
{
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_ALL)
		panthor_fdinfo_gather_group_samples(pfile);

	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_TIMESTAMP) {
#ifdef CONFIG_ARM_ARCH_TIMER
		drm_printf(p, "drm-engine-panthor:\t%llu ns\n",
			   DIV_ROUND_UP_ULL((pfile->stats.time * NSEC_PER_SEC),
					    arch_timer_get_cntfrq()));
#endif
	}
	if (ptdev->profile_mask & PANTHOR_DEVICE_PROFILING_CYCLES)
		drm_printf(p, "drm-cycles-panthor:\t%llu\n", pfile->stats.cycles);

	drm_printf(p, "drm-maxfreq-panthor:\t%lu Hz\n", ptdev->fast_rate);
	drm_printf(p, "drm-curfreq-panthor:\t%lu Hz\n", ptdev->current_frequency);
}

static void panthor_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct panthor_device *ptdev = container_of(dev, struct panthor_device, base);

	panthor_gpu_show_fdinfo(ptdev, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations panthor_drm_driver_fops = {
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.compat_ioctl = drm_compat_ioctl,
	.poll = drm_poll,
	.read = drm_read,
	.llseek = noop_llseek,
	.mmap = panthor_mmap,
	.show_fdinfo = drm_show_fdinfo,
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

#ifdef CONFIG_DEBUG_FS
static void panthor_debugfs_init(struct drm_minor *minor)
{
	panthor_mmu_debugfs_init(minor);
}
#endif

/*
 * PanCSF driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query
 * - 1.2 - adds DEV_QUERY_GROUP_PRIORITIES_INFO query
 *       - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
 */
static const struct drm_driver panthor_drm_driver = {
	.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
			   DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = panthor_open,
	.postclose = panthor_postclose,
	.show_fdinfo = panthor_show_fdinfo,
	.ioctls = panthor_drm_driver_ioctls,
	.num_ioctls = ARRAY_SIZE(panthor_drm_driver_ioctls),
	.fops = &panthor_drm_driver_fops,
	.name = "panthor",
	.desc = "Panthor DRM driver",
	.major = 1,
	.minor = 2,

	.gem_create_object = panthor_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = panthor_debugfs_init,
#endif
};

static int panthor_probe(struct platform_device *pdev)
{
	struct panthor_device *ptdev;

	ptdev = devm_drm_dev_alloc(&pdev->dev, &panthor_drm_driver,
				   struct panthor_device, base);
	if (IS_ERR(ptdev))
		return -ENOMEM;

	platform_set_drvdata(pdev, ptdev);

	return panthor_device_init(ptdev);
}

static void panthor_remove(struct platform_device *pdev)
{
	struct panthor_device *ptdev = platform_get_drvdata(pdev);

	panthor_device_unplug(ptdev);
}

static ssize_t profiling_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%d\n", ptdev->profile_mask);
}

static ssize_t profiling_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	u32 value;
	int err;

	err = kstrtou32(buf, 0, &value);
	if (err)
		return err;

	if ((value & ~PANTHOR_DEVICE_PROFILING_ALL) != 0)
		return -EINVAL;

	ptdev->profile_mask = value;

	return len;
}

static DEVICE_ATTR_RW(profiling);

static struct attribute *panthor_attrs[] = {
	&dev_attr_profiling.attr,
	NULL,
};

ATTRIBUTE_GROUPS(panthor);

static const struct of_device_id dt_match[] = {
	{ .compatible = "rockchip,rk3588-mali" },
	{ .compatible = "arm,mali-valhall-csf" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);

static DEFINE_RUNTIME_DEV_PM_OPS(panthor_pm_ops,
				 panthor_device_suspend,
				 panthor_device_resume,
				 NULL);

static struct platform_driver panthor_driver = {
	.probe = panthor_probe,
	.remove = panthor_remove,
	.driver = {
		.name = "panthor",
		.pm = pm_ptr(&panthor_pm_ops),
		.of_match_table = dt_match,
		.dev_groups = panthor_groups,
	},
};

/*
 * Workqueue used to clean up stuff.
 *
 * We create a dedicated workqueue so we can drain on unplug and
 * make sure all resources are freed before the module is unloaded.
 */
struct workqueue_struct *panthor_cleanup_wq;

static int __init panthor_init(void)
{
	int ret;

	ret = panthor_mmu_pt_cache_init();
	if (ret)
		return ret;

	panthor_cleanup_wq = alloc_workqueue("panthor-cleanup", WQ_UNBOUND, 0);
	if (!panthor_cleanup_wq) {
		pr_err("panthor: Failed to allocate the workqueues\n");
		ret = -ENOMEM;
		goto err_mmu_pt_cache_fini;
	}

	ret = platform_driver_register(&panthor_driver);
	if (ret)
		goto err_destroy_cleanup_wq;

	return 0;

err_destroy_cleanup_wq:
	destroy_workqueue(panthor_cleanup_wq);

err_mmu_pt_cache_fini:
	panthor_mmu_pt_cache_fini();
	return ret;
}
module_init(panthor_init);

static void __exit panthor_exit(void)
{
	platform_driver_unregister(&panthor_driver);
	destroy_workqueue(panthor_cleanup_wq);
	panthor_mmu_pt_cache_fini();
}
module_exit(panthor_exit);

MODULE_AUTHOR("Panthor Project Developers");
MODULE_DESCRIPTION("Panthor DRM Driver");
MODULE_LICENSE("Dual MIT/GPL");