// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
/* Copyright 2019 Collabora ltd. */

#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pagemap.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>

#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
#include "panfrost_job.h"
#include "panfrost_gpu.h"
#include "panfrost_perfcnt.h"

static bool unstable_ioctls;
module_param_unsafe(unstable_ioctls, bool, 0600);

static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
{
	struct drm_panfrost_get_param *param = data;
	struct panfrost_device *pfdev = ddev->dev_private;
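
	/*
	 * GET_PARAM is a straight table lookup: every case generated by the
	 * macros below copies one field of the GPU feature block (read out
	 * once at device init) into param->value.
	 */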
#define PANFROST_FEATURE(name, member) \
	case DRM_PANFROST_PARAM_ ## name: \
		param->value = pfdev->features.member; \
		break

#define PANFROST_FEATURE_ARRAY(name, member, max) \
	case DRM_PANFROST_PARAM_ ## name ## 0 ... \
	     DRM_PANFROST_PARAM_ ## name ## max: \
		param->value = pfdev->features.member[param->param - \
			DRM_PANFROST_PARAM_ ## name ## 0]; \
		break

	switch (param->param) {
		PANFROST_FEATURE(GPU_PROD_ID, id);
		PANFROST_FEATURE(GPU_REVISION, revision);
		PANFROST_FEATURE(SHADER_PRESENT, shader_present);
		PANFROST_FEATURE(TILER_PRESENT, tiler_present);
		PANFROST_FEATURE(L2_PRESENT, l2_present);
		PANFROST_FEATURE(STACK_PRESENT, stack_present);
		PANFROST_FEATURE(AS_PRESENT, as_present);
		PANFROST_FEATURE(JS_PRESENT, js_present);
		PANFROST_FEATURE(L2_FEATURES, l2_features);
		PANFROST_FEATURE(CORE_FEATURES, core_features);
		PANFROST_FEATURE(TILER_FEATURES, tiler_features);
		PANFROST_FEATURE(MEM_FEATURES, mem_features);
		PANFROST_FEATURE(MMU_FEATURES, mmu_features);
		PANFROST_FEATURE(THREAD_FEATURES, thread_features);
		PANFROST_FEATURE(MAX_THREADS, max_threads);
		PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ,
				thread_max_workgroup_sz);
		PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ,
				thread_max_barrier_sz);
		PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features);
		PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3);
		PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15);
		PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups);
		PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc);
	default:
		return -EINVAL;
	}

	return 0;
}
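
/*
 * CREATE_BO: allocate a shmem-backed GEM object, return a handle for it to
 * userspace and report the GPU virtual address of its mapping in the calling
 * file's address space via args->offset.
 */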
static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_file_priv *priv = file->driver_priv;
	struct panfrost_gem_object *bo;
	struct drm_panfrost_create_bo *args = data;
	struct panfrost_gem_mapping *mapping;

	if (!args->size || args->pad ||
	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
		return -EINVAL;

	/* Heaps should never be executable */
	if ((args->flags & PANFROST_BO_HEAP) &&
	    !(args->flags & PANFROST_BO_NOEXEC))
		return -EINVAL;

	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
					     &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	mapping = panfrost_gem_mapping_get(bo, priv);
	if (!mapping) {
		drm_gem_object_put(&bo->base.base);
		return -EINVAL;
	}

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);

	return 0;
}

/**
 * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve handles from userspace to BOs and attach them to the job.
 *
 * Note that this function doesn't need to unreference the BOs on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_lookup_bos(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct panfrost_gem_object *bo;
	unsigned int i;
	int ret;

	job->bo_count = args->bo_handle_count;

	if (!job->bo_count)
		return 0;

	job->implicit_fences = kvmalloc_array(job->bo_count,
				  sizeof(struct dma_fence *),
				  GFP_KERNEL | __GFP_ZERO);
	if (!job->implicit_fences)
		return -ENOMEM;

	ret = drm_gem_objects_lookup(file_priv,
				     (void __user *)(uintptr_t)args->bo_handles,
				     job->bo_count, &job->bos);
	if (ret)
		return ret;

	job->mappings = kvmalloc_array(job->bo_count,
				       sizeof(struct panfrost_gem_mapping *),
				       GFP_KERNEL | __GFP_ZERO);
	if (!job->mappings)
		return -ENOMEM;

	for (i = 0; i < job->bo_count; i++) {
		struct panfrost_gem_mapping *mapping;

		bo = to_panfrost_bo(job->bos[i]);
		mapping = panfrost_gem_mapping_get(bo, priv);
		if (!mapping) {
			ret = -EINVAL;
			break;
		}

		atomic_inc(&bo->gpu_usecount);
		job->mappings[i] = mapping;
	}

	return ret;
}

/**
 * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
 * referenced by the job.
 * @dev: DRM device
 * @file_priv: DRM file for this fd
 * @args: IOCTL args
 * @job: job being set up
 *
 * Resolve syncobjs from userspace to fences and attach them to the job.
 *
 * Note that this function doesn't need to unreference the fences on
 * failure, because that will happen at panfrost_job_cleanup() time.
 */
static int
panfrost_copy_in_sync(struct drm_device *dev,
		  struct drm_file *file_priv,
		  struct drm_panfrost_submit *args,
		  struct panfrost_job *job)
{
	u32 *handles;
	int ret = 0;
	int i;

	job->in_fence_count = args->in_sync_count;

	if (!job->in_fence_count)
		return 0;

	job->in_fences = kvmalloc_array(job->in_fence_count,
					sizeof(struct dma_fence *),
					GFP_KERNEL | __GFP_ZERO);
	if (!job->in_fences) {
		DRM_DEBUG("Failed to allocate job in fences\n");
		return -ENOMEM;
	}

	handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
		goto fail;
	}

	if (copy_from_user(handles,
			   (void __user *)(uintptr_t)args->in_syncs,
			   job->in_fence_count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in syncobj handles\n");
		goto fail;
	}

	for (i = 0; i < job->in_fence_count; i++) {
		ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
					     &job->in_fences[i]);
		if (ret == -EINVAL)
			goto fail;
	}

fail:
	kvfree(handles);
	return ret;
}
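
/*
 * SUBMIT: build a panfrost_job out of the ioctl arguments (input fences and
 * BO mappings), push it to the scheduler, and if the caller supplied an
 * output syncobj, install the job's render-done fence in it.
 */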
static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_panfrost_submit *args = data;
	struct drm_syncobj *sync_out = NULL;
	struct panfrost_job *job;
	int ret = 0;

	if (!args->jc)
		return -EINVAL;

	if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
		return -EINVAL;

	if (args->out_sync > 0) {
		sync_out = drm_syncobj_find(file, args->out_sync);
		if (!sync_out)
			return -ENODEV;
	}

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job) {
		ret = -ENOMEM;
		goto fail_out_sync;
	}

	kref_init(&job->refcount);

	job->pfdev = pfdev;
	job->jc = args->jc;
	job->requirements = args->requirements;
	job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
	job->file_priv = file->driver_priv;

	ret = panfrost_copy_in_sync(dev, file, args, job);
	if (ret)
		goto fail_job;

	ret = panfrost_lookup_bos(dev, file, args, job);
	if (ret)
		goto fail_job;

	ret = panfrost_job_push(job);
	if (ret)
		goto fail_job;

	/* Update the return sync object for the job */
	if (sync_out)
		drm_syncobj_replace_fence(sync_out, job->render_done_fence);

fail_job:
	panfrost_job_put(job);
fail_out_sync:
	if (sync_out)
		drm_syncobj_put(sync_out);

	return ret;
}
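
/*
 * WAIT_BO: wait for all fences attached to a BO's reservation object.
 * args->timeout_ns is an absolute CLOCK_MONOTONIC timestamp; a wait that
 * expires reports -ETIMEDOUT (or -EBUSY for an already-expired timeout).
 */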
static int
panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	long ret;
	struct drm_panfrost_wait_bo *args = data;
	struct drm_gem_object *gem_obj;
	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);

	if (args->pad)
		return -EINVAL;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj)
		return -ENOENT;

	ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true,
					true, timeout);
	if (!ret)
		ret = timeout ? -ETIMEDOUT : -EBUSY;

	drm_gem_object_put(gem_obj);

	return ret;
}
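
/*
 * MMAP_BO: hand back the fake offset userspace passes to mmap() on the DRM
 * fd to CPU-map the object. Heap BOs are refused since their pages are
 * faulted in on demand and never pinned.
 */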
static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_panfrost_mmap_bo *args = data;
	struct drm_gem_object *gem_obj;
	int ret;

	if (args->flags != 0) {
		DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
		return -EINVAL;
	}

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	/* Don't allow mmapping of heap objects as pages are not pinned. */
	if (to_panfrost_bo(gem_obj)->is_heap) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(gem_obj);
	if (ret == 0)
		args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);

out:
	drm_gem_object_put(gem_obj);
	return ret;
}
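
/*
 * GET_BO_OFFSET: report the GPU virtual address at which a BO is mapped in
 * the calling file's address space.
 */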
static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_get_bo_offset *args = data;
	struct panfrost_gem_mapping *mapping;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}
	bo = to_panfrost_bo(gem_obj);

	mapping = panfrost_gem_mapping_get(bo, priv);
	drm_gem_object_put(gem_obj);

	if (!mapping)
		return -EINVAL;

	args->offset = mapping->mmnode.start << PAGE_SHIFT;
	panfrost_gem_mapping_put(mapping);
	return 0;
}
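
/*
 * MADVISE: mark a BO purgeable (DONTNEED) or needed again (WILLNEED).
 * Purgeable BOs go on the device-wide shrinker list so their backing pages
 * can be reclaimed under memory pressure.
 */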
static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct panfrost_file_priv *priv = file_priv->driver_priv;
	struct drm_panfrost_madvise *args = data;
	struct panfrost_device *pfdev = dev->dev_private;
	struct drm_gem_object *gem_obj;
	struct panfrost_gem_object *bo;
	int ret = 0;

	gem_obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!gem_obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
		return -ENOENT;
	}

	bo = to_panfrost_bo(gem_obj);

	mutex_lock(&pfdev->shrinker_lock);
	mutex_lock(&bo->mappings.lock);
	if (args->madv == PANFROST_MADV_DONTNEED) {
		struct panfrost_gem_mapping *first;

		first = list_first_entry(&bo->mappings.list,
					 struct panfrost_gem_mapping,
					 node);

		/*
		 * If we want to mark the BO purgeable, there must be only one
		 * user: the caller FD.
		 * We could do something smarter and mark the BO purgeable only
		 * when all its users have marked it purgeable, but globally
		 * visible/shared BOs are likely to never be marked purgeable
		 * anyway, so let's not bother.
		 */
		if (!list_is_singular(&bo->mappings.list) ||
		    WARN_ON_ONCE(first->mmu != &priv->mmu)) {
			ret = -EINVAL;
			goto out_unlock_mappings;
		}
	}

	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);

	if (args->retained) {
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_add_tail(&bo->base.madv_list,
				      &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}

out_unlock_mappings:
	mutex_unlock(&bo->mappings.lock);
	mutex_unlock(&pfdev->shrinker_lock);

	drm_gem_object_put(gem_obj);
	return ret;
}

int panfrost_unstable_ioctl_check(void)
{
	if (!unstable_ioctls)
		return -ENOSYS;

	return 0;
}

#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)
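
/*
 * The drm_mm "color" carries the BO flags so the allocator can keep
 * executable mappings clear of 4GB boundaries: an executable buffer must not
 * start or end on such a boundary, must not begin within 16MB below one, and
 * must fit entirely inside a single 4GB segment.
 */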
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}
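
/*
 * Each DRM file gets its own GPU address space: a drm_mm range allocator
 * covering [32MB, 4GB), per-file page tables set up by
 * panfrost_mmu_pgtable_alloc() and per-file scheduler state set up by
 * panfrost_job_open().
 */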
static int
panfrost_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;
	struct panfrost_device *pfdev = dev->dev_private;
	struct panfrost_file_priv *panfrost_priv;

	panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
	if (!panfrost_priv)
		return -ENOMEM;

	panfrost_priv->pfdev = pfdev;
	file->driver_priv = panfrost_priv;

	spin_lock_init(&panfrost_priv->mm_lock);

	/* 4G enough for now. can be 48-bit */
	drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust;

	ret = panfrost_mmu_pgtable_alloc(panfrost_priv);
	if (ret)
		goto err_pgtable;

	ret = panfrost_job_open(panfrost_priv);
	if (ret)
		goto err_job;

	return 0;

err_job:
	panfrost_mmu_pgtable_free(panfrost_priv);
err_pgtable:
	drm_mm_takedown(&panfrost_priv->mm);
	kfree(panfrost_priv);
	return ret;
}

static void
panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct panfrost_file_priv *panfrost_priv = file->driver_priv;

	panfrost_perfcnt_close(file);
	panfrost_job_close(panfrost_priv);

	panfrost_mmu_pgtable_free(panfrost_priv);
	drm_mm_takedown(&panfrost_priv->mm);
	kfree(panfrost_priv);
}
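
/*
 * Typical userspace flow (e.g. Mesa's panfrost driver): GET_PARAM to probe
 * the GPU, CREATE_BO + MMAP_BO to allocate and CPU-map buffers, SUBMIT to
 * queue a job, then WAIT_BO or the returned syncobj to wait for completion.
 * Every ioctl is allowed on render nodes (DRM_RENDER_ALLOW); the PERFCNT
 * ones are additionally gated behind the unstable_ioctls module parameter
 * (see panfrost_unstable_ioctl_check()).
 */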
static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
#define PANFROST_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)

	PANFROST_IOCTL(SUBMIT,		submit,		DRM_RENDER_ALLOW),
	PANFROST_IOCTL(WAIT_BO,		wait_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(CREATE_BO,	create_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MMAP_BO,		mmap_bo,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_PARAM,	get_param,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(GET_BO_OFFSET,	get_bo_offset,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_ENABLE,	perfcnt_enable,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(PERFCNT_DUMP,	perfcnt_dump,	DRM_RENDER_ALLOW),
	PANFROST_IOCTL(MADVISE,		madvise,	DRM_RENDER_ALLOW),
};

DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);

/*
 * Panfrost driver version:
 * - 1.0 - initial interface
 * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO
 */
static const struct drm_driver panfrost_drm_driver = {
	.driver_features	= DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
	.open			= panfrost_open,
	.postclose		= panfrost_postclose,
	.ioctls			= panfrost_drm_driver_ioctls,
	.num_ioctls		= ARRAY_SIZE(panfrost_drm_driver_ioctls),
	.fops			= &panfrost_drm_driver_fops,
	.name			= "panfrost",
	.desc			= "panfrost DRM",
	.major			= 1,
	.minor			= 1,

	.gem_create_object	= panfrost_gem_create_object,
	.prime_handle_to_fd	= drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	= drm_gem_prime_fd_to_handle,
	.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
	.gem_prime_mmap		= drm_gem_prime_mmap,
};
static int panfrost_probe(struct platform_device *pdev)
{
	struct panfrost_device *pfdev;
	struct drm_device *ddev;
	int err;

	pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
	if (!pfdev)
		return -ENOMEM;

	pfdev->dev = &pdev->dev;

	platform_set_drvdata(pdev, pfdev);

	pfdev->comp = of_device_get_match_data(&pdev->dev);
	if (!pfdev->comp)
		return -ENODEV;

	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;

	/* Allocate and initialize the DRM device. */
	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
	if (IS_ERR(ddev))
		return PTR_ERR(ddev);

	ddev->dev_private = pfdev;
	pfdev->ddev = ddev;

	mutex_init(&pfdev->shrinker_lock);
	INIT_LIST_HEAD(&pfdev->shrinker_list);

	err = panfrost_device_init(pfdev);
	if (err) {
		if (err != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Fatal error during GPU init\n");
		goto err_out0;
	}

	pm_runtime_set_active(pfdev->dev);
	pm_runtime_mark_last_busy(pfdev->dev);
	pm_runtime_enable(pfdev->dev);
	pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
	pm_runtime_use_autosuspend(pfdev->dev);

	/*
	 * Register the DRM device with the core and the connectors with
	 * sysfs.
	 */
	err = drm_dev_register(ddev, 0);
	if (err < 0)
		goto err_out1;

	panfrost_gem_shrinker_init(ddev);

	return 0;

err_out1:
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);
err_out0:
	drm_dev_put(ddev);
	return err;
}

static int panfrost_remove(struct platform_device *pdev)
{
	struct panfrost_device *pfdev = platform_get_drvdata(pdev);
	struct drm_device *ddev = pfdev->ddev;

	drm_dev_unregister(ddev);
	panfrost_gem_shrinker_cleanup(ddev);

	pm_runtime_get_sync(pfdev->dev);
	pm_runtime_disable(pfdev->dev);
	panfrost_device_fini(pfdev);
	pm_runtime_set_suspended(pfdev->dev);

	drm_dev_put(ddev);
	return 0;
}
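
/*
 * Per-compatible data: the generic Mali entries use a single "mali" power
 * supply, while the Amlogic SoCs additionally need a vendor quirk
 * (panfrost_gpu_amlogic_quirk) applied when the GPU is brought up.
 */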
static const char * const default_supplies[] = { "mali" };
static const struct panfrost_compatible default_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.num_pm_domains = 1, /* optional */
	.pm_domain_names = NULL,
};

static const struct panfrost_compatible amlogic_data = {
	.num_supplies = ARRAY_SIZE(default_supplies),
	.supply_names = default_supplies,
	.vendor_quirk = panfrost_gpu_amlogic_quirk,
};

static const struct of_device_id dt_match[] = {
	/* Set first to probe before the generic compatibles */
	{ .compatible = "amlogic,meson-gxm-mali",
	  .data = &amlogic_data, },
	{ .compatible = "amlogic,meson-g12a-mali",
	  .data = &amlogic_data, },
	{ .compatible = "arm,mali-t604", .data = &default_data, },
	{ .compatible = "arm,mali-t624", .data = &default_data, },
	{ .compatible = "arm,mali-t628", .data = &default_data, },
	{ .compatible = "arm,mali-t720", .data = &default_data, },
	{ .compatible = "arm,mali-t760", .data = &default_data, },
	{ .compatible = "arm,mali-t820", .data = &default_data, },
	{ .compatible = "arm,mali-t830", .data = &default_data, },
	{ .compatible = "arm,mali-t860", .data = &default_data, },
	{ .compatible = "arm,mali-t880", .data = &default_data, },
	{ .compatible = "arm,mali-bifrost", .data = &default_data, },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
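
/*
 * System sleep reuses the runtime-PM paths: pm_runtime_force_suspend() and
 * pm_runtime_force_resume() invoke the same panfrost_device_suspend()/
 * panfrost_device_resume() callbacks used for autosuspend.
 */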
static const struct dev_pm_ops panfrost_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
};

static struct platform_driver panfrost_driver = {
	.probe		= panfrost_probe,
	.remove		= panfrost_remove,
	.driver		= {
		.name	= "panfrost",
		.pm	= &panfrost_pm_ops,
		.of_match_table	= dt_match,
	},
};
module_platform_driver(panfrost_driver);

MODULE_AUTHOR("Panfrost Project Developers");
MODULE_DESCRIPTION("Panfrost DRM Driver");
MODULE_LICENSE("GPL v2");