// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */
#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
31 static void load_gpu(struct drm_device
*dev
)
33 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
36 for (i
= 0; i
< ETNA_MAX_PIPES
; i
++) {
37 struct etnaviv_gpu
*g
= priv
->gpu
[i
];
42 ret
= etnaviv_gpu_init(g
);
49 static int etnaviv_open(struct drm_device
*dev
, struct drm_file
*file
)
51 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
52 struct etnaviv_file_private
*ctx
;
55 ctx
= kzalloc(sizeof(*ctx
), GFP_KERNEL
);
59 ctx
->mmu
= etnaviv_iommu_context_init(priv
->mmu_global
,
60 priv
->cmdbuf_suballoc
);
66 for (i
= 0; i
< ETNA_MAX_PIPES
; i
++) {
67 struct etnaviv_gpu
*gpu
= priv
->gpu
[i
];
68 struct drm_gpu_scheduler
*sched
;
72 drm_sched_entity_init(&ctx
->sched_entity
[i
],
73 DRM_SCHED_PRIORITY_NORMAL
, &sched
,
78 file
->driver_priv
= ctx
;
87 static void etnaviv_postclose(struct drm_device
*dev
, struct drm_file
*file
)
89 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
90 struct etnaviv_file_private
*ctx
= file
->driver_priv
;
93 for (i
= 0; i
< ETNA_MAX_PIPES
; i
++) {
94 struct etnaviv_gpu
*gpu
= priv
->gpu
[i
];
97 drm_sched_entity_destroy(&ctx
->sched_entity
[i
]);
100 etnaviv_iommu_context_put(ctx
->mmu
);
109 #ifdef CONFIG_DEBUG_FS
110 static int etnaviv_gem_show(struct drm_device
*dev
, struct seq_file
*m
)
112 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
114 etnaviv_gem_describe_objects(priv
, m
);
119 static int etnaviv_mm_show(struct drm_device
*dev
, struct seq_file
*m
)
121 struct drm_printer p
= drm_seq_file_printer(m
);
123 read_lock(&dev
->vma_offset_manager
->vm_lock
);
124 drm_mm_print(&dev
->vma_offset_manager
->vm_addr_space_mm
, &p
);
125 read_unlock(&dev
->vma_offset_manager
->vm_lock
);
130 static int etnaviv_mmu_show(struct etnaviv_gpu
*gpu
, struct seq_file
*m
)
132 struct drm_printer p
= drm_seq_file_printer(m
);
133 struct etnaviv_iommu_context
*mmu_context
;
135 seq_printf(m
, "Active Objects (%s):\n", dev_name(gpu
->dev
));
138 * Lock the GPU to avoid a MMU context switch just now and elevate
139 * the refcount of the current context to avoid it disappearing from
142 mutex_lock(&gpu
->lock
);
143 mmu_context
= gpu
->mmu_context
;
145 etnaviv_iommu_context_get(mmu_context
);
146 mutex_unlock(&gpu
->lock
);
151 mutex_lock(&mmu_context
->lock
);
152 drm_mm_print(&mmu_context
->mm
, &p
);
153 mutex_unlock(&mmu_context
->lock
);
155 etnaviv_iommu_context_put(mmu_context
);
160 static void etnaviv_buffer_dump(struct etnaviv_gpu
*gpu
, struct seq_file
*m
)
162 struct etnaviv_cmdbuf
*buf
= &gpu
->buffer
;
163 u32 size
= buf
->size
;
164 u32
*ptr
= buf
->vaddr
;
167 seq_printf(m
, "virt %p - phys 0x%llx - free 0x%08x\n",
168 buf
->vaddr
, (u64
)etnaviv_cmdbuf_get_pa(buf
),
169 size
- buf
->user_size
);
171 for (i
= 0; i
< size
/ 4; i
++) {
175 seq_printf(m
, "\t0x%p: ", ptr
+ i
);
176 seq_printf(m
, "%08x ", *(ptr
+ i
));
181 static int etnaviv_ring_show(struct etnaviv_gpu
*gpu
, struct seq_file
*m
)
183 seq_printf(m
, "Ring Buffer (%s): ", dev_name(gpu
->dev
));
185 mutex_lock(&gpu
->lock
);
186 etnaviv_buffer_dump(gpu
, m
);
187 mutex_unlock(&gpu
->lock
);
192 static int show_unlocked(struct seq_file
*m
, void *arg
)
194 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
195 struct drm_device
*dev
= node
->minor
->dev
;
196 int (*show
)(struct drm_device
*dev
, struct seq_file
*m
) =
197 node
->info_ent
->data
;
202 static int show_each_gpu(struct seq_file
*m
, void *arg
)
204 struct drm_info_node
*node
= (struct drm_info_node
*) m
->private;
205 struct drm_device
*dev
= node
->minor
->dev
;
206 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
207 struct etnaviv_gpu
*gpu
;
208 int (*show
)(struct etnaviv_gpu
*gpu
, struct seq_file
*m
) =
209 node
->info_ent
->data
;
213 for (i
= 0; i
< ETNA_MAX_PIPES
; i
++) {
226 static struct drm_info_list etnaviv_debugfs_list
[] = {
227 {"gpu", show_each_gpu
, 0, etnaviv_gpu_debugfs
},
228 {"gem", show_unlocked
, 0, etnaviv_gem_show
},
229 { "mm", show_unlocked
, 0, etnaviv_mm_show
},
230 {"mmu", show_each_gpu
, 0, etnaviv_mmu_show
},
231 {"ring", show_each_gpu
, 0, etnaviv_ring_show
},
234 static int etnaviv_debugfs_init(struct drm_minor
*minor
)
236 struct drm_device
*dev
= minor
->dev
;
239 ret
= drm_debugfs_create_files(etnaviv_debugfs_list
,
240 ARRAY_SIZE(etnaviv_debugfs_list
),
241 minor
->debugfs_root
, minor
);
244 dev_err(dev
->dev
, "could not install etnaviv_debugfs_list\n");
256 static int etnaviv_ioctl_get_param(struct drm_device
*dev
, void *data
,
257 struct drm_file
*file
)
259 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
260 struct drm_etnaviv_param
*args
= data
;
261 struct etnaviv_gpu
*gpu
;
263 if (args
->pipe
>= ETNA_MAX_PIPES
)
266 gpu
= priv
->gpu
[args
->pipe
];
270 return etnaviv_gpu_get_param(gpu
, args
->param
, &args
->value
);
273 static int etnaviv_ioctl_gem_new(struct drm_device
*dev
, void *data
,
274 struct drm_file
*file
)
276 struct drm_etnaviv_gem_new
*args
= data
;
278 if (args
->flags
& ~(ETNA_BO_CACHED
| ETNA_BO_WC
| ETNA_BO_UNCACHED
|
282 return etnaviv_gem_new_handle(dev
, file
, args
->size
,
283 args
->flags
, &args
->handle
);
286 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device
*dev
, void *data
,
287 struct drm_file
*file
)
289 struct drm_etnaviv_gem_cpu_prep
*args
= data
;
290 struct drm_gem_object
*obj
;
293 if (args
->op
& ~(ETNA_PREP_READ
| ETNA_PREP_WRITE
| ETNA_PREP_NOSYNC
))
296 obj
= drm_gem_object_lookup(file
, args
->handle
);
300 ret
= etnaviv_gem_cpu_prep(obj
, args
->op
, &args
->timeout
);
302 drm_gem_object_put_unlocked(obj
);
307 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device
*dev
, void *data
,
308 struct drm_file
*file
)
310 struct drm_etnaviv_gem_cpu_fini
*args
= data
;
311 struct drm_gem_object
*obj
;
317 obj
= drm_gem_object_lookup(file
, args
->handle
);
321 ret
= etnaviv_gem_cpu_fini(obj
);
323 drm_gem_object_put_unlocked(obj
);
328 static int etnaviv_ioctl_gem_info(struct drm_device
*dev
, void *data
,
329 struct drm_file
*file
)
331 struct drm_etnaviv_gem_info
*args
= data
;
332 struct drm_gem_object
*obj
;
338 obj
= drm_gem_object_lookup(file
, args
->handle
);
342 ret
= etnaviv_gem_mmap_offset(obj
, &args
->offset
);
343 drm_gem_object_put_unlocked(obj
);
348 static int etnaviv_ioctl_wait_fence(struct drm_device
*dev
, void *data
,
349 struct drm_file
*file
)
351 struct drm_etnaviv_wait_fence
*args
= data
;
352 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
353 struct drm_etnaviv_timespec
*timeout
= &args
->timeout
;
354 struct etnaviv_gpu
*gpu
;
356 if (args
->flags
& ~(ETNA_WAIT_NONBLOCK
))
359 if (args
->pipe
>= ETNA_MAX_PIPES
)
362 gpu
= priv
->gpu
[args
->pipe
];
366 if (args
->flags
& ETNA_WAIT_NONBLOCK
)
369 return etnaviv_gpu_wait_fence_interruptible(gpu
, args
->fence
,
373 static int etnaviv_ioctl_gem_userptr(struct drm_device
*dev
, void *data
,
374 struct drm_file
*file
)
376 struct drm_etnaviv_gem_userptr
*args
= data
;
378 if (args
->flags
& ~(ETNA_USERPTR_READ
|ETNA_USERPTR_WRITE
) ||
382 if (offset_in_page(args
->user_ptr
| args
->user_size
) ||
383 (uintptr_t)args
->user_ptr
!= args
->user_ptr
||
384 (u32
)args
->user_size
!= args
->user_size
||
385 args
->user_ptr
& ~PAGE_MASK
)
388 if (!access_ok((void __user
*)(unsigned long)args
->user_ptr
,
392 return etnaviv_gem_new_userptr(dev
, file
, args
->user_ptr
,
393 args
->user_size
, args
->flags
,
397 static int etnaviv_ioctl_gem_wait(struct drm_device
*dev
, void *data
,
398 struct drm_file
*file
)
400 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
401 struct drm_etnaviv_gem_wait
*args
= data
;
402 struct drm_etnaviv_timespec
*timeout
= &args
->timeout
;
403 struct drm_gem_object
*obj
;
404 struct etnaviv_gpu
*gpu
;
407 if (args
->flags
& ~(ETNA_WAIT_NONBLOCK
))
410 if (args
->pipe
>= ETNA_MAX_PIPES
)
413 gpu
= priv
->gpu
[args
->pipe
];
417 obj
= drm_gem_object_lookup(file
, args
->handle
);
421 if (args
->flags
& ETNA_WAIT_NONBLOCK
)
424 ret
= etnaviv_gem_wait_bo(gpu
, obj
, timeout
);
426 drm_gem_object_put_unlocked(obj
);
431 static int etnaviv_ioctl_pm_query_dom(struct drm_device
*dev
, void *data
,
432 struct drm_file
*file
)
434 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
435 struct drm_etnaviv_pm_domain
*args
= data
;
436 struct etnaviv_gpu
*gpu
;
438 if (args
->pipe
>= ETNA_MAX_PIPES
)
441 gpu
= priv
->gpu
[args
->pipe
];
445 return etnaviv_pm_query_dom(gpu
, args
);
448 static int etnaviv_ioctl_pm_query_sig(struct drm_device
*dev
, void *data
,
449 struct drm_file
*file
)
451 struct etnaviv_drm_private
*priv
= dev
->dev_private
;
452 struct drm_etnaviv_pm_signal
*args
= data
;
453 struct etnaviv_gpu
*gpu
;
455 if (args
->pipe
>= ETNA_MAX_PIPES
)
458 gpu
= priv
->gpu
[args
->pipe
];
462 return etnaviv_pm_query_sig(gpu
, args
);
465 static const struct drm_ioctl_desc etnaviv_ioctls
[] = {
466 #define ETNA_IOCTL(n, func, flags) \
467 DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
468 ETNA_IOCTL(GET_PARAM
, get_param
, DRM_RENDER_ALLOW
),
469 ETNA_IOCTL(GEM_NEW
, gem_new
, DRM_RENDER_ALLOW
),
470 ETNA_IOCTL(GEM_INFO
, gem_info
, DRM_RENDER_ALLOW
),
471 ETNA_IOCTL(GEM_CPU_PREP
, gem_cpu_prep
, DRM_RENDER_ALLOW
),
472 ETNA_IOCTL(GEM_CPU_FINI
, gem_cpu_fini
, DRM_RENDER_ALLOW
),
473 ETNA_IOCTL(GEM_SUBMIT
, gem_submit
, DRM_RENDER_ALLOW
),
474 ETNA_IOCTL(WAIT_FENCE
, wait_fence
, DRM_RENDER_ALLOW
),
475 ETNA_IOCTL(GEM_USERPTR
, gem_userptr
, DRM_RENDER_ALLOW
),
476 ETNA_IOCTL(GEM_WAIT
, gem_wait
, DRM_RENDER_ALLOW
),
477 ETNA_IOCTL(PM_QUERY_DOM
, pm_query_dom
, DRM_RENDER_ALLOW
),
478 ETNA_IOCTL(PM_QUERY_SIG
, pm_query_sig
, DRM_RENDER_ALLOW
),
481 static const struct vm_operations_struct vm_ops
= {
482 .fault
= etnaviv_gem_fault
,
483 .open
= drm_gem_vm_open
,
484 .close
= drm_gem_vm_close
,
487 static const struct file_operations fops
= {
488 .owner
= THIS_MODULE
,
490 .release
= drm_release
,
491 .unlocked_ioctl
= drm_ioctl
,
492 .compat_ioctl
= drm_compat_ioctl
,
496 .mmap
= etnaviv_gem_mmap
,
499 static struct drm_driver etnaviv_drm_driver
= {
500 .driver_features
= DRIVER_GEM
| DRIVER_RENDER
,
501 .open
= etnaviv_open
,
502 .postclose
= etnaviv_postclose
,
503 .gem_free_object_unlocked
= etnaviv_gem_free_object
,
504 .gem_vm_ops
= &vm_ops
,
505 .prime_handle_to_fd
= drm_gem_prime_handle_to_fd
,
506 .prime_fd_to_handle
= drm_gem_prime_fd_to_handle
,
507 .gem_prime_pin
= etnaviv_gem_prime_pin
,
508 .gem_prime_unpin
= etnaviv_gem_prime_unpin
,
509 .gem_prime_get_sg_table
= etnaviv_gem_prime_get_sg_table
,
510 .gem_prime_import_sg_table
= etnaviv_gem_prime_import_sg_table
,
511 .gem_prime_vmap
= etnaviv_gem_prime_vmap
,
512 .gem_prime_vunmap
= etnaviv_gem_prime_vunmap
,
513 .gem_prime_mmap
= etnaviv_gem_prime_mmap
,
514 #ifdef CONFIG_DEBUG_FS
515 .debugfs_init
= etnaviv_debugfs_init
,
517 .ioctls
= etnaviv_ioctls
,
518 .num_ioctls
= DRM_ETNAVIV_NUM_IOCTLS
,
521 .desc
= "etnaviv DRM",
530 static int etnaviv_bind(struct device
*dev
)
532 struct etnaviv_drm_private
*priv
;
533 struct drm_device
*drm
;
536 drm
= drm_dev_alloc(&etnaviv_drm_driver
, dev
);
540 priv
= kzalloc(sizeof(*priv
), GFP_KERNEL
);
542 dev_err(dev
, "failed to allocate private data\n");
546 drm
->dev_private
= priv
;
548 dev
->dma_parms
= &priv
->dma_parms
;
549 dma_set_max_seg_size(dev
, SZ_2G
);
551 mutex_init(&priv
->gem_lock
);
552 INIT_LIST_HEAD(&priv
->gem_list
);
555 priv
->cmdbuf_suballoc
= etnaviv_cmdbuf_suballoc_new(drm
->dev
);
556 if (IS_ERR(priv
->cmdbuf_suballoc
)) {
557 dev_err(drm
->dev
, "Failed to create cmdbuf suballocator\n");
558 ret
= PTR_ERR(priv
->cmdbuf_suballoc
);
562 dev_set_drvdata(dev
, drm
);
564 ret
= component_bind_all(dev
, drm
);
566 goto out_destroy_suballoc
;
570 ret
= drm_dev_register(drm
, 0);
577 component_unbind_all(dev
, drm
);
578 out_destroy_suballoc
:
579 etnaviv_cmdbuf_suballoc_destroy(priv
->cmdbuf_suballoc
);
588 static void etnaviv_unbind(struct device
*dev
)
590 struct drm_device
*drm
= dev_get_drvdata(dev
);
591 struct etnaviv_drm_private
*priv
= drm
->dev_private
;
593 drm_dev_unregister(drm
);
595 component_unbind_all(dev
, drm
);
597 dev
->dma_parms
= NULL
;
599 etnaviv_cmdbuf_suballoc_destroy(priv
->cmdbuf_suballoc
);
601 drm
->dev_private
= NULL
;
607 static const struct component_master_ops etnaviv_master_ops
= {
608 .bind
= etnaviv_bind
,
609 .unbind
= etnaviv_unbind
,
612 static int compare_of(struct device
*dev
, void *data
)
614 struct device_node
*np
= data
;
616 return dev
->of_node
== np
;
/* Component match: compare a device against a name string (platform data). */
static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}
624 static int etnaviv_pdev_probe(struct platform_device
*pdev
)
626 struct device
*dev
= &pdev
->dev
;
627 struct component_match
*match
= NULL
;
629 if (!dev
->platform_data
) {
630 struct device_node
*core_node
;
632 for_each_compatible_node(core_node
, NULL
, "vivante,gc") {
633 if (!of_device_is_available(core_node
))
636 drm_of_component_match_add(&pdev
->dev
, &match
,
637 compare_of
, core_node
);
640 char **names
= dev
->platform_data
;
643 for (i
= 0; names
[i
]; i
++)
644 component_match_add(dev
, &match
, compare_str
, names
[i
]);
647 return component_master_add_with_match(dev
, &etnaviv_master_ops
, match
);
650 static int etnaviv_pdev_remove(struct platform_device
*pdev
)
652 component_master_del(&pdev
->dev
, &etnaviv_master_ops
);
657 static struct platform_driver etnaviv_platform_driver
= {
658 .probe
= etnaviv_pdev_probe
,
659 .remove
= etnaviv_pdev_remove
,
/* Virtual platform device, instantiated by etnaviv_init() when the DT
 * contains at least one available "vivante,gc" node. */
static struct platform_device *etnaviv_drm;
667 static int __init
etnaviv_init(void)
669 struct platform_device
*pdev
;
671 struct device_node
*np
;
673 etnaviv_validate_init();
675 ret
= platform_driver_register(&etnaviv_gpu_driver
);
679 ret
= platform_driver_register(&etnaviv_platform_driver
);
681 goto unregister_gpu_driver
;
684 * If the DT contains at least one available GPU device, instantiate
685 * the DRM platform device.
687 for_each_compatible_node(np
, NULL
, "vivante,gc") {
688 if (!of_device_is_available(np
))
691 pdev
= platform_device_alloc("etnaviv", -1);
695 goto unregister_platform_driver
;
697 pdev
->dev
.coherent_dma_mask
= DMA_BIT_MASK(40);
698 pdev
->dev
.dma_mask
= &pdev
->dev
.coherent_dma_mask
;
701 * Apply the same DMA configuration to the virtual etnaviv
702 * device as the GPU we found. This assumes that all Vivante
703 * GPUs in the system share the same DMA constraints.
705 of_dma_configure(&pdev
->dev
, np
, true);
707 ret
= platform_device_add(pdev
);
709 platform_device_put(pdev
);
711 goto unregister_platform_driver
;
721 unregister_platform_driver
:
722 platform_driver_unregister(&etnaviv_platform_driver
);
723 unregister_gpu_driver
:
724 platform_driver_unregister(&etnaviv_gpu_driver
);
727 module_init(etnaviv_init
);
729 static void __exit
etnaviv_exit(void)
731 platform_device_unregister(etnaviv_drm
);
732 platform_driver_unregister(&etnaviv_platform_driver
);
733 platform_driver_unregister(&etnaviv_gpu_driver
);
735 module_exit(etnaviv_exit
);
737 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
738 MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
739 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
740 MODULE_DESCRIPTION("etnaviv DRM Driver");
741 MODULE_LICENSE("GPL v2");
742 MODULE_ALIAS("platform:etnaviv");