// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
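
/*
 * DRM operations:
 */

/*
 * Return the first "vivante,gc" DT node that is marked available. The
 * returned node has a reference held on it; the caller is responsible
 * for dropping it with of_node_put().
 */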
static struct device_node *etnaviv_of_first_available_node(void)
{
	struct device_node *np;

	for_each_compatible_node(np, NULL, "vivante,gc") {
		if (of_device_is_available(np))
			return np;
	}

	return NULL;
}
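
/*
 * Initialize every GPU core that was bound to the master device. A core
 * that fails etnaviv_gpu_init() is dropped from the pipe array instead
 * of failing the whole driver bind.
 */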
static void load_gpu(struct drm_device *dev)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *g = priv->gpu[i];

		if (g) {
			int ret;

			ret = etnaviv_gpu_init(g);
			if (ret)
				priv->gpu[i] = NULL;
		}
	}
}
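
/*
 * Per-open-file setup: allocate a context, give it an ID in the
 * active_contexts XArray, create a private MMU context and a scheduler
 * entity for each present GPU pipe.
 */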
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx;
	int ret, i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ret = xa_alloc_cyclic(&priv->active_contexts, &ctx->id, ctx,
			      xa_limit_32b, &priv->next_context_id, GFP_KERNEL);
	if (ret < 0)
		goto out_free;

	ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
					      priv->cmdbuf_suballoc);
	if (!ctx->mmu) {
		ret = -ENOMEM;
		goto out_free;
	}

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];
		struct drm_gpu_scheduler *sched;

		if (gpu) {
			sched = &gpu->sched;
			drm_sched_entity_init(&ctx->sched_entity[i],
					      DRM_SCHED_PRIORITY_NORMAL, &sched,
					      1, NULL);
		}
	}

	file->driver_priv = ctx;

	return 0;

out_free:
	kfree(ctx);
	return ret;
}
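
/*
 * Per-open-file teardown: destroy the scheduler entities, release the
 * private MMU context and remove the context ID before freeing it.
 */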
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_file_private *ctx = file->driver_priv;
	unsigned int i;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		struct etnaviv_gpu *gpu = priv->gpu[i];

		if (gpu)
			drm_sched_entity_destroy(&ctx->sched_entity[i]);
	}

	etnaviv_iommu_context_put(ctx->mmu);

	xa_erase(&priv->active_contexts, ctx->id);

	kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);

	read_lock(&dev->vma_offset_manager->vm_lock);
	drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct drm_printer p = drm_seq_file_printer(m);
	struct etnaviv_iommu_context *mmu_context;

	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	/*
	 * Lock the GPU to avoid an MMU context switch just now and elevate
	 * the refcount of the current context to avoid it disappearing from
	 * under our feet.
	 */
	mutex_lock(&gpu->lock);
	mmu_context = gpu->mmu_context;
	if (mmu_context)
		etnaviv_iommu_context_get(mmu_context);
	mutex_unlock(&gpu->lock);

	if (!mmu_context)
		return 0;

	mutex_lock(&mmu_context->lock);
	drm_mm_print(&mmu_context->mm, &p);
	mutex_unlock(&mmu_context->lock);

	etnaviv_iommu_context_put(mmu_context);

	return 0;
}
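
/*
 * Hexdump the kernel ring buffer: groups of command stream words per
 * line, each group prefixed with the virtual address of its first word.
 * Caller must hold gpu->lock.
 */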
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	struct etnaviv_cmdbuf *buf = &gpu->buffer;
	u32 size = buf->size;
	u32 *ptr = buf->vaddr;
	u32 i;

	seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
			buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
			size - buf->user_size);

	for (i = 0; i < size / 4; i++) {
		if (i && !(i % 8))
			seq_puts(m, "\n");
		if (i % 8 == 0)
			seq_printf(m, "\t0x%p: ", ptr + i);
		seq_printf(m, "%08x ", *(ptr + i));
	}
	seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
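
/*
 * debugfs dispatch helpers: show_unlocked() calls the drm_info_list
 * entry's callback once with the DRM device, while show_each_gpu()
 * iterates over all present pipes and calls it once per GPU core.
 */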
static int show_unlocked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;

	return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct etnaviv_gpu *gpu;
	int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
			node->info_ent->data;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < ETNA_MAX_PIPES; i++) {
		gpu = priv->gpu[i];
		if (!gpu)
			continue;

		ret = show(gpu, m);
		if (ret < 0)
			break;
	}

	return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
		{"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
		{"gem", show_unlocked, 0, etnaviv_gem_show},
		{ "mm", show_unlocked, 0, etnaviv_mm_show},
		{"mmu", show_each_gpu, 0, etnaviv_mmu_show},
		{"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static void etnaviv_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(etnaviv_debugfs_list,
				 ARRAY_SIZE(etnaviv_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif
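
/*
 * DRM ioctls:
 *
 * Every ioctl that addresses a specific GPU validates args->pipe against
 * ETNA_MAX_PIPES and rejects requests for pipes that are not populated.
 */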

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_param *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_new *args = data;

	if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
			    ETNA_BO_FORCE_MMU))
		return -EINVAL;

	return etnaviv_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->flags)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = etnaviv_gem_mmap_offset(obj, &args->offset);
	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_etnaviv_wait_fence *args = data;
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct etnaviv_gpu *gpu;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
						    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_etnaviv_gem_userptr *args = data;

	if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
	    args->flags == 0)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size) ||
	    (uintptr_t)args->user_ptr != args->user_ptr ||
	    (u32)args->user_size != args->user_size ||
	    args->user_ptr & ~PAGE_MASK)
		return -EINVAL;

	if (!access_ok((void __user *)(unsigned long)args->user_ptr,
		       args->user_size))
		return -EFAULT;

	return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
				       args->user_size, args->flags,
				       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_gem_wait *args = data;
	struct drm_etnaviv_timespec *timeout = &args->timeout;
	struct drm_gem_object *obj;
	struct etnaviv_gpu *gpu;
	int ret;

	if (args->flags & ~(ETNA_WAIT_NONBLOCK))
		return -EINVAL;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	if (args->flags & ETNA_WAIT_NONBLOCK)
		timeout = NULL;

	ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_domain *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct etnaviv_drm_private *priv = dev->dev_private;
	struct drm_etnaviv_pm_signal *args = data;
	struct etnaviv_gpu *gpu;

	if (args->pipe >= ETNA_MAX_PIPES)
		return -EINVAL;

	gpu = priv->gpu[args->pipe];
	if (!gpu)
		return -ENXIO;

	return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,    get_param,    DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_RENDER_ALLOW),
	ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
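
/*
 * DEFINE_DRM_GEM_FOPS() expands to the standard file_operations for a
 * GEM-based DRM driver, referenced below as &fops.
 */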
DEFINE_DRM_GEM_FOPS(fops);

static const struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_RENDER,
	.open               = etnaviv_open,
	.postclose          = etnaviv_postclose,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 4,
};

static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (IS_ERR(drm))
		return PTR_ERR(drm);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_put;
	}
	drm->dev_private = priv;

	dma_set_max_seg_size(dev, SZ_2G);

	xa_init_flags(&priv->active_contexts, XA_FLAGS_ALLOC);

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->shm_gfp_mask = GFP_HIGHUSER | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

	/*
	 * If the GPU is part of a system with DMA addressing limitations,
	 * request pages for our SHM backend buffers from the DMA32 zone to
	 * hopefully avoid performance killing SWIOTLB bounce buffering.
	 */
	if (dma_addressing_limited(dev)) {
		priv->shm_gfp_mask |= GFP_DMA32;
		priv->shm_gfp_mask &= ~__GFP_HIGHMEM;
	}

	priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
	if (IS_ERR(priv->cmdbuf_suballoc)) {
		dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
		ret = PTR_ERR(priv->cmdbuf_suballoc);
		goto out_free_priv;
	}

	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_destroy_suballoc;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_unbind;

	return 0;

out_unbind:
	component_unbind_all(dev, drm);
out_destroy_suballoc:
	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
	mutex_destroy(&priv->gem_lock);
	kfree(priv);
out_put:
	drm_dev_put(drm);

	return ret;
}

static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	component_unbind_all(dev, drm);

	etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

	xa_destroy(&priv->active_contexts);

	drm->dev_private = NULL;
	kfree(priv);

	drm_dev_put(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
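
/*
 * Build the component match list: from the DT "vivante,gc" nodes when
 * probed via the device tree, or from a NULL-terminated array of device
 * names passed as platform data otherwise.
 */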
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *first_node = NULL;
	struct component_match *match = NULL;

	if (!dev->platform_data) {
		struct device_node *core_node;

		for_each_compatible_node(core_node, NULL, "vivante,gc") {
			if (!of_device_is_available(core_node))
				continue;

			drm_of_component_match_add(dev, &match,
						   component_compare_of, core_node);
		}
	} else {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, component_compare_dev_name, names[i]);
	}

	/*
	 * PTA and MTLB can have 40 bit base addresses, but
	 * unfortunately, an entry in the MTLB can only point to a
	 * 32 bit base address of a STLB. Moreover, to initialize the
	 * MMU we need a command buffer with a 32 bit address because
	 * without an MMU there is only an identity mapping between
	 * the internal 32 bit addresses and the bus addresses.
	 *
	 * To make things easy, we set the dma_coherent_mask to 32
	 * bit to make sure we are allocating the command buffers and
	 * TLBs in the lower 4 GiB address space.
	 */
	if (dma_set_mask(dev, DMA_BIT_MASK(40)) ||
	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
		dev_dbg(dev, "No suitable DMA available\n");
		return -ENODEV;
	}

	/*
	 * Apply the same DMA configuration to the virtual etnaviv
	 * device as the GPU we found. This assumes that all Vivante
	 * GPUs in the system share the same DMA constraints.
	 */
	first_node = etnaviv_of_first_available_node();
	if (first_node) {
		of_dma_configure(dev, first_node, true);
		of_node_put(first_node);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static void etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);
}

static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
	},
};
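
/*
 * Helper pair for the virtual "etnaviv" platform device that anchors
 * the DRM master when the GPU cores come from the device tree.
 */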
static int etnaviv_create_platform_device(const char *name,
					  struct platform_device **ppdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
	if (!pdev)
		return -ENOMEM;

	ret = platform_device_add(pdev);
	if (ret) {
		platform_device_put(pdev);
		return ret;
	}

	*ppdev = pdev;

	return 0;
}

static void etnaviv_destroy_platform_device(struct platform_device **ppdev)
{
	struct platform_device *pdev = *ppdev;

	if (!pdev)
		return;

	platform_device_unregister(pdev);

	*ppdev = NULL;
}

static struct platform_device *etnaviv_drm;
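
/*
 * Module init/exit: register the GPU core driver and the master platform
 * driver, then instantiate the virtual DRM device if the DT contains at
 * least one usable GPU node. Teardown runs in the reverse order.
 */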
static int __init etnaviv_init(void)
{
	int ret;
	struct device_node *np;

	etnaviv_validate_init();

	ret = platform_driver_register(&etnaviv_gpu_driver);
	if (ret != 0)
		return ret;

	ret = platform_driver_register(&etnaviv_platform_driver);
	if (ret != 0)
		goto unregister_gpu_driver;

	/*
	 * If the DT contains at least one available GPU device, instantiate
	 * the DRM platform device.
	 */
	np = etnaviv_of_first_available_node();
	if (np) {
		of_node_put(np);

		ret = etnaviv_create_platform_device("etnaviv", &etnaviv_drm);
		if (ret)
			goto unregister_platform_driver;
	}

	return 0;

unregister_platform_driver:
	platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
	platform_driver_unregister(&etnaviv_gpu_driver);
	return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
	etnaviv_destroy_platform_device(&etnaviv_drm);
	platform_driver_unregister(&etnaviv_platform_driver);
	platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");