// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, 2020-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-mapping.h>
#include <linux/fault-inject.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/uaccess.h>

#include <drm/drm_client_setup.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>

#include "msm_drv.h"
#include "msm_debugfs.h"
#include "msm_kms.h"
/*
 * MSM driver version:
 * - 1.0.0 - initial interface
 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
 * - 1.2.0 - adds explicit fence support for submit ioctl
 * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
 *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
 *           MSM_GEM_INFO ioctl
 * - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
 *           GEM object's debug name
 * - 1.5.0 - Add SUBMITQUEUE_QUERY ioctl
 * - 1.6.0 - Syncobj support
 * - 1.7.0 - Add MSM_PARAM_SUSPENDS to access suspend count
 * - 1.8.0 - Add MSM_BO_CACHED_COHERENT for supported GPUs (a6xx)
 * - 1.9.0 - Add MSM_SUBMIT_FENCE_SN_IN
 * - 1.10.0 - Add MSM_SUBMIT_BO_NO_IMPLICIT
 * - 1.11.0 - Add wait boost (MSM_WAIT_FENCE_BOOST, MSM_PREP_BOOST)
 * - 1.12.0 - Add MSM_INFO_SET_METADATA and MSM_INFO_GET_METADATA
 */
#define MSM_VERSION_MAJOR	1
#define MSM_VERSION_MINOR	12
#define MSM_VERSION_PATCHLEVEL	0
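/*
 * Userspace is expected to gate use of newer params/ioctls on the minor
 * version reported by the kernel.  A minimal illustrative sketch using the
 * standard libdrm API (not part of this driver):
 *
 *	drmVersionPtr v = drmGetVersion(fd);
 *	bool has_metadata = v && (v->version_major > 1 ||
 *			(v->version_major == 1 && v->version_minor >= 12));
 *	drmFreeVersion(v);
 */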
static void msm_deinit_vram(struct drm_device *ddev);

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
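/*
 * Note: the string is parsed with memparse() below, so suffixed values such
 * as "16m" or "64M" work as expected.  It is only consulted when the device
 * has no IOMMU (see msm_init_vram()).
 */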
bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);

static bool modeset = true;
MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (1=on (default), 0=disable)");
module_param(modeset, bool, 0600);

DECLARE_FAULT_ATTR(fail_gem_alloc);
DECLARE_FAULT_ATTR(fail_gem_iova);
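/*
 * Fault-injection points used by should_fail() in the GEM_NEW and
 * GEM_INFO(GET_IOVA) paths below; with fault injection enabled these are
 * typically exposed as debugfs knobs (assumption: wired up in msm_debugfs.c).
 */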
static int msm_drm_uninit(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_drm_private *priv = platform_get_drvdata(pdev);
	struct drm_device *ddev = priv->dev;

	/*
	 * Shutdown the hw if we're far enough along where things might be on.
	 * If we run this too early, we'll end up panicking in any variety of
	 * places. Since we don't register the drm device until late in
	 * msm_drm_init, drm_dev->registered is used as an indicator that the
	 * shutdown will be successful.
	 */
	if (ddev->registered) {
		drm_dev_unregister(ddev);
		drm_atomic_helper_shutdown(ddev);
	}

	/* We must cancel and cleanup any pending vblank enable/disable
	 * work before msm_irq_uninstall() to avoid work re-enabling an
	 * irq after uninstall has disabled it.
	 */
	flush_workqueue(priv->wq);

	msm_gem_shrinker_cleanup(ddev);

	msm_perf_debugfs_cleanup(priv);
	msm_rd_debugfs_cleanup(priv);

	msm_drm_kms_uninit(dev);

	msm_deinit_vram(ddev);

	component_unbind_all(dev, ddev);

	ddev->dev_private = NULL;
	drm_dev_put(ddev);

	destroy_workqueue(priv->wq);

	return 0;
}
bool msm_use_mmu(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;

	/*
	 * a2xx comes with its own MMU.
	 * On other platforms the IOMMU can be specified either for the
	 * MDP/DPU device or for its parent, the MDSS device.
	 */
	return priv->is_a2xx ||
		device_iommu_mapped(dev->dev) ||
		device_iommu_mapped(dev->dev->parent);
}
static int msm_init_vram(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct device_node *node;
	unsigned long size = 0;
	int ret = 0;

	/* In the device-tree world, we could have a 'memory-region'
	 * phandle, which gives us a link to our "vram".  Allocating
	 * is all nicely abstracted behind the dma api, but we need
	 * to know the entire size to allocate it all in one go. There
	 * are two cases:
	 *  1) device with no IOMMU, in which case we need exclusive
	 *     access to a VRAM carveout big enough for all gpu
	 *     buffers
	 *  2) device with IOMMU, but where the bootloader puts up
	 *     a splash screen.  In this case, the VRAM carveout
	 *     need only be large enough for the fbdev fb.  But we need
	 *     exclusive access to the buffer to avoid the kernel
	 *     using those pages for other purposes (which appears
	 *     as corruption on screen before we have a chance to
	 *     load and do initial modeset)
	 */
	node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
	if (node) {
		struct resource r;

		ret = of_address_to_resource(node, 0, &r);
		of_node_put(node);
		if (ret)
			return ret;

		size = r.end - r.start + 1;
		DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire DMA chunk carved out in early startup in
	 * mach-msm:
	 */
	} else if (!msm_use_mmu(dev)) {
		DRM_INFO("using %s VRAM carveout\n", vram);
		size = memparse(vram, NULL);
	}

	if (size) {
		unsigned long attrs = 0;
		void *p;

		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
		spin_lock_init(&priv->vram.lock);

		attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
		attrs |= DMA_ATTR_WRITE_COMBINE;
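		/*
		 * The carveout is handed out page-wise through drm_mm, so a
		 * kernel virtual mapping of the whole region is not needed,
		 * and write-combine matches how the buffers are mapped to
		 * userspace.
		 */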
		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, attrs);
		if (!p) {
			DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			return -ENOMEM;
		}

		DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	return ret;
}

static void msm_deinit_vram(struct drm_device *ddev)
{
	struct msm_drm_private *priv = ddev->dev_private;
	unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;

	if (!priv->vram.paddr)
		return;

	drm_mm_takedown(&priv->vram.mm);
	dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr,
		       attrs);
}

static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
{
	struct msm_drm_private *priv = dev_get_drvdata(dev);
	struct drm_device *ddev;
	int ret;

	if (drm_firmware_drivers_only())
		return -ENODEV;

	ddev = drm_dev_alloc(drv, dev);
	if (IS_ERR(ddev)) {
		DRM_DEV_ERROR(dev, "failed to allocate drm_device\n");
		return PTR_ERR(ddev);
	}
	ddev->dev_private = priv;
	priv->dev = ddev;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	if (!priv->wq)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->objects);
	mutex_init(&priv->obj_lock);

	/*
	 * Initialize the LRUs:
	 */
	mutex_init(&priv->lru.lock);
	drm_gem_lru_init(&priv->lru.unbacked, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.pinned,   &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.willneed, &priv->lru.lock);
	drm_gem_lru_init(&priv->lru.dontneed, &priv->lru.lock);
	/* Teach lockdep about lock ordering wrt. shrinker: */
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&priv->lru.lock);
	fs_reclaim_release(GFP_KERNEL);
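	/*
	 * The shrinker runs in reclaim context and takes lru.lock, so taking
	 * lru.lock while allocating memory could invert that ordering.  The
	 * dummy acquire/release above records the fs_reclaim -> lru.lock
	 * dependency once at init time so lockdep can flag violations later.
	 */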
	if (priv->kms_init) {
		ret = drmm_mode_config_init(ddev);
	}

	ret = msm_init_vram(ddev);

	dma_set_max_seg_size(dev, UINT_MAX);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev, ddev);
	if (ret)
		goto err_deinit_vram;

	ret = msm_gem_shrinker_init(ddev);

	if (priv->kms_init) {
		ret = msm_drm_kms_init(dev, drv);
	} else {
		/* valid only for the dummy headless case, where of_node=NULL */
		WARN_ON(dev->of_node);
		ddev->driver_features &= ~DRIVER_MODESET;
		ddev->driver_features &= ~DRIVER_ATOMIC;
	}

	ret = drm_dev_register(ddev, 0);

	ret = msm_debugfs_late_init(ddev);

	if (priv->kms_init) {
		drm_kms_helper_poll_init(ddev);
		drm_client_setup(ddev, NULL);
	}

	return 0;

err_deinit_vram:
	msm_deinit_vram(ddev);
	destroy_workqueue(priv->wq);

	return ret;
}

static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}

static int context_init(struct drm_device *dev, struct drm_file *file)
{
	static atomic_t ident = ATOMIC_INIT(0);
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	INIT_LIST_HEAD(&ctx->submitqueues);
	rwlock_init(&ctx->queuelock);

	kref_init(&ctx->ref);
	msm_submitqueue_init(dev, ctx);

	ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
	file->driver_priv = ctx;

	ctx->seqno = atomic_inc_return(&ident);

	return 0;
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	return context_init(dev, file);
}

static void context_close(struct msm_file_private *ctx)
{
	msm_submitqueue_close(ctx);
	msm_file_private_put(ctx);
}

static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	/*
	 * It is not possible to set sysprof param to non-zero if gpu
	 * is not initialized:
	 */
	if (priv->gpu)
		msm_file_private_set_sysprof(ctx, priv->gpu, 0);

	context_close(ctx);
}
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;
	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, file->driver_priv,
				     args->param, &args->value, &args->len);
}

static int msm_ioctl_set_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	if ((args->pipe != MSM_PIPE_3D0) || (args->pad != 0))
		return -EINVAL;

	gpu = priv->gpu;
	if (!gpu)
		return -ENXIO;

	return gpu->funcs->set_param(gpu, file->driver_priv,
				     args->param, args->value, args->len);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;
	uint32_t flags = args->flags;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	/*
	 * Uncached CPU mappings are deprecated, as of:
	 *
	 * 9ef364432db4 ("drm/msm: deprecate MSM_BO_UNCACHED (map as writecombine instead)")
	 *
	 * So promote them to WC.
	 */
	if (flags & MSM_BO_UNCACHED) {
		flags &= ~MSM_BO_CACHED;
		flags |= MSM_BO_WC;
	}

	if (should_fail(&fail_gem_alloc, args->size))
		return -ENOMEM;

	return msm_gem_new_handle(dev, file, args->size,
			flags, &args->handle, NULL);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
	return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}

static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	ktime_t timeout = to_ktime(args->timeout);
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &timeout);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_put(obj);

	return ret;
}

static int msm_ioctl_gem_info_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t *iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	/*
	 * Don't pin the memory here - just get an address so that userspace can
	 * be productive
	 */
	return msm_gem_get_iova(obj, ctx->aspace, iova);
}

static int msm_ioctl_gem_info_set_iova(struct drm_device *dev,
		struct drm_file *file, struct drm_gem_object *obj,
		uint64_t iova)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;

	if (!priv->gpu)
		return -EINVAL;

	/* Only supported if per-process address space is supported: */
	if (priv->gpu->aspace == ctx->aspace)
		return -EOPNOTSUPP;

	if (should_fail(&fail_gem_iova, obj->size))
		return -ENOMEM;

	return msm_gem_set_iova(obj, ctx->aspace, iova);
}
static int msm_ioctl_gem_info_set_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *buf;
	int ret;

	/* Impose a moderate upper bound on metadata size: */
	if (metadata_size > 128)
		return -EOVERFLOW;

	/* Use a temporary buf to keep copy_from_user() outside of gem obj lock: */
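	/*
	 * (copy_from_user() can fault and end up in reclaim / mmap locking
	 * paths, which must not nest inside the gem object lock; hence the
	 * kernel-side staging buffer.)
	 */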
	buf = memdup_user(metadata, metadata_size);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		goto out;

	msm_obj->metadata =
		krealloc(msm_obj->metadata, metadata_size, GFP_KERNEL);
	msm_obj->metadata_size = metadata_size;
	memcpy(msm_obj->metadata, buf, metadata_size);

	msm_gem_unlock(obj);

out:
	kfree(buf);

	return ret;
}

static int msm_ioctl_gem_info_get_metadata(struct drm_gem_object *obj,
					   __user void *metadata,
					   u32 *metadata_size)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	void *buf;
	int ret, len;

	if (!metadata) {
		/*
		 * Querying the size is inherently racy, but
		 * EXT_external_objects expects the app to confirm
		 * via device and driver UUIDs that the exporter and
		 * importer versions match.  All we can do from the
		 * kernel side is check the length under obj lock
		 * when userspace tries to retrieve the metadata
		 */
		*metadata_size = msm_obj->metadata_size;
		return 0;
	}

	ret = msm_gem_lock_interruptible(obj);
	if (ret)
		return ret;

	/* Avoid copy_to_user() under gem obj lock: */
	len = msm_obj->metadata_size;
	buf = kmemdup(msm_obj->metadata, len, GFP_KERNEL);

	msm_gem_unlock(obj);

	if (*metadata_size < len) {
		ret = -ETOOSMALL;
	} else if (copy_to_user(metadata, buf, len)) {
		ret = -EFAULT;
	} else {
		*metadata_size = len;
	}

	kfree(buf);

	return ret;
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	struct msm_gem_object *msm_obj;
	int i, ret = 0;

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
	case MSM_INFO_GET_IOVA:
	case MSM_INFO_SET_IOVA:
	case MSM_INFO_GET_FLAGS:
		/* value returned as immediate, not pointer, so len==0: */
		if (args->len)
			return -EINVAL;
		break;
	case MSM_INFO_SET_NAME:
	case MSM_INFO_GET_NAME:
	case MSM_INFO_SET_METADATA:
	case MSM_INFO_GET_METADATA:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	msm_obj = to_msm_bo(obj);

	switch (args->info) {
	case MSM_INFO_GET_OFFSET:
		args->value = msm_gem_mmap_offset(obj);
		break;
	case MSM_INFO_GET_IOVA:
		ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
		break;
	case MSM_INFO_SET_IOVA:
		ret = msm_ioctl_gem_info_set_iova(dev, file, obj, args->value);
		break;
	case MSM_INFO_GET_FLAGS:
		if (obj->import_attach) {
			ret = -EINVAL;
			break;
		}
		/* Hide internal kernel-only flags: */
		args->value = to_msm_bo(obj)->flags & MSM_BO_FLAGS;
		break;
	case MSM_INFO_SET_NAME:
		/* length check should leave room for terminating null: */
		if (args->len >= sizeof(msm_obj->name)) {
			ret = -EINVAL;
			break;
		}
		if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value),
				   args->len)) {
			msm_obj->name[0] = '\0';
			ret = -EFAULT;
			break;
		}
		msm_obj->name[args->len] = '\0';
		for (i = 0; i < args->len; i++) {
			if (!isprint(msm_obj->name[i])) {
				msm_obj->name[i] = '\0';
				break;
			}
		}
		break;
	case MSM_INFO_GET_NAME:
		if (args->value && (args->len < strlen(msm_obj->name))) {
			ret = -EINVAL;
			break;
		}
		args->len = strlen(msm_obj->name);
		if (args->value) {
			if (copy_to_user(u64_to_user_ptr(args->value),
					 msm_obj->name, args->len))
				ret = -EFAULT;
		}
		break;
	case MSM_INFO_SET_METADATA:
		ret = msm_ioctl_gem_info_set_metadata(
			obj, u64_to_user_ptr(args->value), args->len);
		break;
	case MSM_INFO_GET_METADATA:
		ret = msm_ioctl_gem_info_get_metadata(
			obj, u64_to_user_ptr(args->value), &args->len);
		break;
	}

	drm_gem_object_put(obj);

	return ret;
}
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
		      ktime_t timeout, uint32_t flags)
{
	struct dma_fence *fence;
	int ret;

	if (fence_after(fence_id, queue->last_fence)) {
		DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
				      fence_id, queue->last_fence);
		return -EINVAL;
	}

	/*
	 * Map submitqueue scoped "seqno" (which is actually an idr key)
	 * back to underlying dma-fence
	 *
	 * The fence is removed from the fence_idr when the submit is
	 * retired, so if the fence is not found it means there is nothing
	 * to wait for
	 */
	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);
	spin_unlock(&queue->idr_lock);

	if (!fence)
		return 0;
& MSM_WAIT_FENCE_BOOST
)
759 dma_fence_set_deadline(fence
, ktime_get());
761 ret
= dma_fence_wait_timeout(fence
, true, timeout_to_jiffies(&timeout
));
764 } else if (ret
!= -ERESTARTSYS
) {
768 dma_fence_put(fence
);
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_wait_fence *args = data;
	struct msm_gpu_submitqueue *queue;
	int ret;

	if (args->flags & ~MSM_WAIT_FENCE_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	if (!priv->gpu)
		return 0;

	queue = msm_submitqueue_get(file->driver_priv, args->queueid);
	if (!queue)
		return -ENOENT;

	ret = wait_fence(queue, args->fence, to_ktime(args->timeout), args->flags);

	msm_submitqueue_put(queue);

	return ret;
}

static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_madvise *args = data;
	struct drm_gem_object *obj;
	int ret;

	switch (args->madv) {
	case MSM_MADV_DONTNEED:
	case MSM_MADV_WILLNEED:
		break;
	default:
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_madvise(obj, args->madv);
	if (ret >= 0) {
		args->retained = ret;
		ret = 0;
	}

	drm_gem_object_put(obj);

	return ret;
}
static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_submitqueue *args = data;

	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
		return -EINVAL;

	return msm_submitqueue_create(dev, file->driver_priv, args->prio,
		args->flags, &args->id);
}

static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	return msm_submitqueue_query(dev, file->driver_priv, data);
}

static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	u32 id = *(u32 *) data;

	return msm_submitqueue_remove(file->driver_priv, id);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SET_PARAM,    msm_ioctl_set_param,    DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file)
{
	struct drm_device *dev = file->minor->dev;
	struct msm_drm_private *priv = dev->dev_private;

	if (!priv->gpu)
		return;

	msm_gpu_show_fdinfo(priv->gpu, file->driver_priv, p);

	drm_show_memory_stats(p, file);
}

static const struct file_operations fops = {
	.owner = THIS_MODULE,
	DRM_GEM_FOPS,
	.show_fdinfo = drm_show_fdinfo,
};
static const struct drm_driver msm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_RENDER |
				DRIVER_ATOMIC |
				DRIVER_MODESET |
				DRIVER_SYNCOBJ_TIMELINE |
				DRIVER_SYNCOBJ,
	.open               = msm_open,
	.postclose          = msm_postclose,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
#endif
	MSM_FBDEV_DRIVER_OPS,
	.show_fdinfo        = msm_show_fdinfo,
	.ioctls             = msm_ioctls,
	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
	.major              = MSM_VERSION_MAJOR,
	.minor              = MSM_VERSION_MINOR,
	.patchlevel         = MSM_VERSION_PATCHLEVEL,
};
/*
 * Componentized driver support:
 */

/*
 * Identify what components need to be added by parsing what remote-endpoints
 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
 * is no external component that we need to add since LVDS is within MDP4
 * itself.
 */
static int add_components_mdp(struct device *master_dev,
			      struct component_match **matchptr)
{
	struct device_node *np = master_dev->of_node;
	struct device_node *ep_node;

	for_each_endpoint_of_node(np, ep_node) {
		struct device_node *intf;
		struct of_endpoint ep;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret) {
			DRM_DEV_ERROR(master_dev, "unable to parse port endpoint\n");
			of_node_put(ep_node);
			return ret;
		}

		/*
		 * The LCDC/LVDS port on MDP4 is a special case where the
		 * remote-endpoint isn't a component that we need to add
		 */
		if (of_device_is_compatible(np, "qcom,mdp4") &&
		    ep.port == 0)
			continue;

		/*
		 * It's okay if some of the ports don't have a remote endpoint
		 * specified. It just means that the port isn't connected to
		 * any external interface.
		 */
		intf = of_graph_get_remote_port_parent(ep_node);
		if (!intf)
			continue;

		if (of_device_is_available(intf))
			drm_of_component_match_add(master_dev, matchptr,
						   component_compare_of, intf);

		of_node_put(intf);
	}

	return 0;
}
#if !IS_REACHABLE(CONFIG_DRM_MSM_MDP5) || !IS_REACHABLE(CONFIG_DRM_MSM_DPU)
bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{
	/* If just a single driver is enabled, use it no matter what */
	return true;
}
#else

static bool prefer_mdp5 = true;
MODULE_PARM_DESC(prefer_mdp5, "Select whether MDP5 or DPU driver should be preferred");
module_param(prefer_mdp5, bool, 0444);

/* list all platforms supported by both mdp5 and dpu drivers */
static const char *const msm_mdp5_dpu_migration[] = {
	/* ... */
};

bool msm_disp_drv_should_bind(struct device *dev, bool dpu_driver)
{
	/* If it is not an MDP5 device, do not try the MDP5 driver */
	if (!of_device_is_compatible(dev->of_node, "qcom,mdp5"))
		return dpu_driver;

	/* If it is not in the migration list, use MDP5 */
	if (!of_device_compatible_match(dev->of_node, msm_mdp5_dpu_migration))
		return !dpu_driver;

	return prefer_mdp5 ? !dpu_driver : dpu_driver;
}
#endif
/*
 * We don't know what the best binding is to link the gpu with the drm device.
 * For now, we just hunt for all the possible gpus that we support, and add them
 * as components.
 */
static const struct of_device_id msm_gpu_match[] = {
	{ .compatible = "qcom,adreno" },
	{ .compatible = "qcom,adreno-3xx" },
	{ .compatible = "amd,imageon" },
	{ .compatible = "qcom,kgsl-3d0" },
	{ },
};
static int add_gpu_components(struct device *dev,
			      struct component_match **matchptr)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, msm_gpu_match);
	if (!np)
		return 0;

	if (of_device_is_available(np))
		drm_of_component_match_add(dev, matchptr, component_compare_of, np);

	of_node_put(np);

	return 0;
}

static int msm_drm_bind(struct device *dev)
{
	return msm_drm_init(dev, &msm_driver);
}

static void msm_drm_unbind(struct device *dev)
{
	msm_drm_uninit(dev);
}

const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};

int msm_drv_probe(struct device *master_dev,
		  int (*kms_init)(struct drm_device *dev),
		  struct msm_kms *kms)
{
	struct msm_drm_private *priv;
	struct component_match *match = NULL;
	int ret;

	priv = devm_kzalloc(master_dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->kms = kms;
	priv->kms_init = kms_init;
	dev_set_drvdata(master_dev, priv);

	/* Add mdp components if we have KMS. */
	if (kms_init) {
		ret = add_components_mdp(master_dev, &match);
		if (ret)
			return ret;
	}

	ret = add_gpu_components(master_dev, &match);
	if (ret)
		return ret;

	/* on all devices that I am aware of, iommu's which can map
	 * any address the cpu can see are used:
	 */
	ret = dma_set_mask_and_coherent(master_dev, ~0);
	if (ret)
		return ret;

	ret = component_master_add_with_match(master_dev, &msm_drm_ops, match);
	if (ret)
		return ret;

	return 0;
}
/*
 * Used only for headless GPU instances
 */
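/*
 * (Probing with a NULL kms_init makes msm_drm_init() skip the KMS paths and
 * clear DRIVER_MODESET/DRIVER_ATOMIC, leaving a render-only GPU device.)
 */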
static int msm_pdev_probe(struct platform_device *pdev)
{
	return msm_drv_probe(&pdev->dev, NULL, NULL);
}

static void msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);
}

static struct platform_driver msm_platform_driver = {
	.probe = msm_pdev_probe,
	.remove = msm_pdev_remove,
	.driver = {
		.name = "msm",
	},
};

static int __init msm_drm_register(void)
{
	if (!modeset)
		return -EINVAL;

	msm_mdp_register();
	msm_dpu_register();
	msm_dsi_register();
	msm_hdmi_register();
	msm_dp_register();
	adreno_register();
	msm_mdp4_register();
	msm_mdss_register();
	return platform_driver_register(&msm_platform_driver);
}

static void __exit msm_drm_unregister(void)
{
	platform_driver_unregister(&msm_platform_driver);
	msm_mdss_unregister();
	msm_mdp4_unregister();
	msm_dp_unregister();
	msm_hdmi_unregister();
	adreno_unregister();
	msm_dsi_unregister();
	msm_mdp_unregister();
	msm_dpu_unregister();
}

module_init(msm_drm_register);
module_exit(msm_drm_unregister);

MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");