/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_hotplug_event(priv->fbdev);
#endif
}
static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .output_poll_changed = msm_fb_output_poll_changed,
        .atomic_check = msm_atomic_check,
        .atomic_commit = msm_atomic_commit,
};
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
        struct msm_drm_private *priv = dev->dev_private;
        int idx = priv->num_mmus++;

        if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
                return -EINVAL;

        priv->mmus[idx] = mmu;

        return idx;
}
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif

#ifdef CONFIG_DRM_MSM_FBDEV
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif

static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
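
/* Usage sketch: since reglog and fbdev are registered with 0600
 * permissions, they can be flipped at runtime via sysfs, while vram
 * (permissions 0) can only be set at module load / boot time, e.g.:
 *
 *      echo 1 > /sys/module/msm/parameters/reglog
 *      msm.vram=32m       (on the kernel command line)
 */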
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
                const char *dbgname)
{
        struct resource *res;
        unsigned long size;
        void __iomem *ptr;

        if (name)
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
        else
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res) {
                dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
                return ERR_PTR(-EINVAL);
        }

        size = resource_size(res);

        ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
        if (!ptr) {
                dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        if (reglog)
                printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

        return ptr;
}
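
/* Callers get an ERR_PTR() on failure, so a typical use looks like the
 * sketch below (the resource and debug names here are hypothetical):
 *
 *      void __iomem *base = msm_ioremap(pdev, "mdp_phys", "MDP");
 *      if (IS_ERR(base))
 *              return PTR_ERR(base);
 *      msm_writel(0, base + 0x100);
 */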
void msm_writel(u32 data, void __iomem *addr)
{
        if (reglog)
                printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
        writel(data, addr);
}
u32 msm_readl(const void __iomem *addr)
{
        u32 val = readl(addr);
        if (reglog)
                printk(KERN_ERR "IO:R %p %08x\n", addr, val);
        return val;
}
/*
 * DRM operations:
 */

static int msm_unload(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        struct msm_gpu *gpu = priv->gpu;

        drm_kms_helper_poll_fini(dev);
        drm_mode_config_cleanup(dev);
        drm_vblank_cleanup(dev);

        pm_runtime_get_sync(dev->dev);
        drm_irq_uninstall(dev);
        pm_runtime_put_sync(dev->dev);

        flush_workqueue(priv->wq);
        destroy_workqueue(priv->wq);

        if (kms) {
                pm_runtime_disable(dev->dev);
                kms->funcs->destroy(kms);
        }

        if (gpu) {
                mutex_lock(&dev->struct_mutex);
                gpu->funcs->pm_suspend(gpu);
                mutex_unlock(&dev->struct_mutex);
                gpu->funcs->destroy(gpu);
        }

        if (priv->vram.paddr) {
                DEFINE_DMA_ATTRS(attrs);
                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
                drm_mm_takedown(&priv->vram.mm);
                dma_free_attrs(dev->dev, priv->vram.size, NULL,
                                priv->vram.paddr, &attrs);
        }

        component_unbind_all(dev->dev, dev);

        dev->dev_private = NULL;

        kfree(priv);

        return 0;
}
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
        static const struct of_device_id match_types[] = { {
                .compatible = "qcom,mdss_mdp",
                .data = (void *)5,
        }, {
                /* end node */
        } };
        struct device *dev = &pdev->dev;
        const struct of_device_id *match;
        match = of_match_node(match_types, dev->of_node);
        if (match)
                return (int)(unsigned long)match->data;
#endif
        return 4;
}
#include <linux/of_address.h>

static int msm_init_vram(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        unsigned long size = 0;
        int ret = 0;

#ifdef CONFIG_OF
        /* In the device-tree world, we could have a 'memory-region'
         * phandle, which gives us a link to our "vram".  Allocating
         * is all nicely abstracted behind the dma api, but we need
         * to know the entire size to allocate it all in one go. There
         * are two cases:
         *  1) device with no IOMMU, in which case we need exclusive
         *     access to a VRAM carveout big enough for all gpu
         *     buffers
         *  2) device with IOMMU, but where the bootloader puts up
         *     a splash screen.  In this case, the VRAM carveout
         *     need only be large enough for fbdev fb.  But we need
         *     exclusive access to the buffer to avoid the kernel
         *     using those pages for other purposes (which appears
         *     as corruption on screen before we have a chance to
         *     load and do initial modeset)
         */
        struct device_node *node;

        node = of_parse_phandle(dev->dev->of_node, "memory-region", 0);
        if (node) {
                struct resource r;
                ret = of_address_to_resource(node, 0, &r);
                if (ret)
                        return ret;
                size = r.end - r.start;
                DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
        } else
#endif

        /* if we have no IOMMU, then we need to use carveout allocator.
         * Grab the entire CMA chunk carved out in early startup in
         * mach-msm:
         */
        if (!iommu_present(&platform_bus_type)) {
                DRM_INFO("using %s VRAM carveout\n", vram);
                size = memparse(vram, NULL);
        }

        if (size) {
                DEFINE_DMA_ATTRS(attrs);
                void *p;

                priv->vram.size = size;

                drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

                dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
                dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

                /* note that for no-kernel-mapping, the vaddr returned
                 * is bogus, but non-null if allocation succeeded:
                 */
                p = dma_alloc_attrs(dev->dev, size,
                                &priv->vram.paddr, GFP_KERNEL, &attrs);
                if (!p) {
                        dev_err(dev->dev, "failed to allocate VRAM\n");
                        priv->vram.paddr = 0;
                        return -ENOMEM;
                }

                dev_info(dev->dev, "VRAM: %08x->%08x\n",
                                (uint32_t)priv->vram.paddr,
                                (uint32_t)(priv->vram.paddr + size));
        }

        return ret;
}
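
/* Device-tree sketch of the 'memory-region' case above (node names,
 * addresses and sizes are illustrative only):
 *
 *      reserved-memory {
 *              splash: splash@2f200000 {
 *                      compatible = "shared-dma-pool";
 *                      reg = <0x2f200000 0x800000>;
 *                      no-map;
 *              };
 *      };
 *
 *      mdp@5100000 {
 *              ...
 *              memory-region = <&splash>;
 *      };
 */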
static int msm_load(struct drm_device *dev, unsigned long flags)
{
        struct platform_device *pdev = dev->platformdev;
        struct msm_drm_private *priv;
        struct msm_kms *kms;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                dev_err(dev->dev, "failed to allocate private data\n");
                return -ENOMEM;
        }

        dev->dev_private = priv;

        priv->wq = alloc_ordered_workqueue("msm", 0);
        init_waitqueue_head(&priv->fence_event);
        init_waitqueue_head(&priv->pending_crtcs_event);

        INIT_LIST_HEAD(&priv->inactive_list);
        INIT_LIST_HEAD(&priv->fence_cbs);

        drm_mode_config_init(dev);

        platform_set_drvdata(pdev, dev);

        /* Bind all our sub-components: */
        ret = component_bind_all(dev->dev, dev);
        if (ret)
                return ret;

        ret = msm_init_vram(dev);
        if (ret)
                goto fail;

        switch (get_mdp_ver(pdev)) {
        case 4:
                kms = mdp4_kms_init(dev);
                break;
        case 5:
                kms = mdp5_kms_init(dev);
                break;
        default:
                kms = ERR_PTR(-ENODEV);
                break;
        }

        if (IS_ERR(kms)) {
                /*
                 * NOTE: once we have GPU support, having no kms should not
                 * be considered fatal.. ideally we would still support gpu
                 * and (for example) use dmabuf/prime to share buffers with
                 * imx drm driver on iMX5
                 */
                dev_err(dev->dev, "failed to load kms\n");
                ret = PTR_ERR(kms);
                goto fail;
        }

        priv->kms = kms;

        if (kms) {
                pm_runtime_enable(dev->dev);
                ret = kms->funcs->hw_init(kms);
                if (ret) {
                        dev_err(dev->dev, "kms hw init failed: %d\n", ret);
                        goto fail;
                }
        }

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;
        dev->mode_config.max_width = 2048;
        dev->mode_config.max_height = 2048;
        dev->mode_config.funcs = &mode_config_funcs;

        ret = drm_vblank_init(dev, priv->num_crtcs);
        if (ret < 0) {
                dev_err(dev->dev, "failed to initialize vblank\n");
                goto fail;
        }

        pm_runtime_get_sync(dev->dev);
        ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
        pm_runtime_put_sync(dev->dev);
        if (ret < 0) {
                dev_err(dev->dev, "failed to install IRQ handler\n");
                goto fail;
        }

        drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_MSM_FBDEV
        if (fbdev)
                priv->fbdev = msm_fbdev_init(dev);
#endif

        ret = msm_debugfs_late_init(dev);
        if (ret)
                goto fail;

        drm_kms_helper_poll_init(dev);

        return 0;

fail:
        msm_unload(dev);
        return ret;
}
static void load_gpu(struct drm_device *dev)
{
        static DEFINE_MUTEX(init_lock);
        struct msm_drm_private *priv = dev->dev_private;

        mutex_lock(&init_lock);

        if (!priv->gpu)
                priv->gpu = adreno_load_gpu(dev);

        mutex_unlock(&init_lock);
}
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
        struct msm_file_private *ctx;

        /* For now, load gpu on open.. to avoid the requirement of having
         * firmware in the initrd.
         */
        load_gpu(dev);

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        file->driver_priv = ctx;

        return 0;
}
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_file_private *ctx = file->driver_priv;
        struct msm_kms *kms = priv->kms;

        if (kms)
                kms->funcs->preclose(kms, file);

        mutex_lock(&dev->struct_mutex);
        if (ctx == priv->lastctx)
                priv->lastctx = NULL;
        mutex_unlock(&dev->struct_mutex);

        kfree(ctx);
}
static void msm_lastclose(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
#endif
}
static irqreturn_t msm_irq(int irq, void *arg)
{
        struct drm_device *dev = arg;
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        return kms->funcs->irq_postinstall(kms);
}
static void msm_irq_uninstall(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        BUG_ON(!kms);
        kms->funcs->irq_uninstall(kms);
}
static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return -ENXIO;
        DBG("dev=%p, crtc=%d", dev, crtc_id);
        return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}
static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
        if (!kms)
                return;
        DBG("dev=%p, crtc=%d", dev, crtc_id);
        kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}
/*
 * DebugFS:
 */

#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;

        if (gpu) {
                seq_printf(m, "%s Status:\n", gpu->name);
                gpu->funcs->show(gpu, m);
        }

        return 0;
}
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_gpu *gpu = priv->gpu;

        if (gpu) {
                seq_printf(m, "Active Objects (%s):\n", gpu->name);
                msm_gem_describe_objects(&gpu->active_list, m);
        }

        seq_printf(m, "Inactive Objects:\n");
        msm_gem_describe_objects(&priv->inactive_list, m);

        return 0;
}
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
        return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_framebuffer *fb, *fbdev_fb = NULL;

        if (priv->fbdev) {
                seq_printf(m, "fbcon ");
                fbdev_fb = priv->fbdev->fb;
                msm_framebuffer_describe(fbdev_fb, m);
        }

        mutex_lock(&dev->mode_config.fb_lock);
        list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
                if (fb == fbdev_fb)
                        continue;

                seq_printf(m, "user ");
                msm_framebuffer_describe(fb, m);
        }
        mutex_unlock(&dev->mode_config.fb_lock);

        return 0;
}
static int show_locked(struct seq_file *m, void *arg)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        int (*show)(struct drm_device *dev, struct seq_file *m) =
                        node->info_ent->data;
        int ret;

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        ret = show(dev, m);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static struct drm_info_list msm_debugfs_list[] = {
        { "gpu", show_locked, 0, msm_gpu_show },
        { "gem", show_locked, 0, msm_gem_show },
        { "mm",  show_locked, 0, msm_mm_show },
        { "fb",  show_locked, 0, msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
{
        int ret;

        if (!minor)
                return 0;

        ret = msm_rd_debugfs_init(minor);
        if (ret) {
                dev_err(minor->dev->dev, "could not install rd debugfs\n");
                return ret;
        }

        ret = msm_perf_debugfs_init(minor);
        if (ret) {
                dev_err(minor->dev->dev, "could not install perf debugfs\n");
                return ret;
        }

        return 0;
}
int msm_debugfs_late_init(struct drm_device *dev)
{
        int ret;

        ret = late_init_minor(dev->primary);
        if (ret)
                return ret;

        ret = late_init_minor(dev->render);
        if (ret)
                return ret;

        ret = late_init_minor(dev->control);
        return ret;
}
static int msm_debugfs_init(struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        int ret;

        ret = drm_debugfs_create_files(msm_debugfs_list,
                        ARRAY_SIZE(msm_debugfs_list),
                        minor->debugfs_root, minor);

        if (ret) {
                dev_err(dev->dev, "could not install msm_debugfs_list\n");
                return ret;
        }

        return 0;
}
static void msm_debugfs_cleanup(struct drm_minor *minor)
{
        drm_debugfs_remove_files(msm_debugfs_list,
                        ARRAY_SIZE(msm_debugfs_list), minor);
        if (!minor->dev->dev_private)
                return;
        msm_rd_debugfs_cleanup(minor);
        msm_perf_debugfs_cleanup(minor);
}
#endif

/*
 * Fences:
 */
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
                ktime_t *timeout, bool interruptible)
{
        struct msm_drm_private *priv = dev->dev_private;
        int ret;

        if (!priv->gpu)
                return 0;

        if (fence > priv->gpu->submitted_fence) {
                DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
                                fence, priv->gpu->submitted_fence);
                return -EINVAL;
        }

        if (!timeout) {
                /* no-wait: */
                ret = fence_completed(dev, fence) ? 0 : -EBUSY;
        } else {
                ktime_t now = ktime_get();
                unsigned long remaining_jiffies;

                if (ktime_compare(*timeout, now) < 0) {
                        remaining_jiffies = 0;
                } else {
                        ktime_t rem = ktime_sub(*timeout, now);
                        struct timespec ts = ktime_to_timespec(rem);
                        remaining_jiffies = timespec_to_jiffies(&ts);
                }

                if (interruptible)
                        ret = wait_event_interruptible_timeout(priv->fence_event,
                                        fence_completed(dev, fence),
                                        remaining_jiffies);
                else
                        ret = wait_event_timeout(priv->fence_event,
                                        fence_completed(dev, fence),
                                        remaining_jiffies);

                if (ret == 0) {
                        DBG("timeout waiting for fence: %u (completed: %u)",
                                        fence, priv->completed_fence);
                        ret = -ETIMEDOUT;
                } else if (ret != -ERESTARTSYS) {
                        ret = 0;
                }
        }

        return ret;
}
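
/* Worked example of the conversion above: with HZ=100 (10ms ticks), a
 * *timeout that lies ~25ms in the future becomes rem = 25000000ns,
 * ts = { .tv_sec = 0, .tv_nsec = 25000000 }, and timespec_to_jiffies()
 * rounds up to 3 jiffies, so the wait never expires short of the
 * caller's deadline.
 */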
int msm_queue_fence_cb(struct drm_device *dev,
                struct msm_fence_cb *cb, uint32_t fence)
{
        struct msm_drm_private *priv = dev->dev_private;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);
        if (!list_empty(&cb->work.entry)) {
                ret = -EINVAL;
        } else if (fence > priv->completed_fence) {
                cb->fence = fence;
                list_add_tail(&cb->work.entry, &priv->fence_cbs);
        } else {
                queue_work(priv->wq, &cb->work);
        }
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
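
/* Sketch of a caller hooking into fence completion, assuming the
 * INIT_FENCE_CB() helper that msm_drv.h pairs with __msm_fence_worker()
 * (the callback runs on priv->wq once the fence is retired):
 *
 *      static void retire_cb(struct msm_fence_cb *cb)
 *      {
 *              ... everything up to cb->fence is now done ...
 *      }
 *      ...
 *      INIT_FENCE_CB(&submit->retire_cb, retire_cb);
 *      msm_queue_fence_cb(dev, &submit->retire_cb, fence);
 */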
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
        struct msm_drm_private *priv = dev->dev_private;

        mutex_lock(&dev->struct_mutex);
        priv->completed_fence = max(fence, priv->completed_fence);

        while (!list_empty(&priv->fence_cbs)) {
                struct msm_fence_cb *cb;

                cb = list_first_entry(&priv->fence_cbs,
                                struct msm_fence_cb, work.entry);

                if (cb->fence > priv->completed_fence)
                        break;

                list_del_init(&cb->work.entry);
                queue_work(priv->wq, &cb->work);
        }

        mutex_unlock(&dev->struct_mutex);

        wake_up_all(&priv->fence_event);
}
void __msm_fence_worker(struct work_struct *work)
{
        struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
        WARN_ON(!cb->func);
        cb->func(cb);
}

/*
 * DRM ioctls:
 */
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_param *args = data;
        struct msm_gpu *gpu;

        /* for now, we just have 3d pipe.. eventually this would need to
         * be more clever to dispatch to appropriate gpu module:
         */
        if (args->pipe != MSM_PIPE_3D0)
                return -EINVAL;

        gpu = priv->gpu;

        if (!gpu)
                return -ENXIO;

        return gpu->funcs->get_param(gpu, args->param, &args->value);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_new *args = data;

        if (args->flags & ~MSM_BO_FLAGS) {
                DRM_ERROR("invalid flags: %08x\n", args->flags);
                return -EINVAL;
        }

        return msm_gem_new_handle(dev, file, args->size,
                        args->flags, &args->handle);
}
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
        return ktime_set(timeout.tv_sec, timeout.tv_nsec);
}
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_prep *args = data;
        struct drm_gem_object *obj;
        ktime_t timeout = to_ktime(args->timeout);
        int ret;

        if (args->op & ~MSM_PREP_FLAGS) {
                DRM_ERROR("invalid op: %08x\n", args->op);
                return -EINVAL;
        }

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_prep(obj, args->op, &timeout);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_cpu_fini *args = data;
        struct drm_gem_object *obj;
        int ret;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = msm_gem_cpu_fini(obj);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_gem_info *args = data;
        struct drm_gem_object *obj;
        int ret = 0;

        if (args->pad)
                return -EINVAL;

        obj = drm_gem_object_lookup(dev, file, args->handle);
        if (!obj)
                return -ENOENT;

        args->offset = msm_gem_mmap_offset(obj);

        drm_gem_object_unreference_unlocked(obj);

        return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_msm_wait_fence *args = data;
        ktime_t timeout = to_ktime(args->timeout);

        if (args->pad) {
                DRM_ERROR("invalid pad: %08x\n", args->pad);
                return -EINVAL;
        }

        return msm_wait_fence(dev, args->fence, &timeout, true);
}
static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
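
/* Userspace sketch (illustrative) of driving one of these ioctls through
 * libdrm's drmCommandWriteRead() wrapper, using the request struct from
 * msm_drm.h:
 *
 *      struct drm_msm_gem_new req = { .size = 4096, .flags = MSM_BO_WC };
 *
 *      if (drmCommandWriteRead(fd, DRM_MSM_GEM_NEW, &req, sizeof(req)) == 0)
 *              printf("new gem handle: %u\n", req.handle);
 */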
static const struct vm_operations_struct vm_ops = {
        .fault = msm_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};
static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
        .release            = drm_release,
        .unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl       = drm_compat_ioctl,
#endif
        .poll               = drm_poll,
        .read               = drm_read,
        .llseek             = no_llseek,
        .mmap               = msm_gem_mmap,
};
static struct drm_driver msm_driver = {
        .driver_features    = DRIVER_HAVE_IRQ |
                                DRIVER_GEM |
                                DRIVER_PRIME |
                                DRIVER_RENDER |
                                DRIVER_MODESET,
        .load               = msm_load,
        .unload             = msm_unload,
        .open               = msm_open,
        .preclose           = msm_preclose,
        .lastclose          = msm_lastclose,
        .set_busid          = drm_platform_set_busid,
        .irq_handler        = msm_irq,
        .irq_preinstall     = msm_irq_preinstall,
        .irq_postinstall    = msm_irq_postinstall,
        .irq_uninstall      = msm_irq_uninstall,
        .get_vblank_counter = drm_vblank_count,
        .enable_vblank      = msm_enable_vblank,
        .disable_vblank     = msm_disable_vblank,
        .gem_free_object    = msm_gem_free_object,
        .gem_vm_ops         = &vm_ops,
        .dumb_create        = msm_gem_dumb_create,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .dumb_destroy       = drm_gem_dumb_destroy,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
        .gem_prime_vmap     = msm_gem_prime_vmap,
        .gem_prime_vunmap   = msm_gem_prime_vunmap,
        .gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init       = msm_debugfs_init,
        .debugfs_cleanup    = msm_debugfs_cleanup,
#endif
        .ioctls             = msm_ioctls,
        .num_ioctls         = DRM_MSM_NUM_IOCTLS,
        .fops               = &fops,
        .name               = "msm",
        .desc               = "MSM Snapdragon DRM",
        .date               = "20130625",
        .major              = 1,
        .minor              = 0,
};
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);

        drm_kms_helper_poll_disable(ddev);

        return 0;
}

static int msm_pm_resume(struct device *dev)
{
        struct drm_device *ddev = dev_get_drvdata(dev);

        drm_kms_helper_poll_enable(ddev);

        return 0;
}
#endif

static const struct dev_pm_ops msm_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
/*
 * Componentized driver support:
 */

#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
 * (or probably any other).. so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
        return dev->of_node == data;
}
static int add_components(struct device *dev, struct component_match **matchptr,
                const char *name)
{
        struct device_node *np = dev->of_node;
        unsigned i;

        for (i = 0; ; i++) {
                struct device_node *node;

                node = of_parse_phandle(np, name, i);
                if (!node)
                        break;

                component_match_add(dev, matchptr, compare_of, node);
        }

        return 0;
}
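
/* Device-tree sketch of the phandle lists walked above (labels are
 * illustrative; the property names come from the code):
 *
 *      mdp: mdp@5100000 {
 *              compatible = "qcom,mdp";
 *              ...
 *              connectors = <&hdmi>;
 *              gpus = <&gpu>;
 *      };
 */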
#else
static int compare_dev(struct device *dev, void *data)
{
        return dev == data;
}
#endif
static int msm_drm_bind(struct device *dev)
{
        return drm_platform_init(&msm_driver, to_platform_device(dev));
}

static void msm_drm_unbind(struct device *dev)
{
        drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}

static const struct component_master_ops msm_drm_ops = {
        .bind = msm_drm_bind,
        .unbind = msm_drm_unbind,
};

/*
 * Platform driver:
 */
static int msm_pdev_probe(struct platform_device *pdev)
{
        struct component_match *match = NULL;

#ifdef CONFIG_OF
        add_components(&pdev->dev, &match, "connectors");
        add_components(&pdev->dev, &match, "gpus");
#else
        /* For non-DT case, it kinda sucks.  We don't actually have a way
         * to know whether or not we are waiting for certain devices (or if
         * they are simply not present).  But for non-DT we only need to
         * care about apq8064/apq8060/etc (all mdp4/a3xx):
         */
        static const char *devnames[] = {
                        "hdmi_msm.0", "kgsl-3d0.0",
        };
        int i;

        DBG("Adding components..");

        for (i = 0; i < ARRAY_SIZE(devnames); i++) {
                struct device *dev;

                dev = bus_find_device_by_name(&platform_bus_type,
                                NULL, devnames[i]);
                if (!dev) {
                        dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
                        return -EPROBE_DEFER;
                }

                component_match_add(&pdev->dev, &match, compare_dev, dev);
        }
#endif

        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
        return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
        component_master_del(&pdev->dev, &msm_drm_ops);

        return 0;
}
static const struct platform_device_id msm_id[] = {
        { "mdp", 0 },
        { }
};
static const struct of_device_id dt_match[] = {
        { .compatible = "qcom,mdp" },      /* mdp4 */
        { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
        {}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver msm_platform_driver = {
        .probe      = msm_pdev_probe,
        .remove     = msm_pdev_remove,
        .driver     = {
                .name           = "msm",
                .of_match_table = dt_match,
                .pm             = &msm_pm_ops,
        },
        .id_table   = msm_id,
};
static int __init msm_drm_register(void)
{
        DBG("init");
        msm_dsi_register();
        msm_edp_register();
        hdmi_register();
        adreno_register();
        return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
        hdmi_unregister();
        adreno_unregister();
        msm_edp_unregister();
        msm_dsi_unregister();
}
module_init(msm_drm_register);
module_exit(msm_drm_unregister);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");