/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_hotplug_event(priv->fbdev);
}
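/* Mode-config hooks.  Only the hotplug forwarding above is local to this
 * file; fb_create and the atomic check/commit entry points are implemented
 * elsewhere in the driver.
 */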
static const struct drm_mode_config_funcs mode_config_funcs = {
	.fb_create = msm_framebuffer_create,
	.output_poll_changed = msm_fb_output_poll_changed,
	.atomic_check = msm_atomic_check,
	.atomic_commit = msm_atomic_commit,
};
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
{
	struct msm_drm_private *priv = dev->dev_private;
	int idx = priv->num_mmus++;

	if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
		return -EINVAL;

	priv->mmus[idx] = mmu;

	return idx;
}
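/* Module parameters.  Note the permissions: reglog and fbdev (0600) can be
 * flipped at runtime through sysfs, while vram (0) can only be set on the
 * module command line.
 */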
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
#define reglog 0
#endif
#ifdef CONFIG_DRM_MSM_FBDEV
static bool fbdev = true;
MODULE_PARM_DESC(fbdev, "Enable fbdev compat layer");
module_param(fbdev, bool, 0600);
#endif
static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
		const char *dbgname)
{
	struct resource *res;
	unsigned long size;
	void __iomem *ptr;

	if (name)
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	else
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
		return ERR_PTR(-EINVAL);
	}

	size = resource_size(res);

	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
	if (!ptr) {
		dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	if (reglog)
		printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);

	return ptr;
}
void msm_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
	writel(data, addr);
}
u32 msm_readl(const void __iomem *addr)
{
	u32 val = readl(addr);
	if (reglog)
		printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
	return val;
}
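/* All MMIO in the driver funnels through msm_ioremap()/msm_readl()/
 * msm_writel() above, so with CONFIG_DRM_MSM_REGISTER_LOGGING enabled and
 * reglog set, every register access can be traced in the kernel log.
 */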
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
	static const struct of_device_id match_types[] = { {
		.compatible = "qcom,mdss_mdp",
		.data	= (void	*)5,
	}, {
		/* end node */
	} };
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	match = of_match_node(match_types, dev->of_node);
	if (match)
		return (int)match->data;
#endif
	return 4;
}
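/* The MDP version picked here keys the KMS backend selection in msm_load()
 * below: "qcom,mdss_mdp" means MDP5, everything else falls back to MDP4.
 */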
static int msm_load(struct drm_device *dev, unsigned long flags)
{
	struct platform_device *pdev = dev->platformdev;
	struct msm_drm_private *priv;
	struct msm_kms *kms;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev->dev, "failed to allocate private data\n");
		return -ENOMEM;
	}

	dev->dev_private = priv;

	priv->wq = alloc_ordered_workqueue("msm", 0);
	init_waitqueue_head(&priv->fence_event);
	init_waitqueue_head(&priv->pending_crtcs_event);

	INIT_LIST_HEAD(&priv->inactive_list);
	INIT_LIST_HEAD(&priv->fence_cbs);

	drm_mode_config_init(dev);

	/* if we have no IOMMU, then we need to use carveout allocator.
	 * Grab the entire CMA chunk carved out in early startup in
	 * mach-msm:
	 */
	if (!iommu_present(&platform_bus_type)) {
		DEFINE_DMA_ATTRS(attrs);
		unsigned long size;
		void *p;

		DBG("using %s VRAM carveout", vram);
		size = memparse(vram, NULL);
		priv->vram.size = size;

		drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);

		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);

		/* note that for no-kernel-mapping, the vaddr returned
		 * is bogus, but non-null if allocation succeeded:
		 */
		p = dma_alloc_attrs(dev->dev, size,
				&priv->vram.paddr, GFP_KERNEL, &attrs);
		if (!p) {
			dev_err(dev->dev, "failed to allocate VRAM\n");
			priv->vram.paddr = 0;
			ret = -ENOMEM;
			goto fail;
		}

		dev_info(dev->dev, "VRAM: %08x->%08x\n",
				(uint32_t)priv->vram.paddr,
				(uint32_t)(priv->vram.paddr + size));
	}

	platform_set_drvdata(pdev, dev);

	/* Bind all our sub-components: */
	ret = component_bind_all(dev->dev, dev);
	if (ret)
		return ret;

	switch (get_mdp_ver(pdev)) {
	case 4:
		kms = mdp4_kms_init(dev);
		break;
	case 5:
		kms = mdp5_kms_init(dev);
		break;
	default:
		kms = ERR_PTR(-ENODEV);
		break;
	}

	if (IS_ERR(kms)) {
		/*
		 * NOTE: once we have GPU support, having no kms should not
		 * be considered fatal.. ideally we would still support gpu
		 * and (for example) use dmabuf/prime to share buffers with
		 * imx drm driver on iMX5
		 */
		dev_err(dev->dev, "failed to load kms\n");
		ret = PTR_ERR(kms);
		goto fail;
	}

	priv->kms = kms;

	if (kms) {
		pm_runtime_enable(dev->dev);
		ret = kms->funcs->hw_init(kms);
		if (ret) {
			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
			goto fail;
		}
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;
	dev->mode_config.funcs = &mode_config_funcs;

	ret = drm_vblank_init(dev, priv->num_crtcs);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		goto fail;
	}

	pm_runtime_get_sync(dev->dev);
	ret = drm_irq_install(dev, platform_get_irq(dev->platformdev, 0));
	pm_runtime_put_sync(dev->dev);
	if (ret < 0) {
		dev_err(dev->dev, "failed to install IRQ handler\n");
		goto fail;
	}

	drm_mode_config_reset(dev);

#ifdef CONFIG_DRM_MSM_FBDEV
	if (fbdev)
		priv->fbdev = msm_fbdev_init(dev);
#endif

	ret = msm_debugfs_late_init(dev);
	if (ret)
		goto fail;

	drm_kms_helper_poll_init(dev);

	return 0;

fail:
	msm_unload(dev);
	return ret;
}
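/* Any failure in msm_load() funnels through the fail: path into
 * msm_unload(), which tolerates partially-initialized state (NULL kms/gpu,
 * no VRAM carveout).
 */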
static void load_gpu(struct drm_device *dev)
{
	static DEFINE_MUTEX(init_lock);
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&init_lock);

	if (!priv->gpu)
		priv->gpu = adreno_load_gpu(dev);

	mutex_unlock(&init_lock);
}
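/* The GPU is brought up lazily on first open() rather than at driver load,
 * so the adreno firmware does not have to be present in the initrd.
 */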
static int msm_open(struct drm_device *dev, struct drm_file *file)
{
	struct msm_file_private *ctx;

	/* For now, load gpu on open.. to avoid the requirement of having
	 * firmware in the initrd.
	 */
	load_gpu(dev);

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	file->driver_priv = ctx;

	return 0;
}
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_file_private *ctx = file->driver_priv;
	struct msm_kms *kms = priv->kms;

	if (kms)
		kms->funcs->preclose(kms, file);

	mutex_lock(&dev->struct_mutex);
	if (ctx == priv->lastctx)
		priv->lastctx = NULL;
	mutex_unlock(&dev->struct_mutex);

	kfree(ctx);
}
static void msm_lastclose(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	if (priv->fbdev)
		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
}
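/* IRQ and vblank handling below is delegated wholesale to the KMS backend
 * (mdp4 or mdp5) through the kms->funcs vtable.
 */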
static irqreturn_t msm_irq(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq(kms);
}
static void msm_irq_preinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_preinstall(kms);
}
static int msm_irq_postinstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	return kms->funcs->irq_postinstall(kms);
}
static void msm_irq_uninstall(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	BUG_ON(!kms);
	kms->funcs->irq_uninstall(kms);
}
static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return -ENXIO;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
}
static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	if (!kms)
		return;
	DBG("dev=%p, crtc=%d", dev, crtc_id);
	kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
}
#ifdef CONFIG_DEBUG_FS
static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "%s Status:\n", gpu->name);
		gpu->funcs->show(gpu, m);
	}

	return 0;
}
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_gpu *gpu = priv->gpu;

	if (gpu) {
		seq_printf(m, "Active Objects (%s):\n", gpu->name);
		msm_gem_describe_objects(&gpu->active_list, m);
	}

	seq_printf(m, "Inactive Objects:\n");
	msm_gem_describe_objects(&priv->inactive_list, m);

	return 0;
}
static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
{
	return drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
}
static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_framebuffer *fb, *fbdev_fb = NULL;

	if (priv->fbdev) {
		seq_printf(m, "fbcon ");
		fbdev_fb = priv->fbdev->fb;
		msm_framebuffer_describe(fbdev_fb, m);
	}

	mutex_lock(&dev->mode_config.fb_lock);
	list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
		if (fb == fbdev_fb)
			continue;

		seq_printf(m, "user ");
		msm_framebuffer_describe(fb, m);
	}
	mutex_unlock(&dev->mode_config.fb_lock);

	return 0;
}
static int show_locked(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	int (*show)(struct drm_device *dev, struct seq_file *m) =
			node->info_ent->data;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = show(dev, m);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static struct drm_info_list msm_debugfs_list[] = {
		{ "gpu", show_locked, 0, msm_gpu_show },
		{ "gem", show_locked, 0, msm_gem_show },
		{ "mm", show_locked, 0, msm_mm_show },
		{ "fb", show_locked, 0, msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
{
	int ret;

	if (!minor)
		return 0;

	ret = msm_rd_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install rd debugfs\n");
		return ret;
	}

	ret = msm_perf_debugfs_init(minor);
	if (ret) {
		dev_err(minor->dev->dev, "could not install perf debugfs\n");
		return ret;
	}

	return 0;
}
int msm_debugfs_late_init(struct drm_device *dev)
{
	int ret;
	ret = late_init_minor(dev->primary);
	if (ret)
		return ret;
	ret = late_init_minor(dev->render);
	if (ret)
		return ret;
	ret = late_init_minor(dev->control);
	return ret;
}
static int msm_debugfs_init(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	int ret;

	ret = drm_debugfs_create_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list),
			minor->debugfs_root, minor);

	if (ret) {
		dev_err(dev->dev, "could not install msm_debugfs_list\n");
		return ret;
	}

	return 0;
}
static void msm_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(msm_debugfs_list,
			ARRAY_SIZE(msm_debugfs_list), minor);
	if (!minor->dev->dev_private)
		return;
	msm_rd_debugfs_cleanup(minor);
	msm_perf_debugfs_cleanup(minor);
}
#endif
int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
		struct timespec *timeout)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret;

	if (!priv->gpu)
		return 0;

	if (fence > priv->gpu->submitted_fence) {
		DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
				fence, priv->gpu->submitted_fence);
		return -EINVAL;
	}

	if (!timeout) {
		/* no-wait: */
		ret = fence_completed(dev, fence) ? 0 : -EBUSY;
	} else {
		unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
		unsigned long start_jiffies = jiffies;
		unsigned long remaining_jiffies;

		if (time_after(start_jiffies, timeout_jiffies))
			remaining_jiffies = 0;
		else
			remaining_jiffies = timeout_jiffies - start_jiffies;

		ret = wait_event_interruptible_timeout(priv->fence_event,
				fence_completed(dev, fence),
				remaining_jiffies);

		if (ret == 0) {
			DBG("timeout waiting for fence: %u (completed: %u)",
					fence, priv->completed_fence);
			ret = -ETIMEDOUT;
		} else if (ret != -ERESTARTSYS) {
			ret = 0;
		}
	}

	return ret;
}
int msm_queue_fence_cb(struct drm_device *dev,
		struct msm_fence_cb *cb, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	if (!list_empty(&cb->work.entry)) {
		ret = -EINVAL;
	} else if (fence > priv->completed_fence) {
		cb->fence = fence;
		list_add_tail(&cb->work.entry, &priv->fence_cbs);
	} else {
		queue_work(priv->wq, &cb->work);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/* called from workqueue */
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
	struct msm_drm_private *priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	priv->completed_fence = max(fence, priv->completed_fence);

	while (!list_empty(&priv->fence_cbs)) {
		struct msm_fence_cb *cb;

		cb = list_first_entry(&priv->fence_cbs,
				struct msm_fence_cb, work.entry);

		if (cb->fence > priv->completed_fence)
			break;

		list_del_init(&cb->work.entry);
		queue_work(priv->wq, &cb->work);
	}

	mutex_unlock(&dev->struct_mutex);

	wake_up_all(&priv->fence_event);
}
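/* Note: the walk in msm_update_fence() can stop at the first not-yet-
 * completed callback because callbacks are queued in submission order,
 * so fence_cbs stays sorted by fence number.
 */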
void __msm_fence_worker(struct work_struct *work)
{
	struct msm_fence_cb *cb = container_of(work, struct msm_fence_cb, work);
	cb->func(cb);
}
static int msm_ioctl_get_param(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct drm_msm_param *args = data;
	struct msm_gpu *gpu;

	/* for now, we just have 3d pipe.. eventually this would need to
	 * be more clever to dispatch to appropriate gpu module:
	 */
	if (args->pipe != MSM_PIPE_3D0)
		return -EINVAL;

	gpu = priv->gpu;

	if (!gpu)
		return -ENXIO;

	return gpu->funcs->get_param(gpu, args->param, &args->value);
}
static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	if (args->flags & ~MSM_BO_FLAGS) {
		DRM_ERROR("invalid flags: %08x\n", args->flags);
		return -EINVAL;
	}

	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}
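/* The uapi structs carry timeouts as raw sec/nsec pairs; TS() repacks one
 * into the struct timespec that the wait helpers expect:
 */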
#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_prep *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (args->op & ~MSM_PREP_FLAGS) {
		DRM_ERROR("invalid op: %08x\n", args->op);
		return -EINVAL;
	}

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_cpu_fini *args = data;
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	ret = msm_gem_cpu_fini(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_info *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	if (args->pad)
		return -EINVAL;

	obj = drm_gem_object_lookup(dev, file, args->handle);
	if (!obj)
		return -ENOENT;

	args->offset = msm_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_wait_fence *args = data;

	if (args->pad) {
		DRM_ERROR("invalid pad: %08x\n", args->pad);
		return -EINVAL;
	}

	return msm_wait_fence_interruptable(dev, args->fence,
			&TS(args->timeout));
}
static const struct drm_ioctl_desc msm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
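/* Every ioctl is unlocked, requires DRM auth, and is permitted on render
 * nodes (DRM_RENDER_ALLOW).
 */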
static const struct vm_operations_struct vm_ops = {
	.fault = msm_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations fops = {
	.owner              = THIS_MODULE,
	.open               = drm_open,
	.release            = drm_release,
	.unlocked_ioctl     = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl       = drm_compat_ioctl,
#endif
	.poll               = drm_poll,
	.read               = drm_read,
	.llseek             = no_llseek,
	.mmap               = msm_gem_mmap,
};
static struct drm_driver msm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER |
				DRIVER_MODESET,
	.load               = msm_load,
	.unload             = msm_unload,
	.open               = msm_open,
	.preclose           = msm_preclose,
	.lastclose          = msm_lastclose,
	.set_busid          = drm_platform_set_busid,
	.irq_handler        = msm_irq,
	.irq_preinstall     = msm_irq_preinstall,
	.irq_postinstall    = msm_irq_postinstall,
	.irq_uninstall      = msm_irq_uninstall,
	.get_vblank_counter = drm_vblank_count,
	.enable_vblank      = msm_enable_vblank,
	.disable_vblank     = msm_disable_vblank,
	.gem_free_object    = msm_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.dumb_create        = msm_gem_dumb_create,
	.dumb_map_offset    = msm_gem_dumb_map_offset,
	.dumb_destroy       = drm_gem_dumb_destroy,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = msm_gem_prime_pin,
	.gem_prime_unpin    = msm_gem_prime_unpin,
	.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
	.gem_prime_vmap     = msm_gem_prime_vmap,
	.gem_prime_vunmap   = msm_gem_prime_vunmap,
	.gem_prime_mmap     = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = msm_debugfs_init,
	.debugfs_cleanup    = msm_debugfs_cleanup,
#endif
	.ioctls             = msm_ioctls,
	.num_ioctls         = DRM_MSM_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "msm",
	.desc               = "MSM Snapdragon DRM",
};
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_disable(ddev);

	return 0;
}

static int msm_pm_resume(struct device *dev)
{
	struct drm_device *ddev = dev_get_drvdata(dev);

	drm_kms_helper_poll_enable(ddev);

	return 0;
}
#endif
static const struct dev_pm_ops msm_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
};
/*
 * Componentized driver support:
 */
#ifdef CONFIG_OF
/* NOTE: the CONFIG_OF case duplicates the same code as exynos or imx
 * (or probably any other).. so probably some room for some helpers
 */
static int compare_of(struct device *dev, void *data)
{
	return dev->of_node == data;
}
static int add_components(struct device *dev, struct component_match **matchptr,
		const char *name)
{
	struct device_node *np = dev->of_node;
	unsigned i;

	for (i = 0; ; i++) {
		struct device_node *node;

		node = of_parse_phandle(np, name, i);
		if (!node)
			break;

		component_match_add(dev, matchptr, compare_of, node);
	}

	return 0;
}
#else
static int compare_dev(struct device *dev, void *data)
{
	return dev == data;
}
#endif
static int msm_drm_bind(struct device *dev)
{
	return drm_platform_init(&msm_driver, to_platform_device(dev));
}
static void msm_drm_unbind(struct device *dev)
{
	drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
}
static const struct component_master_ops msm_drm_ops = {
	.bind = msm_drm_bind,
	.unbind = msm_drm_unbind,
};
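/* Probe only assembles the component match list (connector/gpu phandles
 * from DT, or well-known device names otherwise) and registers a component
 * master; real initialization happens in msm_drm_bind() once all
 * sub-devices have probed.
 */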
static int msm_pdev_probe(struct platform_device *pdev)
{
	struct component_match *match = NULL;
#ifdef CONFIG_OF
	add_components(&pdev->dev, &match, "connectors");
	add_components(&pdev->dev, &match, "gpus");
#else
	/* For non-DT case, it kinda sucks.  We don't actually have a way
	 * to know whether or not we are waiting for certain devices (or if
	 * they are simply not present).  But for non-DT we only need to
	 * care about apq8064/apq8060/etc (all mdp4/a3xx):
	 */
	static const char *devnames[] = {
			"hdmi_msm.0", "kgsl-3d0.0",
	};
	int i;

	DBG("Adding components..");

	for (i = 0; i < ARRAY_SIZE(devnames); i++) {
		struct device *dev;

		dev = bus_find_device_by_name(&platform_bus_type,
				NULL, devnames[i]);
		if (!dev) {
			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);
			return -EPROBE_DEFER;
		}

		component_match_add(&pdev->dev, &match, compare_dev, dev);
	}
#endif

	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
}
static int msm_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &msm_drm_ops);

	return 0;
}
static const struct platform_device_id msm_id[] = {
	{ "mdp", 0 },
	{ }
};
static const struct of_device_id dt_match[] = {
	{ .compatible = "qcom,mdp" },      /* mdp4 */
	{ .compatible = "qcom,mdss_mdp" }, /* mdp5 */
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver msm_platform_driver = {
	.probe      = msm_pdev_probe,
	.remove     = msm_pdev_remove,
	.driver     = {
		.name   = "msm",
		.of_match_table = dt_match,
		.pm     = &msm_pm_ops,
	},
	.id_table   = msm_id,
};
static int __init msm_drm_register(void)
{
	DBG("init");
	msm_edp_register();
	hdmi_register();
	adreno_register();
	return platform_driver_register(&msm_platform_driver);
}
static void __exit msm_drm_unregister(void)
{
	DBG("fini");
	platform_driver_unregister(&msm_platform_driver);
	hdmi_unregister();
	adreno_unregister();
	msm_edp_unregister();
}
module_init(msm_drm_register);
module_exit(msm_drm_unregister);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com>");
MODULE_DESCRIPTION("MSM DRM Driver");
MODULE_LICENSE("GPL");