1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
8 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
10 #include <linux/debugfs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/of_irq.h>
14 #include <drm/drm_crtc.h>
15 #include <drm/drm_file.h>
22 #include "dpu_core_irq.h"
23 #include "dpu_formats.h"
24 #include "dpu_hw_vbif.h"
26 #include "dpu_encoder.h"
27 #include "dpu_plane.h"
30 #define CREATE_TRACE_POINTS
31 #include "dpu_trace.h"
34 * To enable overall DRM driver logging
35 * # echo 0x2 > /sys/module/drm/parameters/debug
37 * To enable DRM driver h/w logging
38 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
40 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
42 #define DPU_DEBUGFS_DIR "msm_dpu"
43 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
45 static int dpu_kms_hw_init(struct msm_kms
*kms
);
46 static void _dpu_kms_mmu_destroy(struct dpu_kms
*dpu_kms
);
48 static unsigned long dpu_iomap_size(struct platform_device
*pdev
,
53 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, name
);
55 DRM_ERROR("failed to get memory resource: %s\n", name
);
59 return resource_size(res
);
62 #ifdef CONFIG_DEBUG_FS
63 static int _dpu_danger_signal_status(struct seq_file
*s
,
66 struct dpu_kms
*kms
= (struct dpu_kms
*)s
->private;
67 struct dpu_danger_safe_status status
;
71 DPU_ERROR("invalid arg(s)\n");
75 memset(&status
, 0, sizeof(struct dpu_danger_safe_status
));
77 pm_runtime_get_sync(&kms
->pdev
->dev
);
79 seq_puts(s
, "\nDanger signal status:\n");
80 if (kms
->hw_mdp
->ops
.get_danger_status
)
81 kms
->hw_mdp
->ops
.get_danger_status(kms
->hw_mdp
,
84 seq_puts(s
, "\nSafe signal status:\n");
85 if (kms
->hw_mdp
->ops
.get_danger_status
)
86 kms
->hw_mdp
->ops
.get_danger_status(kms
->hw_mdp
,
89 pm_runtime_put_sync(&kms
->pdev
->dev
);
91 seq_printf(s
, "MDP : 0x%x\n", status
.mdp
);
93 for (i
= SSPP_VIG0
; i
< SSPP_MAX
; i
++)
94 seq_printf(s
, "SSPP%d : 0x%x \t", i
- SSPP_VIG0
,
101 #define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
102 static int __prefix ## _open(struct inode *inode, struct file *file) \
104 return single_open(file, __prefix ## _show, inode->i_private); \
106 static const struct file_operations __prefix ## _fops = { \
107 .owner = THIS_MODULE, \
108 .open = __prefix ## _open, \
109 .release = single_release, \
111 .llseek = seq_lseek, \
114 static int dpu_debugfs_danger_stats_show(struct seq_file
*s
, void *v
)
116 return _dpu_danger_signal_status(s
, true);
118 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats
);
120 static int dpu_debugfs_safe_stats_show(struct seq_file
*s
, void *v
)
122 return _dpu_danger_signal_status(s
, false);
124 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats
);
126 static void dpu_debugfs_danger_init(struct dpu_kms
*dpu_kms
,
127 struct dentry
*parent
)
129 struct dentry
*entry
= debugfs_create_dir("danger", parent
);
131 debugfs_create_file("danger_status", 0600, entry
,
132 dpu_kms
, &dpu_debugfs_danger_stats_fops
);
133 debugfs_create_file("safe_status", 0600, entry
,
134 dpu_kms
, &dpu_debugfs_safe_stats_fops
);
137 static int _dpu_debugfs_show_regset32(struct seq_file
*s
, void *data
)
139 struct dpu_debugfs_regset32
*regset
= s
->private;
140 struct dpu_kms
*dpu_kms
= regset
->dpu_kms
;
141 struct drm_device
*dev
;
142 struct msm_drm_private
*priv
;
150 priv
= dev
->dev_private
;
151 base
= dpu_kms
->mmio
+ regset
->offset
;
153 /* insert padding spaces, if needed */
154 if (regset
->offset
& 0xF) {
155 seq_printf(s
, "[%x]", regset
->offset
& ~0xF);
156 for (i
= 0; i
< (regset
->offset
& 0xF); i
+= 4)
160 pm_runtime_get_sync(&dpu_kms
->pdev
->dev
);
162 /* main register output */
163 for (i
= 0; i
< regset
->blk_len
; i
+= 4) {
164 addr
= regset
->offset
+ i
;
165 if ((addr
& 0xF) == 0x0)
166 seq_printf(s
, i
? "\n[%x]" : "[%x]", addr
);
167 seq_printf(s
, " %08x", readl_relaxed(base
+ i
));
170 pm_runtime_put_sync(&dpu_kms
->pdev
->dev
);
175 static int dpu_debugfs_open_regset32(struct inode
*inode
,
178 return single_open(file
, _dpu_debugfs_show_regset32
, inode
->i_private
);
181 static const struct file_operations dpu_fops_regset32
= {
182 .open
= dpu_debugfs_open_regset32
,
185 .release
= single_release
,
188 void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32
*regset
,
189 uint32_t offset
, uint32_t length
, struct dpu_kms
*dpu_kms
)
192 regset
->offset
= offset
;
193 regset
->blk_len
= length
;
194 regset
->dpu_kms
= dpu_kms
;
198 void dpu_debugfs_create_regset32(const char *name
, umode_t mode
,
199 void *parent
, struct dpu_debugfs_regset32
*regset
)
201 if (!name
|| !regset
|| !regset
->dpu_kms
|| !regset
->blk_len
)
204 /* make sure offset is a multiple of 4 */
205 regset
->offset
= round_down(regset
->offset
, 4);
207 debugfs_create_file(name
, mode
, parent
, regset
, &dpu_fops_regset32
);
210 static int dpu_kms_debugfs_init(struct msm_kms
*kms
, struct drm_minor
*minor
)
212 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
213 void *p
= dpu_hw_util_get_log_mask_ptr();
214 struct dentry
*entry
;
219 entry
= debugfs_create_dir("debug", minor
->debugfs_root
);
221 debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME
, 0600, entry
, p
);
223 dpu_debugfs_danger_init(dpu_kms
, entry
);
224 dpu_debugfs_vbif_init(dpu_kms
, entry
);
225 dpu_debugfs_core_irq_init(dpu_kms
, entry
);
227 return dpu_core_perf_debugfs_init(dpu_kms
, entry
);
231 static int dpu_kms_enable_vblank(struct msm_kms
*kms
, struct drm_crtc
*crtc
)
233 return dpu_crtc_vblank(crtc
, true);
236 static void dpu_kms_disable_vblank(struct msm_kms
*kms
, struct drm_crtc
*crtc
)
238 dpu_crtc_vblank(crtc
, false);
241 static void dpu_kms_enable_commit(struct msm_kms
*kms
)
243 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
244 pm_runtime_get_sync(&dpu_kms
->pdev
->dev
);
247 static void dpu_kms_disable_commit(struct msm_kms
*kms
)
249 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
250 pm_runtime_put_sync(&dpu_kms
->pdev
->dev
);
253 static ktime_t
dpu_kms_vsync_time(struct msm_kms
*kms
, struct drm_crtc
*crtc
)
255 struct drm_encoder
*encoder
;
257 drm_for_each_encoder_mask(encoder
, crtc
->dev
, crtc
->state
->encoder_mask
) {
260 if (dpu_encoder_vsync_time(encoder
, &vsync_time
) == 0)
267 static void dpu_kms_prepare_commit(struct msm_kms
*kms
,
268 struct drm_atomic_state
*state
)
270 struct dpu_kms
*dpu_kms
;
271 struct drm_device
*dev
;
272 struct drm_crtc
*crtc
;
273 struct drm_crtc_state
*crtc_state
;
274 struct drm_encoder
*encoder
;
279 dpu_kms
= to_dpu_kms(kms
);
282 /* Call prepare_commit for all affected encoders */
283 for_each_new_crtc_in_state(state
, crtc
, crtc_state
, i
) {
284 drm_for_each_encoder_mask(encoder
, crtc
->dev
,
285 crtc_state
->encoder_mask
) {
286 dpu_encoder_prepare_commit(encoder
);
291 static void dpu_kms_flush_commit(struct msm_kms
*kms
, unsigned crtc_mask
)
293 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
294 struct drm_crtc
*crtc
;
296 for_each_crtc_mask(dpu_kms
->dev
, crtc
, crtc_mask
) {
297 if (!crtc
->state
->active
)
300 trace_dpu_kms_commit(DRMID(crtc
));
301 dpu_crtc_commit_kickoff(crtc
);
306 * Override the encoder enable since we need to setup the inline rotator and do
307 * some crtc magic before enabling any bridge that might be present.
309 void dpu_kms_encoder_enable(struct drm_encoder
*encoder
)
311 const struct drm_encoder_helper_funcs
*funcs
= encoder
->helper_private
;
312 struct drm_device
*dev
= encoder
->dev
;
313 struct drm_crtc
*crtc
;
315 /* Forward this enable call to the commit hook */
316 if (funcs
&& funcs
->commit
)
317 funcs
->commit(encoder
);
319 drm_for_each_crtc(crtc
, dev
) {
320 if (!(crtc
->state
->encoder_mask
& drm_encoder_mask(encoder
)))
323 trace_dpu_kms_enc_enable(DRMID(crtc
));
327 static void dpu_kms_complete_commit(struct msm_kms
*kms
, unsigned crtc_mask
)
329 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
330 struct drm_crtc
*crtc
;
332 DPU_ATRACE_BEGIN("kms_complete_commit");
334 for_each_crtc_mask(dpu_kms
->dev
, crtc
, crtc_mask
)
335 dpu_crtc_complete_commit(crtc
);
337 DPU_ATRACE_END("kms_complete_commit");
340 static void dpu_kms_wait_for_commit_done(struct msm_kms
*kms
,
341 struct drm_crtc
*crtc
)
343 struct drm_encoder
*encoder
;
344 struct drm_device
*dev
;
347 if (!kms
|| !crtc
|| !crtc
->state
) {
348 DPU_ERROR("invalid params\n");
354 if (!crtc
->state
->enable
) {
355 DPU_DEBUG("[crtc:%d] not enable\n", crtc
->base
.id
);
359 if (!crtc
->state
->active
) {
360 DPU_DEBUG("[crtc:%d] not active\n", crtc
->base
.id
);
364 list_for_each_entry(encoder
, &dev
->mode_config
.encoder_list
, head
) {
365 if (encoder
->crtc
!= crtc
)
368 * Wait for post-flush if necessary to delay before
369 * plane_cleanup. For example, wait for vsync in case of video
370 * mode panels. This may be a no-op for command mode panels.
372 trace_dpu_kms_wait_for_commit_done(DRMID(crtc
));
373 ret
= dpu_encoder_wait_for_event(encoder
, MSM_ENC_COMMIT_DONE
);
374 if (ret
&& ret
!= -EWOULDBLOCK
) {
375 DPU_ERROR("wait for commit done returned %d\n", ret
);
381 static void dpu_kms_wait_flush(struct msm_kms
*kms
, unsigned crtc_mask
)
383 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
384 struct drm_crtc
*crtc
;
386 for_each_crtc_mask(dpu_kms
->dev
, crtc
, crtc_mask
)
387 dpu_kms_wait_for_commit_done(kms
, crtc
);
390 static int _dpu_kms_initialize_dsi(struct drm_device
*dev
,
391 struct msm_drm_private
*priv
,
392 struct dpu_kms
*dpu_kms
)
394 struct drm_encoder
*encoder
= NULL
;
397 if (!(priv
->dsi
[0] || priv
->dsi
[1]))
400 /*TODO: Support two independent DSI connectors */
401 encoder
= dpu_encoder_init(dev
, DRM_MODE_ENCODER_DSI
);
402 if (IS_ERR(encoder
)) {
403 DPU_ERROR("encoder init failed for dsi display\n");
404 return PTR_ERR(encoder
);
407 priv
->encoders
[priv
->num_encoders
++] = encoder
;
409 for (i
= 0; i
< ARRAY_SIZE(priv
->dsi
); i
++) {
413 rc
= msm_dsi_modeset_init(priv
->dsi
[i
], dev
, encoder
);
415 DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
425 * _dpu_kms_setup_displays - create encoders, bridges and connectors
426 * for underlying displays
427 * @dev: Pointer to drm device structure
428 * @priv: Pointer to private drm device data
429 * @dpu_kms: Pointer to dpu kms structure
430 * Returns: Zero on success
432 static int _dpu_kms_setup_displays(struct drm_device
*dev
,
433 struct msm_drm_private
*priv
,
434 struct dpu_kms
*dpu_kms
)
437 * Extend this function to initialize other
441 return _dpu_kms_initialize_dsi(dev
, priv
, dpu_kms
);
444 static void _dpu_kms_drm_obj_destroy(struct dpu_kms
*dpu_kms
)
446 struct msm_drm_private
*priv
;
449 priv
= dpu_kms
->dev
->dev_private
;
451 for (i
= 0; i
< priv
->num_crtcs
; i
++)
452 priv
->crtcs
[i
]->funcs
->destroy(priv
->crtcs
[i
]);
455 for (i
= 0; i
< priv
->num_planes
; i
++)
456 priv
->planes
[i
]->funcs
->destroy(priv
->planes
[i
]);
457 priv
->num_planes
= 0;
459 for (i
= 0; i
< priv
->num_connectors
; i
++)
460 priv
->connectors
[i
]->funcs
->destroy(priv
->connectors
[i
]);
461 priv
->num_connectors
= 0;
463 for (i
= 0; i
< priv
->num_encoders
; i
++)
464 priv
->encoders
[i
]->funcs
->destroy(priv
->encoders
[i
]);
465 priv
->num_encoders
= 0;
468 static int _dpu_kms_drm_obj_init(struct dpu_kms
*dpu_kms
)
470 struct drm_device
*dev
;
471 struct drm_plane
*primary_planes
[MAX_PLANES
], *plane
;
472 struct drm_plane
*cursor_planes
[MAX_PLANES
] = { NULL
};
473 struct drm_crtc
*crtc
;
475 struct msm_drm_private
*priv
;
476 struct dpu_mdss_cfg
*catalog
;
478 int primary_planes_idx
= 0, cursor_planes_idx
= 0, i
, ret
;
481 priv
= dev
->dev_private
;
482 catalog
= dpu_kms
->catalog
;
485 * Create encoder and query display drivers to create
486 * bridges and connectors
488 ret
= _dpu_kms_setup_displays(dev
, priv
, dpu_kms
);
492 max_crtc_count
= min(catalog
->mixer_count
, priv
->num_encoders
);
494 /* Create the planes, keeping track of one primary/cursor per crtc */
495 for (i
= 0; i
< catalog
->sspp_count
; i
++) {
496 enum drm_plane_type type
;
498 if ((catalog
->sspp
[i
].features
& BIT(DPU_SSPP_CURSOR
))
499 && cursor_planes_idx
< max_crtc_count
)
500 type
= DRM_PLANE_TYPE_CURSOR
;
501 else if (primary_planes_idx
< max_crtc_count
)
502 type
= DRM_PLANE_TYPE_PRIMARY
;
504 type
= DRM_PLANE_TYPE_OVERLAY
;
506 DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
507 type
, catalog
->sspp
[i
].features
,
508 catalog
->sspp
[i
].features
& BIT(DPU_SSPP_CURSOR
));
510 plane
= dpu_plane_init(dev
, catalog
->sspp
[i
].id
, type
,
511 (1UL << max_crtc_count
) - 1, 0);
513 DPU_ERROR("dpu_plane_init failed\n");
514 ret
= PTR_ERR(plane
);
517 priv
->planes
[priv
->num_planes
++] = plane
;
519 if (type
== DRM_PLANE_TYPE_CURSOR
)
520 cursor_planes
[cursor_planes_idx
++] = plane
;
521 else if (type
== DRM_PLANE_TYPE_PRIMARY
)
522 primary_planes
[primary_planes_idx
++] = plane
;
525 max_crtc_count
= min(max_crtc_count
, primary_planes_idx
);
527 /* Create one CRTC per encoder */
528 for (i
= 0; i
< max_crtc_count
; i
++) {
529 crtc
= dpu_crtc_init(dev
, primary_planes
[i
], cursor_planes
[i
]);
534 priv
->crtcs
[priv
->num_crtcs
++] = crtc
;
537 /* All CRTCs are compatible with all encoders */
538 for (i
= 0; i
< priv
->num_encoders
; i
++)
539 priv
->encoders
[i
]->possible_crtcs
= (1 << priv
->num_crtcs
) - 1;
543 _dpu_kms_drm_obj_destroy(dpu_kms
);
547 static long dpu_kms_round_pixclk(struct msm_kms
*kms
, unsigned long rate
,
548 struct drm_encoder
*encoder
)
553 static void _dpu_kms_hw_destroy(struct dpu_kms
*dpu_kms
)
555 struct drm_device
*dev
;
560 if (dpu_kms
->hw_intr
)
561 dpu_hw_intr_destroy(dpu_kms
->hw_intr
);
562 dpu_kms
->hw_intr
= NULL
;
564 /* safe to call these more than once during shutdown */
565 _dpu_kms_mmu_destroy(dpu_kms
);
567 if (dpu_kms
->catalog
) {
568 for (i
= 0; i
< dpu_kms
->catalog
->vbif_count
; i
++) {
569 u32 vbif_idx
= dpu_kms
->catalog
->vbif
[i
].id
;
571 if ((vbif_idx
< VBIF_MAX
) && dpu_kms
->hw_vbif
[vbif_idx
])
572 dpu_hw_vbif_destroy(dpu_kms
->hw_vbif
[vbif_idx
]);
576 if (dpu_kms
->rm_init
)
577 dpu_rm_destroy(&dpu_kms
->rm
);
578 dpu_kms
->rm_init
= false;
580 if (dpu_kms
->catalog
)
581 dpu_hw_catalog_deinit(dpu_kms
->catalog
);
582 dpu_kms
->catalog
= NULL
;
584 if (dpu_kms
->vbif
[VBIF_NRT
])
585 devm_iounmap(&dpu_kms
->pdev
->dev
, dpu_kms
->vbif
[VBIF_NRT
]);
586 dpu_kms
->vbif
[VBIF_NRT
] = NULL
;
588 if (dpu_kms
->vbif
[VBIF_RT
])
589 devm_iounmap(&dpu_kms
->pdev
->dev
, dpu_kms
->vbif
[VBIF_RT
]);
590 dpu_kms
->vbif
[VBIF_RT
] = NULL
;
593 dpu_hw_mdp_destroy(dpu_kms
->hw_mdp
);
594 dpu_kms
->hw_mdp
= NULL
;
597 devm_iounmap(&dpu_kms
->pdev
->dev
, dpu_kms
->mmio
);
598 dpu_kms
->mmio
= NULL
;
/*
 * msm_kms hook: tear down all DPU hardware objects.
 *
 * NOTE(review): the "if (!kms)" guard around the invalid-arg error/early
 * return was lost in this mangled copy; restored here so the DPU_ERROR
 * path is actually conditional and a NULL kms is not dereferenced.
 */
static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);
}
615 static void _dpu_kms_set_encoder_mode(struct msm_kms
*kms
,
616 struct drm_encoder
*encoder
,
619 struct msm_display_info info
;
620 struct msm_drm_private
*priv
= encoder
->dev
->dev_private
;
623 memset(&info
, 0, sizeof(info
));
625 info
.intf_type
= encoder
->encoder_type
;
626 info
.capabilities
= cmd_mode
? MSM_DISPLAY_CAP_CMD_MODE
:
627 MSM_DISPLAY_CAP_VID_MODE
;
629 /* TODO: No support for DSI swap */
630 for (i
= 0; i
< ARRAY_SIZE(priv
->dsi
); i
++) {
632 info
.h_tile_instance
[info
.num_of_h_tiles
] = i
;
633 info
.num_of_h_tiles
++;
637 rc
= dpu_encoder_setup(encoder
->dev
, encoder
, &info
);
639 DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
640 encoder
->base
.id
, rc
);
643 static irqreturn_t
dpu_irq(struct msm_kms
*kms
)
645 struct dpu_kms
*dpu_kms
= to_dpu_kms(kms
);
647 return dpu_core_irq(dpu_kms
);
/* msm_kms hook: pre-install setup for the DPU core interrupt machinery */
static void dpu_irq_preinstall(struct msm_kms *kms)
{
	dpu_core_irq_preinstall(to_dpu_kms(kms));
}
/* msm_kms hook: tear down the DPU core interrupt machinery */
static void dpu_irq_uninstall(struct msm_kms *kms)
{
	dpu_core_irq_uninstall(to_dpu_kms(kms));
}
664 static const struct msm_kms_funcs kms_funcs
= {
665 .hw_init
= dpu_kms_hw_init
,
666 .irq_preinstall
= dpu_irq_preinstall
,
667 .irq_uninstall
= dpu_irq_uninstall
,
669 .enable_commit
= dpu_kms_enable_commit
,
670 .disable_commit
= dpu_kms_disable_commit
,
671 .vsync_time
= dpu_kms_vsync_time
,
672 .prepare_commit
= dpu_kms_prepare_commit
,
673 .flush_commit
= dpu_kms_flush_commit
,
674 .wait_flush
= dpu_kms_wait_flush
,
675 .complete_commit
= dpu_kms_complete_commit
,
676 .enable_vblank
= dpu_kms_enable_vblank
,
677 .disable_vblank
= dpu_kms_disable_vblank
,
678 .check_modified_format
= dpu_format_check_modified_format
,
679 .get_format
= dpu_get_msm_format
,
680 .round_pixclk
= dpu_kms_round_pixclk
,
681 .destroy
= dpu_kms_destroy
,
682 .set_encoder_mode
= _dpu_kms_set_encoder_mode
,
683 #ifdef CONFIG_DEBUG_FS
684 .debugfs_init
= dpu_kms_debugfs_init
,
688 static void _dpu_kms_mmu_destroy(struct dpu_kms
*dpu_kms
)
692 if (!dpu_kms
->base
.aspace
)
695 mmu
= dpu_kms
->base
.aspace
->mmu
;
697 mmu
->funcs
->detach(mmu
);
698 msm_gem_address_space_put(dpu_kms
->base
.aspace
);
700 dpu_kms
->base
.aspace
= NULL
;
703 static int _dpu_kms_mmu_init(struct dpu_kms
*dpu_kms
)
705 struct iommu_domain
*domain
;
706 struct msm_gem_address_space
*aspace
;
709 domain
= iommu_domain_alloc(&platform_bus_type
);
713 domain
->geometry
.aperture_start
= 0x1000;
714 domain
->geometry
.aperture_end
= 0xffffffff;
716 aspace
= msm_gem_address_space_create(dpu_kms
->dev
->dev
,
718 if (IS_ERR(aspace
)) {
719 iommu_domain_free(domain
);
720 return PTR_ERR(aspace
);
723 ret
= aspace
->mmu
->funcs
->attach(aspace
->mmu
);
725 DPU_ERROR("failed to attach iommu %d\n", ret
);
726 msm_gem_address_space_put(aspace
);
730 dpu_kms
->base
.aspace
= aspace
;
734 static struct dss_clk
*_dpu_kms_get_clk(struct dpu_kms
*dpu_kms
,
737 struct dss_module_power
*mp
= &dpu_kms
->mp
;
740 for (i
= 0; i
< mp
->num_clk
; i
++) {
741 if (!strcmp(mp
->clk_config
[i
].clk_name
, clock_name
))
742 return &mp
->clk_config
[i
];
748 u64
dpu_kms_get_clk_rate(struct dpu_kms
*dpu_kms
, char *clock_name
)
752 clk
= _dpu_kms_get_clk(dpu_kms
, clock_name
);
756 return clk_get_rate(clk
->clk
);
759 static int dpu_kms_hw_init(struct msm_kms
*kms
)
761 struct dpu_kms
*dpu_kms
;
762 struct drm_device
*dev
;
763 struct msm_drm_private
*priv
;
767 DPU_ERROR("invalid kms\n");
771 dpu_kms
= to_dpu_kms(kms
);
773 priv
= dev
->dev_private
;
775 atomic_set(&dpu_kms
->bandwidth_ref
, 0);
777 dpu_kms
->mmio
= msm_ioremap(dpu_kms
->pdev
, "mdp", "mdp");
778 if (IS_ERR(dpu_kms
->mmio
)) {
779 rc
= PTR_ERR(dpu_kms
->mmio
);
780 DPU_ERROR("mdp register memory map failed: %d\n", rc
);
781 dpu_kms
->mmio
= NULL
;
784 DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms
->mmio
);
785 dpu_kms
->mmio_len
= dpu_iomap_size(dpu_kms
->pdev
, "mdp");
787 dpu_kms
->vbif
[VBIF_RT
] = msm_ioremap(dpu_kms
->pdev
, "vbif", "vbif");
788 if (IS_ERR(dpu_kms
->vbif
[VBIF_RT
])) {
789 rc
= PTR_ERR(dpu_kms
->vbif
[VBIF_RT
]);
790 DPU_ERROR("vbif register memory map failed: %d\n", rc
);
791 dpu_kms
->vbif
[VBIF_RT
] = NULL
;
794 dpu_kms
->vbif_len
[VBIF_RT
] = dpu_iomap_size(dpu_kms
->pdev
, "vbif");
795 dpu_kms
->vbif
[VBIF_NRT
] = msm_ioremap(dpu_kms
->pdev
, "vbif_nrt", "vbif_nrt");
796 if (IS_ERR(dpu_kms
->vbif
[VBIF_NRT
])) {
797 dpu_kms
->vbif
[VBIF_NRT
] = NULL
;
798 DPU_DEBUG("VBIF NRT is not defined");
800 dpu_kms
->vbif_len
[VBIF_NRT
] = dpu_iomap_size(dpu_kms
->pdev
,
804 dpu_kms
->reg_dma
= msm_ioremap(dpu_kms
->pdev
, "regdma", "regdma");
805 if (IS_ERR(dpu_kms
->reg_dma
)) {
806 dpu_kms
->reg_dma
= NULL
;
807 DPU_DEBUG("REG_DMA is not defined");
809 dpu_kms
->reg_dma_len
= dpu_iomap_size(dpu_kms
->pdev
, "regdma");
812 pm_runtime_get_sync(&dpu_kms
->pdev
->dev
);
814 dpu_kms
->core_rev
= readl_relaxed(dpu_kms
->mmio
+ 0x0);
816 pr_info("dpu hardware revision:0x%x\n", dpu_kms
->core_rev
);
818 dpu_kms
->catalog
= dpu_hw_catalog_init(dpu_kms
->core_rev
);
819 if (IS_ERR_OR_NULL(dpu_kms
->catalog
)) {
820 rc
= PTR_ERR(dpu_kms
->catalog
);
821 if (!dpu_kms
->catalog
)
823 DPU_ERROR("catalog init failed: %d\n", rc
);
824 dpu_kms
->catalog
= NULL
;
829 * Now we need to read the HW catalog and initialize resources such as
830 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
832 rc
= _dpu_kms_mmu_init(dpu_kms
);
834 DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc
);
838 rc
= dpu_rm_init(&dpu_kms
->rm
, dpu_kms
->catalog
, dpu_kms
->mmio
);
840 DPU_ERROR("rm init failed: %d\n", rc
);
844 dpu_kms
->rm_init
= true;
846 dpu_kms
->hw_mdp
= dpu_hw_mdptop_init(MDP_TOP
, dpu_kms
->mmio
,
848 if (IS_ERR(dpu_kms
->hw_mdp
)) {
849 rc
= PTR_ERR(dpu_kms
->hw_mdp
);
850 DPU_ERROR("failed to get hw_mdp: %d\n", rc
);
851 dpu_kms
->hw_mdp
= NULL
;
855 for (i
= 0; i
< dpu_kms
->catalog
->vbif_count
; i
++) {
856 u32 vbif_idx
= dpu_kms
->catalog
->vbif
[i
].id
;
858 dpu_kms
->hw_vbif
[i
] = dpu_hw_vbif_init(vbif_idx
,
859 dpu_kms
->vbif
[vbif_idx
], dpu_kms
->catalog
);
860 if (IS_ERR_OR_NULL(dpu_kms
->hw_vbif
[vbif_idx
])) {
861 rc
= PTR_ERR(dpu_kms
->hw_vbif
[vbif_idx
]);
862 if (!dpu_kms
->hw_vbif
[vbif_idx
])
864 DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx
, rc
);
865 dpu_kms
->hw_vbif
[vbif_idx
] = NULL
;
870 rc
= dpu_core_perf_init(&dpu_kms
->perf
, dev
, dpu_kms
->catalog
,
871 _dpu_kms_get_clk(dpu_kms
, "core"));
873 DPU_ERROR("failed to init perf %d\n", rc
);
877 dpu_kms
->hw_intr
= dpu_hw_intr_init(dpu_kms
->mmio
, dpu_kms
->catalog
);
878 if (IS_ERR_OR_NULL(dpu_kms
->hw_intr
)) {
879 rc
= PTR_ERR(dpu_kms
->hw_intr
);
880 DPU_ERROR("hw_intr init failed: %d\n", rc
);
881 dpu_kms
->hw_intr
= NULL
;
882 goto hw_intr_init_err
;
885 dev
->mode_config
.min_width
= 0;
886 dev
->mode_config
.min_height
= 0;
889 * max crtc width is equal to the max mixer width * 2 and max height is
892 dev
->mode_config
.max_width
=
893 dpu_kms
->catalog
->caps
->max_mixer_width
* 2;
894 dev
->mode_config
.max_height
= 4096;
897 * Support format modifiers for compression etc.
899 dev
->mode_config
.allow_fb_modifiers
= true;
902 * _dpu_kms_drm_obj_init should create the DRM related objects
903 * i.e. CRTCs, planes, encoders, connectors and so forth
905 rc
= _dpu_kms_drm_obj_init(dpu_kms
);
907 DPU_ERROR("modeset init failed: %d\n", rc
);
908 goto drm_obj_init_err
;
911 dpu_vbif_init_memtypes(dpu_kms
);
913 pm_runtime_put_sync(&dpu_kms
->pdev
->dev
);
918 dpu_core_perf_destroy(&dpu_kms
->perf
);
922 pm_runtime_put_sync(&dpu_kms
->pdev
->dev
);
924 _dpu_kms_hw_destroy(dpu_kms
);
929 struct msm_kms
*dpu_kms_init(struct drm_device
*dev
)
931 struct msm_drm_private
*priv
;
932 struct dpu_kms
*dpu_kms
;
936 DPU_ERROR("drm device node invalid\n");
937 return ERR_PTR(-EINVAL
);
940 priv
= dev
->dev_private
;
941 dpu_kms
= to_dpu_kms(priv
->kms
);
943 irq
= irq_of_parse_and_map(dpu_kms
->pdev
->dev
.of_node
, 0);
945 DPU_ERROR("failed to get irq: %d\n", irq
);
948 dpu_kms
->base
.irq
= irq
;
950 return &dpu_kms
->base
;
953 static int dpu_bind(struct device
*dev
, struct device
*master
, void *data
)
955 struct drm_device
*ddev
= dev_get_drvdata(master
);
956 struct platform_device
*pdev
= to_platform_device(dev
);
957 struct msm_drm_private
*priv
= ddev
->dev_private
;
958 struct dpu_kms
*dpu_kms
;
959 struct dss_module_power
*mp
;
962 dpu_kms
= devm_kzalloc(&pdev
->dev
, sizeof(*dpu_kms
), GFP_KERNEL
);
967 ret
= msm_dss_parse_clock(pdev
, mp
);
969 DPU_ERROR("failed to parse clocks, ret=%d\n", ret
);
973 platform_set_drvdata(pdev
, dpu_kms
);
975 msm_kms_init(&dpu_kms
->base
, &kms_funcs
);
977 dpu_kms
->pdev
= pdev
;
979 pm_runtime_enable(&pdev
->dev
);
980 dpu_kms
->rpm_enabled
= true;
982 priv
->kms
= &dpu_kms
->base
;
986 static void dpu_unbind(struct device
*dev
, struct device
*master
, void *data
)
988 struct platform_device
*pdev
= to_platform_device(dev
);
989 struct dpu_kms
*dpu_kms
= platform_get_drvdata(pdev
);
990 struct dss_module_power
*mp
= &dpu_kms
->mp
;
992 msm_dss_put_clk(mp
->clk_config
, mp
->num_clk
);
993 devm_kfree(&pdev
->dev
, mp
->clk_config
);
996 if (dpu_kms
->rpm_enabled
)
997 pm_runtime_disable(&pdev
->dev
);
1000 static const struct component_ops dpu_ops
= {
1002 .unbind
= dpu_unbind
,
1005 static int dpu_dev_probe(struct platform_device
*pdev
)
1007 return component_add(&pdev
->dev
, &dpu_ops
);
1010 static int dpu_dev_remove(struct platform_device
*pdev
)
1012 component_del(&pdev
->dev
, &dpu_ops
);
1016 static int __maybe_unused
dpu_runtime_suspend(struct device
*dev
)
1019 struct platform_device
*pdev
= to_platform_device(dev
);
1020 struct dpu_kms
*dpu_kms
= platform_get_drvdata(pdev
);
1021 struct drm_device
*ddev
;
1022 struct dss_module_power
*mp
= &dpu_kms
->mp
;
1024 ddev
= dpu_kms
->dev
;
1025 rc
= msm_dss_enable_clk(mp
->clk_config
, mp
->num_clk
, false);
1027 DPU_ERROR("clock disable failed rc:%d\n", rc
);
1032 static int __maybe_unused
dpu_runtime_resume(struct device
*dev
)
1035 struct platform_device
*pdev
= to_platform_device(dev
);
1036 struct dpu_kms
*dpu_kms
= platform_get_drvdata(pdev
);
1037 struct drm_encoder
*encoder
;
1038 struct drm_device
*ddev
;
1039 struct dss_module_power
*mp
= &dpu_kms
->mp
;
1041 ddev
= dpu_kms
->dev
;
1042 rc
= msm_dss_enable_clk(mp
->clk_config
, mp
->num_clk
, true);
1044 DPU_ERROR("clock enable failed rc:%d\n", rc
);
1048 dpu_vbif_init_memtypes(dpu_kms
);
1050 drm_for_each_encoder(encoder
, ddev
)
1051 dpu_encoder_virt_runtime_resume(encoder
);
1056 static const struct dev_pm_ops dpu_pm_ops
= {
1057 SET_RUNTIME_PM_OPS(dpu_runtime_suspend
, dpu_runtime_resume
, NULL
)
1060 static const struct of_device_id dpu_dt_match
[] = {
1061 { .compatible
= "qcom,sdm845-dpu", },
1062 { .compatible
= "qcom,sc7180-dpu", },
1065 MODULE_DEVICE_TABLE(of
, dpu_dt_match
);
1067 static struct platform_driver dpu_driver
= {
1068 .probe
= dpu_dev_probe
,
1069 .remove
= dpu_dev_remove
,
1072 .of_match_table
= dpu_dt_match
,
1077 void __init
msm_dpu_register(void)
1079 platform_driver_register(&dpu_driver
);
1082 void __exit
msm_dpu_unregister(void)
1084 platform_driver_unregister(&dpu_driver
);