/*
 * treewide: remove redundant IS_ERR() before error code check
 * From: drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
 * blob cb08fafb1dc144195027be506be679794390ccd4
 */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2013 Red Hat
5 * Author: Rob Clark <robdclark@gmail.com>
6 */
8 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
10 #include <linux/debugfs.h>
11 #include <linux/dma-buf.h>
12 #include <linux/of_irq.h>
14 #include <drm/drm_crtc.h>
15 #include <drm/drm_file.h>
17 #include "msm_drv.h"
18 #include "msm_mmu.h"
19 #include "msm_gem.h"
21 #include "dpu_kms.h"
22 #include "dpu_core_irq.h"
23 #include "dpu_formats.h"
24 #include "dpu_hw_vbif.h"
25 #include "dpu_vbif.h"
26 #include "dpu_encoder.h"
27 #include "dpu_plane.h"
28 #include "dpu_crtc.h"
30 #define CREATE_TRACE_POINTS
31 #include "dpu_trace.h"
/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
 */
42 #define DPU_DEBUGFS_DIR "msm_dpu"
43 #define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
45 static int dpu_kms_hw_init(struct msm_kms *kms);
46 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
48 static unsigned long dpu_iomap_size(struct platform_device *pdev,
49 const char *name)
51 struct resource *res;
53 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
54 if (!res) {
55 DRM_ERROR("failed to get memory resource: %s\n", name);
56 return 0;
59 return resource_size(res);
62 #ifdef CONFIG_DEBUG_FS
63 static int _dpu_danger_signal_status(struct seq_file *s,
64 bool danger_status)
66 struct dpu_kms *kms = (struct dpu_kms *)s->private;
67 struct dpu_danger_safe_status status;
68 int i;
70 if (!kms->hw_mdp) {
71 DPU_ERROR("invalid arg(s)\n");
72 return 0;
75 memset(&status, 0, sizeof(struct dpu_danger_safe_status));
77 pm_runtime_get_sync(&kms->pdev->dev);
78 if (danger_status) {
79 seq_puts(s, "\nDanger signal status:\n");
80 if (kms->hw_mdp->ops.get_danger_status)
81 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
82 &status);
83 } else {
84 seq_puts(s, "\nSafe signal status:\n");
85 if (kms->hw_mdp->ops.get_danger_status)
86 kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
87 &status);
89 pm_runtime_put_sync(&kms->pdev->dev);
91 seq_printf(s, "MDP : 0x%x\n", status.mdp);
93 for (i = SSPP_VIG0; i < SSPP_MAX; i++)
94 seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
95 status.sspp[i]);
96 seq_puts(s, "\n");
98 return 0;
/*
 * Generate a debugfs single_open()/single_release() file_operations pair
 * around a __prefix ## _show() seq_file callback.
 */
#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)				\
static int __prefix ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __prefix ## _show, inode->i_private);	\
}									\
static const struct file_operations __prefix ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __prefix ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
}
114 static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
116 return _dpu_danger_signal_status(s, true);
118 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
120 static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
122 return _dpu_danger_signal_status(s, false);
124 DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
126 static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
127 struct dentry *parent)
129 struct dentry *entry = debugfs_create_dir("danger", parent);
131 debugfs_create_file("danger_status", 0600, entry,
132 dpu_kms, &dpu_debugfs_danger_stats_fops);
133 debugfs_create_file("safe_status", 0600, entry,
134 dpu_kms, &dpu_debugfs_safe_stats_fops);
137 static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
139 struct dpu_debugfs_regset32 *regset = s->private;
140 struct dpu_kms *dpu_kms = regset->dpu_kms;
141 struct drm_device *dev;
142 struct msm_drm_private *priv;
143 void __iomem *base;
144 uint32_t i, addr;
146 if (!dpu_kms->mmio)
147 return 0;
149 dev = dpu_kms->dev;
150 priv = dev->dev_private;
151 base = dpu_kms->mmio + regset->offset;
153 /* insert padding spaces, if needed */
154 if (regset->offset & 0xF) {
155 seq_printf(s, "[%x]", regset->offset & ~0xF);
156 for (i = 0; i < (regset->offset & 0xF); i += 4)
157 seq_puts(s, " ");
160 pm_runtime_get_sync(&dpu_kms->pdev->dev);
162 /* main register output */
163 for (i = 0; i < regset->blk_len; i += 4) {
164 addr = regset->offset + i;
165 if ((addr & 0xF) == 0x0)
166 seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
167 seq_printf(s, " %08x", readl_relaxed(base + i));
169 seq_puts(s, "\n");
170 pm_runtime_put_sync(&dpu_kms->pdev->dev);
172 return 0;
175 static int dpu_debugfs_open_regset32(struct inode *inode,
176 struct file *file)
178 return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
181 static const struct file_operations dpu_fops_regset32 = {
182 .open = dpu_debugfs_open_regset32,
183 .read = seq_read,
184 .llseek = seq_lseek,
185 .release = single_release,
188 void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
189 uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
191 if (regset) {
192 regset->offset = offset;
193 regset->blk_len = length;
194 regset->dpu_kms = dpu_kms;
198 void dpu_debugfs_create_regset32(const char *name, umode_t mode,
199 void *parent, struct dpu_debugfs_regset32 *regset)
201 if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
202 return;
204 /* make sure offset is a multiple of 4 */
205 regset->offset = round_down(regset->offset, 4);
207 debugfs_create_file(name, mode, parent, regset, &dpu_fops_regset32);
210 static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
212 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
213 void *p = dpu_hw_util_get_log_mask_ptr();
214 struct dentry *entry;
216 if (!p)
217 return -EINVAL;
219 entry = debugfs_create_dir("debug", minor->debugfs_root);
221 debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
223 dpu_debugfs_danger_init(dpu_kms, entry);
224 dpu_debugfs_vbif_init(dpu_kms, entry);
225 dpu_debugfs_core_irq_init(dpu_kms, entry);
227 return dpu_core_perf_debugfs_init(dpu_kms, entry);
229 #endif
231 static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
233 return dpu_crtc_vblank(crtc, true);
236 static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
238 dpu_crtc_vblank(crtc, false);
241 static void dpu_kms_enable_commit(struct msm_kms *kms)
243 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
244 pm_runtime_get_sync(&dpu_kms->pdev->dev);
247 static void dpu_kms_disable_commit(struct msm_kms *kms)
249 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
250 pm_runtime_put_sync(&dpu_kms->pdev->dev);
253 static ktime_t dpu_kms_vsync_time(struct msm_kms *kms, struct drm_crtc *crtc)
255 struct drm_encoder *encoder;
257 drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
258 ktime_t vsync_time;
260 if (dpu_encoder_vsync_time(encoder, &vsync_time) == 0)
261 return vsync_time;
264 return ktime_get();
267 static void dpu_kms_prepare_commit(struct msm_kms *kms,
268 struct drm_atomic_state *state)
270 struct dpu_kms *dpu_kms;
271 struct drm_device *dev;
272 struct drm_crtc *crtc;
273 struct drm_crtc_state *crtc_state;
274 struct drm_encoder *encoder;
275 int i;
277 if (!kms)
278 return;
279 dpu_kms = to_dpu_kms(kms);
280 dev = dpu_kms->dev;
282 /* Call prepare_commit for all affected encoders */
283 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
284 drm_for_each_encoder_mask(encoder, crtc->dev,
285 crtc_state->encoder_mask) {
286 dpu_encoder_prepare_commit(encoder);
291 static void dpu_kms_flush_commit(struct msm_kms *kms, unsigned crtc_mask)
293 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
294 struct drm_crtc *crtc;
296 for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask) {
297 if (!crtc->state->active)
298 continue;
300 trace_dpu_kms_commit(DRMID(crtc));
301 dpu_crtc_commit_kickoff(crtc);
306 * Override the encoder enable since we need to setup the inline rotator and do
307 * some crtc magic before enabling any bridge that might be present.
309 void dpu_kms_encoder_enable(struct drm_encoder *encoder)
311 const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
312 struct drm_device *dev = encoder->dev;
313 struct drm_crtc *crtc;
315 /* Forward this enable call to the commit hook */
316 if (funcs && funcs->commit)
317 funcs->commit(encoder);
319 drm_for_each_crtc(crtc, dev) {
320 if (!(crtc->state->encoder_mask & drm_encoder_mask(encoder)))
321 continue;
323 trace_dpu_kms_enc_enable(DRMID(crtc));
327 static void dpu_kms_complete_commit(struct msm_kms *kms, unsigned crtc_mask)
329 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
330 struct drm_crtc *crtc;
332 DPU_ATRACE_BEGIN("kms_complete_commit");
334 for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
335 dpu_crtc_complete_commit(crtc);
337 DPU_ATRACE_END("kms_complete_commit");
340 static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
341 struct drm_crtc *crtc)
343 struct drm_encoder *encoder;
344 struct drm_device *dev;
345 int ret;
347 if (!kms || !crtc || !crtc->state) {
348 DPU_ERROR("invalid params\n");
349 return;
352 dev = crtc->dev;
354 if (!crtc->state->enable) {
355 DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
356 return;
359 if (!crtc->state->active) {
360 DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
361 return;
364 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
365 if (encoder->crtc != crtc)
366 continue;
368 * Wait for post-flush if necessary to delay before
369 * plane_cleanup. For example, wait for vsync in case of video
370 * mode panels. This may be a no-op for command mode panels.
372 trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
373 ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
374 if (ret && ret != -EWOULDBLOCK) {
375 DPU_ERROR("wait for commit done returned %d\n", ret);
376 break;
381 static void dpu_kms_wait_flush(struct msm_kms *kms, unsigned crtc_mask)
383 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
384 struct drm_crtc *crtc;
386 for_each_crtc_mask(dpu_kms->dev, crtc, crtc_mask)
387 dpu_kms_wait_for_commit_done(kms, crtc);
390 static int _dpu_kms_initialize_dsi(struct drm_device *dev,
391 struct msm_drm_private *priv,
392 struct dpu_kms *dpu_kms)
394 struct drm_encoder *encoder = NULL;
395 int i, rc = 0;
397 if (!(priv->dsi[0] || priv->dsi[1]))
398 return rc;
400 /*TODO: Support two independent DSI connectors */
401 encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI);
402 if (IS_ERR(encoder)) {
403 DPU_ERROR("encoder init failed for dsi display\n");
404 return PTR_ERR(encoder);
407 priv->encoders[priv->num_encoders++] = encoder;
409 for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
410 if (!priv->dsi[i])
411 continue;
413 rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
414 if (rc) {
415 DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
416 i, rc);
417 break;
421 return rc;
/**
 * _dpu_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @dpu_kms: Pointer to dpu kms structure
 * Returns: Zero on success
 */
static int _dpu_kms_setup_displays(struct drm_device *dev,
				   struct msm_drm_private *priv,
				   struct dpu_kms *dpu_kms)
{
	/*
	 * Extend this function to initialize other
	 * types of displays
	 */
	return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
}
444 static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
446 struct msm_drm_private *priv;
447 int i;
449 priv = dpu_kms->dev->dev_private;
451 for (i = 0; i < priv->num_crtcs; i++)
452 priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
453 priv->num_crtcs = 0;
455 for (i = 0; i < priv->num_planes; i++)
456 priv->planes[i]->funcs->destroy(priv->planes[i]);
457 priv->num_planes = 0;
459 for (i = 0; i < priv->num_connectors; i++)
460 priv->connectors[i]->funcs->destroy(priv->connectors[i]);
461 priv->num_connectors = 0;
463 for (i = 0; i < priv->num_encoders; i++)
464 priv->encoders[i]->funcs->destroy(priv->encoders[i]);
465 priv->num_encoders = 0;
468 static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
470 struct drm_device *dev;
471 struct drm_plane *primary_planes[MAX_PLANES], *plane;
472 struct drm_plane *cursor_planes[MAX_PLANES] = { NULL };
473 struct drm_crtc *crtc;
475 struct msm_drm_private *priv;
476 struct dpu_mdss_cfg *catalog;
478 int primary_planes_idx = 0, cursor_planes_idx = 0, i, ret;
479 int max_crtc_count;
480 dev = dpu_kms->dev;
481 priv = dev->dev_private;
482 catalog = dpu_kms->catalog;
485 * Create encoder and query display drivers to create
486 * bridges and connectors
488 ret = _dpu_kms_setup_displays(dev, priv, dpu_kms);
489 if (ret)
490 goto fail;
492 max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
494 /* Create the planes, keeping track of one primary/cursor per crtc */
495 for (i = 0; i < catalog->sspp_count; i++) {
496 enum drm_plane_type type;
498 if ((catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR))
499 && cursor_planes_idx < max_crtc_count)
500 type = DRM_PLANE_TYPE_CURSOR;
501 else if (primary_planes_idx < max_crtc_count)
502 type = DRM_PLANE_TYPE_PRIMARY;
503 else
504 type = DRM_PLANE_TYPE_OVERLAY;
506 DPU_DEBUG("Create plane type %d with features %lx (cur %lx)\n",
507 type, catalog->sspp[i].features,
508 catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR));
510 plane = dpu_plane_init(dev, catalog->sspp[i].id, type,
511 (1UL << max_crtc_count) - 1, 0);
512 if (IS_ERR(plane)) {
513 DPU_ERROR("dpu_plane_init failed\n");
514 ret = PTR_ERR(plane);
515 goto fail;
517 priv->planes[priv->num_planes++] = plane;
519 if (type == DRM_PLANE_TYPE_CURSOR)
520 cursor_planes[cursor_planes_idx++] = plane;
521 else if (type == DRM_PLANE_TYPE_PRIMARY)
522 primary_planes[primary_planes_idx++] = plane;
525 max_crtc_count = min(max_crtc_count, primary_planes_idx);
527 /* Create one CRTC per encoder */
528 for (i = 0; i < max_crtc_count; i++) {
529 crtc = dpu_crtc_init(dev, primary_planes[i], cursor_planes[i]);
530 if (IS_ERR(crtc)) {
531 ret = PTR_ERR(crtc);
532 goto fail;
534 priv->crtcs[priv->num_crtcs++] = crtc;
537 /* All CRTCs are compatible with all encoders */
538 for (i = 0; i < priv->num_encoders; i++)
539 priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
541 return 0;
542 fail:
543 _dpu_kms_drm_obj_destroy(dpu_kms);
544 return ret;
/* DPU accepts any pixel clock rate; return the requested rate unchanged */
static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
553 static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
555 struct drm_device *dev;
556 int i;
558 dev = dpu_kms->dev;
560 if (dpu_kms->hw_intr)
561 dpu_hw_intr_destroy(dpu_kms->hw_intr);
562 dpu_kms->hw_intr = NULL;
564 /* safe to call these more than once during shutdown */
565 _dpu_kms_mmu_destroy(dpu_kms);
567 if (dpu_kms->catalog) {
568 for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
569 u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
571 if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
572 dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
576 if (dpu_kms->rm_init)
577 dpu_rm_destroy(&dpu_kms->rm);
578 dpu_kms->rm_init = false;
580 if (dpu_kms->catalog)
581 dpu_hw_catalog_deinit(dpu_kms->catalog);
582 dpu_kms->catalog = NULL;
584 if (dpu_kms->vbif[VBIF_NRT])
585 devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
586 dpu_kms->vbif[VBIF_NRT] = NULL;
588 if (dpu_kms->vbif[VBIF_RT])
589 devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
590 dpu_kms->vbif[VBIF_RT] = NULL;
592 if (dpu_kms->hw_mdp)
593 dpu_hw_mdp_destroy(dpu_kms->hw_mdp);
594 dpu_kms->hw_mdp = NULL;
596 if (dpu_kms->mmio)
597 devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
598 dpu_kms->mmio = NULL;
/* msm_kms destroy hook: validate and tear down all DPU hardware state */
static void dpu_kms_destroy(struct msm_kms *kms)
{
	struct dpu_kms *dpu_kms;

	if (!kms) {
		DPU_ERROR("invalid kms\n");
		return;
	}

	dpu_kms = to_dpu_kms(kms);

	_dpu_kms_hw_destroy(dpu_kms);
}
615 static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
616 struct drm_encoder *encoder,
617 bool cmd_mode)
619 struct msm_display_info info;
620 struct msm_drm_private *priv = encoder->dev->dev_private;
621 int i, rc = 0;
623 memset(&info, 0, sizeof(info));
625 info.intf_type = encoder->encoder_type;
626 info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
627 MSM_DISPLAY_CAP_VID_MODE;
629 /* TODO: No support for DSI swap */
630 for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
631 if (priv->dsi[i]) {
632 info.h_tile_instance[info.num_of_h_tiles] = i;
633 info.num_of_h_tiles++;
637 rc = dpu_encoder_setup(encoder->dev, encoder, &info);
638 if (rc)
639 DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
640 encoder->base.id, rc);
643 static irqreturn_t dpu_irq(struct msm_kms *kms)
645 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
647 return dpu_core_irq(dpu_kms);
650 static void dpu_irq_preinstall(struct msm_kms *kms)
652 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
654 dpu_core_irq_preinstall(dpu_kms);
657 static void dpu_irq_uninstall(struct msm_kms *kms)
659 struct dpu_kms *dpu_kms = to_dpu_kms(kms);
661 dpu_core_irq_uninstall(dpu_kms);
664 static const struct msm_kms_funcs kms_funcs = {
665 .hw_init = dpu_kms_hw_init,
666 .irq_preinstall = dpu_irq_preinstall,
667 .irq_uninstall = dpu_irq_uninstall,
668 .irq = dpu_irq,
669 .enable_commit = dpu_kms_enable_commit,
670 .disable_commit = dpu_kms_disable_commit,
671 .vsync_time = dpu_kms_vsync_time,
672 .prepare_commit = dpu_kms_prepare_commit,
673 .flush_commit = dpu_kms_flush_commit,
674 .wait_flush = dpu_kms_wait_flush,
675 .complete_commit = dpu_kms_complete_commit,
676 .enable_vblank = dpu_kms_enable_vblank,
677 .disable_vblank = dpu_kms_disable_vblank,
678 .check_modified_format = dpu_format_check_modified_format,
679 .get_format = dpu_get_msm_format,
680 .round_pixclk = dpu_kms_round_pixclk,
681 .destroy = dpu_kms_destroy,
682 .set_encoder_mode = _dpu_kms_set_encoder_mode,
683 #ifdef CONFIG_DEBUG_FS
684 .debugfs_init = dpu_kms_debugfs_init,
685 #endif
688 static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
690 struct msm_mmu *mmu;
692 if (!dpu_kms->base.aspace)
693 return;
695 mmu = dpu_kms->base.aspace->mmu;
697 mmu->funcs->detach(mmu);
698 msm_gem_address_space_put(dpu_kms->base.aspace);
700 dpu_kms->base.aspace = NULL;
703 static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
705 struct iommu_domain *domain;
706 struct msm_gem_address_space *aspace;
707 int ret;
709 domain = iommu_domain_alloc(&platform_bus_type);
710 if (!domain)
711 return 0;
713 domain->geometry.aperture_start = 0x1000;
714 domain->geometry.aperture_end = 0xffffffff;
716 aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
717 domain, "dpu1");
718 if (IS_ERR(aspace)) {
719 iommu_domain_free(domain);
720 return PTR_ERR(aspace);
723 ret = aspace->mmu->funcs->attach(aspace->mmu);
724 if (ret) {
725 DPU_ERROR("failed to attach iommu %d\n", ret);
726 msm_gem_address_space_put(aspace);
727 return ret;
730 dpu_kms->base.aspace = aspace;
731 return 0;
734 static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
735 char *clock_name)
737 struct dss_module_power *mp = &dpu_kms->mp;
738 int i;
740 for (i = 0; i < mp->num_clk; i++) {
741 if (!strcmp(mp->clk_config[i].clk_name, clock_name))
742 return &mp->clk_config[i];
745 return NULL;
748 u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
750 struct dss_clk *clk;
752 clk = _dpu_kms_get_clk(dpu_kms, clock_name);
753 if (!clk)
754 return -EINVAL;
756 return clk_get_rate(clk->clk);
759 static int dpu_kms_hw_init(struct msm_kms *kms)
761 struct dpu_kms *dpu_kms;
762 struct drm_device *dev;
763 struct msm_drm_private *priv;
764 int i, rc = -EINVAL;
766 if (!kms) {
767 DPU_ERROR("invalid kms\n");
768 return rc;
771 dpu_kms = to_dpu_kms(kms);
772 dev = dpu_kms->dev;
773 priv = dev->dev_private;
775 atomic_set(&dpu_kms->bandwidth_ref, 0);
777 dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
778 if (IS_ERR(dpu_kms->mmio)) {
779 rc = PTR_ERR(dpu_kms->mmio);
780 DPU_ERROR("mdp register memory map failed: %d\n", rc);
781 dpu_kms->mmio = NULL;
782 goto error;
784 DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
785 dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
787 dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
788 if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
789 rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
790 DPU_ERROR("vbif register memory map failed: %d\n", rc);
791 dpu_kms->vbif[VBIF_RT] = NULL;
792 goto error;
794 dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
795 dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
796 if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
797 dpu_kms->vbif[VBIF_NRT] = NULL;
798 DPU_DEBUG("VBIF NRT is not defined");
799 } else {
800 dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
801 "vbif_nrt");
804 dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
805 if (IS_ERR(dpu_kms->reg_dma)) {
806 dpu_kms->reg_dma = NULL;
807 DPU_DEBUG("REG_DMA is not defined");
808 } else {
809 dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
812 pm_runtime_get_sync(&dpu_kms->pdev->dev);
814 dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
816 pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
818 dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
819 if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
820 rc = PTR_ERR(dpu_kms->catalog);
821 if (!dpu_kms->catalog)
822 rc = -EINVAL;
823 DPU_ERROR("catalog init failed: %d\n", rc);
824 dpu_kms->catalog = NULL;
825 goto power_error;
829 * Now we need to read the HW catalog and initialize resources such as
830 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
832 rc = _dpu_kms_mmu_init(dpu_kms);
833 if (rc) {
834 DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
835 goto power_error;
838 rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio);
839 if (rc) {
840 DPU_ERROR("rm init failed: %d\n", rc);
841 goto power_error;
844 dpu_kms->rm_init = true;
846 dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio,
847 dpu_kms->catalog);
848 if (IS_ERR(dpu_kms->hw_mdp)) {
849 rc = PTR_ERR(dpu_kms->hw_mdp);
850 DPU_ERROR("failed to get hw_mdp: %d\n", rc);
851 dpu_kms->hw_mdp = NULL;
852 goto power_error;
855 for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
856 u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
858 dpu_kms->hw_vbif[i] = dpu_hw_vbif_init(vbif_idx,
859 dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
860 if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
861 rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
862 if (!dpu_kms->hw_vbif[vbif_idx])
863 rc = -EINVAL;
864 DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
865 dpu_kms->hw_vbif[vbif_idx] = NULL;
866 goto power_error;
870 rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
871 _dpu_kms_get_clk(dpu_kms, "core"));
872 if (rc) {
873 DPU_ERROR("failed to init perf %d\n", rc);
874 goto perf_err;
877 dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
878 if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
879 rc = PTR_ERR(dpu_kms->hw_intr);
880 DPU_ERROR("hw_intr init failed: %d\n", rc);
881 dpu_kms->hw_intr = NULL;
882 goto hw_intr_init_err;
885 dev->mode_config.min_width = 0;
886 dev->mode_config.min_height = 0;
889 * max crtc width is equal to the max mixer width * 2 and max height is
890 * is 4K
892 dev->mode_config.max_width =
893 dpu_kms->catalog->caps->max_mixer_width * 2;
894 dev->mode_config.max_height = 4096;
897 * Support format modifiers for compression etc.
899 dev->mode_config.allow_fb_modifiers = true;
902 * _dpu_kms_drm_obj_init should create the DRM related objects
903 * i.e. CRTCs, planes, encoders, connectors and so forth
905 rc = _dpu_kms_drm_obj_init(dpu_kms);
906 if (rc) {
907 DPU_ERROR("modeset init failed: %d\n", rc);
908 goto drm_obj_init_err;
911 dpu_vbif_init_memtypes(dpu_kms);
913 pm_runtime_put_sync(&dpu_kms->pdev->dev);
915 return 0;
917 drm_obj_init_err:
918 dpu_core_perf_destroy(&dpu_kms->perf);
919 hw_intr_init_err:
920 perf_err:
921 power_error:
922 pm_runtime_put_sync(&dpu_kms->pdev->dev);
923 error:
924 _dpu_kms_hw_destroy(dpu_kms);
926 return rc;
929 struct msm_kms *dpu_kms_init(struct drm_device *dev)
931 struct msm_drm_private *priv;
932 struct dpu_kms *dpu_kms;
933 int irq;
935 if (!dev) {
936 DPU_ERROR("drm device node invalid\n");
937 return ERR_PTR(-EINVAL);
940 priv = dev->dev_private;
941 dpu_kms = to_dpu_kms(priv->kms);
943 irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
944 if (irq < 0) {
945 DPU_ERROR("failed to get irq: %d\n", irq);
946 return ERR_PTR(irq);
948 dpu_kms->base.irq = irq;
950 return &dpu_kms->base;
953 static int dpu_bind(struct device *dev, struct device *master, void *data)
955 struct drm_device *ddev = dev_get_drvdata(master);
956 struct platform_device *pdev = to_platform_device(dev);
957 struct msm_drm_private *priv = ddev->dev_private;
958 struct dpu_kms *dpu_kms;
959 struct dss_module_power *mp;
960 int ret = 0;
962 dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
963 if (!dpu_kms)
964 return -ENOMEM;
966 mp = &dpu_kms->mp;
967 ret = msm_dss_parse_clock(pdev, mp);
968 if (ret) {
969 DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
970 return ret;
973 platform_set_drvdata(pdev, dpu_kms);
975 msm_kms_init(&dpu_kms->base, &kms_funcs);
976 dpu_kms->dev = ddev;
977 dpu_kms->pdev = pdev;
979 pm_runtime_enable(&pdev->dev);
980 dpu_kms->rpm_enabled = true;
982 priv->kms = &dpu_kms->base;
983 return ret;
986 static void dpu_unbind(struct device *dev, struct device *master, void *data)
988 struct platform_device *pdev = to_platform_device(dev);
989 struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
990 struct dss_module_power *mp = &dpu_kms->mp;
992 msm_dss_put_clk(mp->clk_config, mp->num_clk);
993 devm_kfree(&pdev->dev, mp->clk_config);
994 mp->num_clk = 0;
996 if (dpu_kms->rpm_enabled)
997 pm_runtime_disable(&pdev->dev);
1000 static const struct component_ops dpu_ops = {
1001 .bind = dpu_bind,
1002 .unbind = dpu_unbind,
1005 static int dpu_dev_probe(struct platform_device *pdev)
1007 return component_add(&pdev->dev, &dpu_ops);
1010 static int dpu_dev_remove(struct platform_device *pdev)
1012 component_del(&pdev->dev, &dpu_ops);
1013 return 0;
1016 static int __maybe_unused dpu_runtime_suspend(struct device *dev)
1018 int rc = -1;
1019 struct platform_device *pdev = to_platform_device(dev);
1020 struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1021 struct drm_device *ddev;
1022 struct dss_module_power *mp = &dpu_kms->mp;
1024 ddev = dpu_kms->dev;
1025 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
1026 if (rc)
1027 DPU_ERROR("clock disable failed rc:%d\n", rc);
1029 return rc;
1032 static int __maybe_unused dpu_runtime_resume(struct device *dev)
1034 int rc = -1;
1035 struct platform_device *pdev = to_platform_device(dev);
1036 struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
1037 struct drm_encoder *encoder;
1038 struct drm_device *ddev;
1039 struct dss_module_power *mp = &dpu_kms->mp;
1041 ddev = dpu_kms->dev;
1042 rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
1043 if (rc) {
1044 DPU_ERROR("clock enable failed rc:%d\n", rc);
1045 return rc;
1048 dpu_vbif_init_memtypes(dpu_kms);
1050 drm_for_each_encoder(encoder, ddev)
1051 dpu_encoder_virt_runtime_resume(encoder);
1053 return rc;
1056 static const struct dev_pm_ops dpu_pm_ops = {
1057 SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
1060 static const struct of_device_id dpu_dt_match[] = {
1061 { .compatible = "qcom,sdm845-dpu", },
1062 { .compatible = "qcom,sc7180-dpu", },
1065 MODULE_DEVICE_TABLE(of, dpu_dt_match);
1067 static struct platform_driver dpu_driver = {
1068 .probe = dpu_dev_probe,
1069 .remove = dpu_dev_remove,
1070 .driver = {
1071 .name = "msm_dpu",
1072 .of_match_table = dpu_dt_match,
1073 .pm = &dpu_pm_ops,
1077 void __init msm_dpu_register(void)
1079 platform_driver_register(&dpu_driver);
1082 void __exit msm_dpu_unregister(void)
1084 platform_driver_unregister(&dpu_driver);