/*
 * SPDX-License-Identifier: GPL-2.0
 * Copyright (c) 2018, The Linux Foundation
 */
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interconnect.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdesc.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>

#include "msm_mdss.h"
#include "msm_kms.h"

#include <generated/mdss.xml.h>
#define MIN_IB_BW	400000000UL /* Minimum ib vote: 400 MB/s */

#define DEFAULT_REG_BW	153600 /* Used in the mdss fbdev driver */
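/*
 * Per-device state of the top-level MDSS wrapper: MMIO region, bulk clocks,
 * the chained interrupt controller, the per-SoC UBWC data and the
 * interconnect paths (up to two mdp-mem data paths plus an optional
 * register bus path).
 */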
struct msm_mdss {
	struct device *dev;

	void __iomem *mmio;
	struct clk_bulk_data *clocks;
	size_t num_clocks;
	bool is_mdp5;
	struct {
		unsigned long enabled_mask;
		struct irq_domain *domain;
	} irq_controller;
	const struct msm_mdss_data *mdss_data;
	struct icc_path *mdp_path[2];
	u32 num_mdp_paths;
	struct icc_path *reg_bus_path;
};
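/*
 * Look up the interconnect paths. A real error on "mdp0-mem" is fatal;
 * missing optional paths ("mdp1-mem" and the "cpu-cfg" register bus) are
 * simply skipped.
 */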
static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
					    struct msm_mdss *msm_mdss)
{
	struct icc_path *path0;
	struct icc_path *path1;
	struct icc_path *reg_bus_path;

	path0 = devm_of_icc_get(dev, "mdp0-mem");
	if (IS_ERR_OR_NULL(path0))
		return PTR_ERR_OR_ZERO(path0);

	msm_mdss->mdp_path[0] = path0;
	msm_mdss->num_mdp_paths = 1;

	path1 = devm_of_icc_get(dev, "mdp1-mem");
	if (!IS_ERR_OR_NULL(path1)) {
		msm_mdss->mdp_path[1] = path1;
		msm_mdss->num_mdp_paths++;
	}

	reg_bus_path = of_icc_get(dev, "cpu-cfg");
	if (!IS_ERR_OR_NULL(reg_bus_path))
		msm_mdss->reg_bus_path = reg_bus_path;

	return 0;
}
static void msm_mdss_irq(struct irq_desc *desc)
{
	struct msm_mdss *msm_mdss = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	u32 interrupts;

	chained_irq_enter(chip, desc);

	interrupts = readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_INTR_STATUS);

	while (interrupts != 0) {
		irq_hw_number_t hwirq = fls(interrupts) - 1;
		int rc;

		rc = generic_handle_domain_irq(msm_mdss->irq_controller.domain,
					       hwirq);
		if (rc < 0) {
			dev_err(msm_mdss->dev, "handle irq fail: irq=%lu rc=%d\n",
				hwirq, rc);
			break;
		}

		interrupts &= ~(1 << hwirq);
	}

	chained_irq_exit(chip, desc);
}
static void msm_mdss_irq_mask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	clear_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}
static void msm_mdss_irq_unmask(struct irq_data *irqd)
{
	struct msm_mdss *msm_mdss = irq_data_get_irq_chip_data(irqd);

	/* memory barrier */
	smp_mb__before_atomic();
	set_bit(irqd->hwirq, &msm_mdss->irq_controller.enabled_mask);
	/* memory barrier */
	smp_mb__after_atomic();
}
static struct irq_chip msm_mdss_irq_chip = {
	.name = "msm_mdss",
	.irq_mask = msm_mdss_irq_mask,
	.irq_unmask = msm_mdss_irq_unmask,
};
static struct lock_class_key msm_mdss_lock_key, msm_mdss_request_key;
static int msm_mdss_irqdomain_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	struct msm_mdss *msm_mdss = domain->host_data;

	irq_set_lockdep_class(irq, &msm_mdss_lock_key, &msm_mdss_request_key);
	irq_set_chip_and_handler(irq, &msm_mdss_irq_chip, handle_level_irq);

	return irq_set_chip_data(irq, msm_mdss);
}
static const struct irq_domain_ops msm_mdss_irqdomain_ops = {
	.map = msm_mdss_irqdomain_map,
	.xlate = irq_domain_xlate_onecell,
};
static int _msm_mdss_irq_domain_add(struct msm_mdss *msm_mdss)
{
	struct device *dev;
	struct irq_domain *domain;

	dev = msm_mdss->dev;

	domain = irq_domain_add_linear(dev->of_node, 32,
			&msm_mdss_irqdomain_ops, msm_mdss);
	if (!domain) {
		dev_err(dev, "failed to add irq_domain\n");
		return -EINVAL;
	}

	msm_mdss->irq_controller.enabled_mask = 0;
	msm_mdss->irq_controller.domain = domain;

	return 0;
}
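/*
 * UBWC decoder configuration. Each helper below programs MDSS_UBWC_STATIC
 * (and, for the 4.x decoder, UBWC_CTRL_2 / UBWC_PREDICTION_MODE) from the
 * per-SoC msm_mdss_data.
 */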
static void msm_mdss_setup_ubwc_dec_20(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;

	writel_relaxed(data->ubwc_static, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_30(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = (data->ubwc_swizzle & 0x1) |
		    (data->highest_bank_bit & 0x3) << 4 |
		    (data->macrotile_mode & 0x1) << 12;

	if (data->ubwc_enc_version == UBWC_3_0)
		value |= BIT(10);

	if (data->ubwc_enc_version == UBWC_1_0)
		value |= BIT(8);

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);
}
static void msm_mdss_setup_ubwc_dec_40(struct msm_mdss *msm_mdss)
{
	const struct msm_mdss_data *data = msm_mdss->mdss_data;
	u32 value = (data->ubwc_swizzle & 0x7) |
		    (data->ubwc_static & 0x1) << 3 |
		    (data->highest_bank_bit & 0x7) << 4 |
		    (data->macrotile_mode & 0x1) << 12;

	writel_relaxed(value, msm_mdss->mmio + REG_MDSS_UBWC_STATIC);

	if (data->ubwc_enc_version == UBWC_3_0) {
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(0, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	} else {
		if (data->ubwc_dec_version == UBWC_4_3)
			writel_relaxed(3, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		else
			writel_relaxed(2, msm_mdss->mmio + REG_MDSS_UBWC_CTRL_2);
		writel_relaxed(1, msm_mdss->mmio + REG_MDSS_UBWC_PREDICTION_MODE);
	}
}
#define MDSS_HW_MAJ_MIN		\
	(MDSS_HW_VERSION_MAJOR__MASK | MDSS_HW_VERSION_MINOR__MASK)

#define MDSS_HW_MSM8996	0x1007
#define MDSS_HW_MSM8937	0x100e
#define MDSS_HW_MSM8953	0x1010
#define MDSS_HW_MSM8998	0x3000
#define MDSS_HW_SDM660	0x3002
#define MDSS_HW_SDM630	0x3003
/*
 * MDP5 platforms use the generic qcom,mdss compat string, so there is no
 * per-SoC match data; generate it at runtime from the HW revision instead.
 */
static const struct msm_mdss_data *msm_mdss_generate_mdp5_mdss_data(struct msm_mdss *mdss)
{
	struct msm_mdss_data *data;
	u32 hw_rev;

	data = devm_kzalloc(mdss->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	hw_rev = readl_relaxed(mdss->mmio + REG_MDSS_HW_VERSION);
	hw_rev = FIELD_GET(MDSS_HW_MAJ_MIN, hw_rev);

	if (hw_rev == MDSS_HW_MSM8996 ||
	    hw_rev == MDSS_HW_MSM8937 ||
	    hw_rev == MDSS_HW_MSM8953 ||
	    hw_rev == MDSS_HW_MSM8998 ||
	    hw_rev == MDSS_HW_SDM660 ||
	    hw_rev == MDSS_HW_SDM630) {
		data->ubwc_dec_version = UBWC_1_0;
		data->ubwc_enc_version = UBWC_1_0;
	}

	if (hw_rev == MDSS_HW_MSM8996 ||
	    hw_rev == MDSS_HW_MSM8998)
		data->highest_bank_bit = 2;
	else
		data->highest_bank_bit = 1;

	return data;
}
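/*
 * msm_mdss_get_mdss_data() lets a caller fetch the UBWC configuration of an
 * MDSS device. Illustrative sketch only (hypothetical caller): a child
 * driver such as DPU would typically pass its parent MDSS device:
 *
 *	const struct msm_mdss_data *mdss_data =
 *		msm_mdss_get_mdss_data(pdev->dev.parent);
 *
 *	if (IS_ERR(mdss_data))
 *		return PTR_ERR(mdss_data);
 */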
const struct msm_mdss_data *msm_mdss_get_mdss_data(struct device *dev)
{
	struct msm_mdss *mdss;

	if (!dev)
		return ERR_PTR(-EINVAL);

	mdss = dev_get_drvdata(dev);

	/*
	 * This could not be done at probe time, since the hw revision
	 * register was not readable yet. Fill the data structure now for the
	 * MDP5 platforms.
	 */
	if (!mdss->mdss_data && mdss->is_mdp5)
		mdss->mdss_data = msm_mdss_generate_mdp5_mdss_data(mdss);

	return mdss->mdss_data;
}
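/*
 * Runtime-resume path: vote a minimum interconnect bandwidth, enable the
 * bulk clocks and, except on MDP5 hardware, program the UBWC decoder.
 */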
static int msm_mdss_enable(struct msm_mdss *msm_mdss)
{
	int ret, i;

	/*
	 * Several components have AXI clocks that can only be turned on if
	 * the interconnect is enabled (non-zero bandwidth). Let's make sure
	 * that the interconnects are voted for at least a minimum bandwidth.
	 */
	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, Bps_to_icc(MIN_IB_BW));

	if (msm_mdss->mdss_data && msm_mdss->mdss_data->reg_bus_bw)
		icc_set_bw(msm_mdss->reg_bus_path, 0,
			   msm_mdss->mdss_data->reg_bus_bw);
	else
		icc_set_bw(msm_mdss->reg_bus_path, 0,
			   DEFAULT_REG_BW);

	ret = clk_bulk_prepare_enable(msm_mdss->num_clocks, msm_mdss->clocks);
	if (ret) {
		dev_err(msm_mdss->dev, "clock enable failed, ret:%d\n", ret);
		return ret;
	}

	/*
	 * Register access requires MDSS_MDP_CLK, which is not enabled by the
	 * mdss on mdp5 hardware. Skip it for now.
	 */
	if (msm_mdss->is_mdp5 || !msm_mdss->mdss_data)
		return 0;

	/*
	 * The UBWC config is part of the "mdss" region, which is not
	 * accessible from the rest of the driver, so hardcode the known
	 * configurations here.
	 *
	 * The decoder version can be read from the UBWC_DEC_HW_VERSION
	 * register; the UBWC_n encoder version and the remaining parameters
	 * come from the per-SoC hw data.
	 */
	switch (msm_mdss->mdss_data->ubwc_dec_version) {
	case 0: /* no UBWC */
	case UBWC_1_0:
		/* do nothing */
		break;
	case UBWC_2_0:
		msm_mdss_setup_ubwc_dec_20(msm_mdss);
		break;
	case UBWC_3_0:
		msm_mdss_setup_ubwc_dec_30(msm_mdss);
		break;
	case UBWC_4_0:
	case UBWC_4_3:
		msm_mdss_setup_ubwc_dec_40(msm_mdss);
		break;
	default:
		dev_err(msm_mdss->dev, "Unsupported UBWC decoder version %x\n",
			msm_mdss->mdss_data->ubwc_dec_version);
		dev_err(msm_mdss->dev, "HW_REV: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_HW_VERSION));
		dev_err(msm_mdss->dev, "UBWC_DEC_HW_VERSION: 0x%x\n",
			readl_relaxed(msm_mdss->mmio + REG_MDSS_UBWC_DEC_HW_VERSION));
		break;
	}

	return 0;
}
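/* Runtime-suspend path: disable the clocks and drop all interconnect votes. */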
static int msm_mdss_disable(struct msm_mdss *msm_mdss)
{
	int i;

	clk_bulk_disable_unprepare(msm_mdss->num_clocks, msm_mdss->clocks);

	for (i = 0; i < msm_mdss->num_mdp_paths; i++)
		icc_set_bw(msm_mdss->mdp_path[i], 0, 0);

	if (msm_mdss->reg_bus_path)
		icc_set_bw(msm_mdss->reg_bus_path, 0, 0);

	return 0;
}
static void msm_mdss_destroy(struct msm_mdss *msm_mdss)
{
	struct platform_device *pdev = to_platform_device(msm_mdss->dev);
	int irq;

	pm_runtime_suspend(msm_mdss->dev);
	pm_runtime_disable(msm_mdss->dev);
	irq_domain_remove(msm_mdss->irq_controller.domain);
	msm_mdss->irq_controller.domain = NULL;
	irq = platform_get_irq(pdev, 0);
	irq_set_chained_handler_and_data(irq, NULL, NULL);
}
static int msm_mdss_reset(struct device *dev)
{
	struct reset_control *reset;

	reset = reset_control_get_optional_exclusive(dev, NULL);
	if (!reset) {
		/* Optional reset not specified */
		return 0;
	} else if (IS_ERR(reset)) {
		return dev_err_probe(dev, PTR_ERR(reset),
				     "failed to acquire mdss reset\n");
	}

	reset_control_assert(reset);
	/*
	 * Tests indicate that the reset has to be held for some period of
	 * time; make it one frame in a typical system.
	 */
	msleep(20);
	reset_control_deassert(reset);

	reset_control_put(reset);

	return 0;
}
/*
 * MDP5 MDSS uses at most three specified clocks.
 */
#define MDP5_MDSS_NUM_CLOCKS 3
static int mdp5_mdss_parse_clock(struct platform_device *pdev, struct clk_bulk_data **clocks)
{
	struct clk_bulk_data *bulk;
	int num_clocks = 0;
	int ret;

	if (!pdev)
		return -EINVAL;

	bulk = devm_kcalloc(&pdev->dev, MDP5_MDSS_NUM_CLOCKS, sizeof(struct clk_bulk_data), GFP_KERNEL);
	if (!bulk)
		return -ENOMEM;

	bulk[num_clocks++].id = "iface";
	bulk[num_clocks++].id = "bus";
	bulk[num_clocks++].id = "vsync";

	ret = devm_clk_bulk_get_optional(&pdev->dev, num_clocks, bulk);
	if (ret)
		return ret;

	*clocks = bulk;

	return num_clocks;
}
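/*
 * One-time setup at probe: optional block reset, MMIO mapping, interconnect
 * and clock lookup, irq domain creation and installation of the chained
 * handler, then runtime PM enablement.
 */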
static struct msm_mdss *msm_mdss_init(struct platform_device *pdev, bool is_mdp5)
{
	struct msm_mdss *msm_mdss;
	int ret;
	int irq;

	ret = msm_mdss_reset(&pdev->dev);
	if (ret)
		return ERR_PTR(ret);

	msm_mdss = devm_kzalloc(&pdev->dev, sizeof(*msm_mdss), GFP_KERNEL);
	if (!msm_mdss)
		return ERR_PTR(-ENOMEM);

	msm_mdss->mdss_data = of_device_get_match_data(&pdev->dev);

	msm_mdss->mmio = devm_platform_ioremap_resource_byname(pdev, is_mdp5 ? "mdss_phys" : "mdss");
	if (IS_ERR(msm_mdss->mmio))
		return ERR_CAST(msm_mdss->mmio);

	dev_dbg(&pdev->dev, "mapped mdss address space @%pK\n", msm_mdss->mmio);

	ret = msm_mdss_parse_data_bus_icc_path(&pdev->dev, msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	if (is_mdp5)
		ret = mdp5_mdss_parse_clock(pdev, &msm_mdss->clocks);
	else
		ret = devm_clk_bulk_get_all(&pdev->dev, &msm_mdss->clocks);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to parse clocks, ret=%d\n", ret);
		return ERR_PTR(ret);
	}
	msm_mdss->num_clocks = ret;
	msm_mdss->is_mdp5 = is_mdp5;

	msm_mdss->dev = &pdev->dev;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return ERR_PTR(irq);

	ret = _msm_mdss_irq_domain_add(msm_mdss);
	if (ret)
		return ERR_PTR(ret);

	irq_set_chained_handler_and_data(irq, msm_mdss_irq,
					 msm_mdss);

	pm_runtime_enable(&pdev->dev);

	return msm_mdss;
}
static int __maybe_unused mdss_runtime_suspend(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	return msm_mdss_disable(mdss);
}
static int __maybe_unused mdss_runtime_resume(struct device *dev)
{
	struct msm_mdss *mdss = dev_get_drvdata(dev);

	return msm_mdss_enable(mdss);
}
static int __maybe_unused mdss_pm_suspend(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_suspend(dev);
}
static int __maybe_unused mdss_pm_resume(struct device *dev)
{
	if (pm_runtime_suspended(dev))
		return 0;

	return mdss_runtime_resume(dev);
}
static const struct dev_pm_ops mdss_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mdss_pm_suspend, mdss_pm_resume)
	SET_RUNTIME_PM_OPS(mdss_runtime_suspend, mdss_runtime_resume, NULL)
};
static int mdss_probe(struct platform_device *pdev)
{
	struct msm_mdss *mdss;
	bool is_mdp5 = of_device_is_compatible(pdev->dev.of_node, "qcom,mdss");
	struct device *dev = &pdev->dev;
	int ret;

	mdss = msm_mdss_init(pdev, is_mdp5);
	if (IS_ERR(mdss))
		return PTR_ERR(mdss);

	platform_set_drvdata(pdev, mdss);

	/*
	 * MDP5/DPU based devices don't have a flat hierarchy. There is a top
	 * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
	 * Populate the children devices, find the MDP5/DPU node, and then add
	 * the interfaces to our components list.
	 */
	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
	if (ret) {
		DRM_DEV_ERROR(dev, "failed to populate children devices\n");
		msm_mdss_destroy(mdss);
		return ret;
	}

	return 0;
}
static void mdss_remove(struct platform_device *pdev)
{
	struct msm_mdss *mdss = platform_get_drvdata(pdev);

	of_platform_depopulate(&pdev->dev);

	msm_mdss_destroy(mdss);
}
static const struct msm_mdss_data msm8998_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_1_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data qcm2290_data = {
	.highest_bank_bit = 0x2,
};

static const struct msm_mdss_data sa8775p_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.highest_bank_bit = 0,
};

static const struct msm_mdss_data sc7180_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 0x1,
};

static const struct msm_mdss_data sc7280_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_4_0,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sc8180x_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 3,
};

static const struct msm_mdss_data sc8280xp_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	.highest_bank_bit = 3,
};

static const struct msm_mdss_data sdm670_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sdm845_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data sm6350_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm7150_data = {
	.ubwc_enc_version = UBWC_2_0,
	.ubwc_dec_version = UBWC_2_0,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm8150_data = {
	.ubwc_enc_version = UBWC_3_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 2,
};

static const struct msm_mdss_data sm6115_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_2_0,
	.ubwc_static = 0x11f,
	.highest_bank_bit = 0x1,
};

static const struct msm_mdss_data sm6125_data = {
	.ubwc_enc_version = UBWC_1_0,
	.ubwc_dec_version = UBWC_3_0,
	.highest_bank_bit = 1,
};

static const struct msm_mdss_data sm8250_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
};

static const struct msm_mdss_data sm8350_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_0,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
};

static const struct msm_mdss_data sm8550_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_3,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
};

static const struct msm_mdss_data x1e80100_data = {
	.ubwc_enc_version = UBWC_4_0,
	.ubwc_dec_version = UBWC_4_3,
	/* TODO: highest_bank_bit = 2 for LP_DDR4 */
	.highest_bank_bit = 3,
	/* TODO: Add reg_bus_bw with real value */
};
static const struct of_device_id mdss_dt_match[] = {
	{ .compatible = "qcom,mdss" },
	{ .compatible = "qcom,msm8998-mdss", .data = &msm8998_data },
	{ .compatible = "qcom,qcm2290-mdss", .data = &qcm2290_data },
	{ .compatible = "qcom,sa8775p-mdss", .data = &sa8775p_data },
	{ .compatible = "qcom,sdm670-mdss", .data = &sdm670_data },
	{ .compatible = "qcom,sdm845-mdss", .data = &sdm845_data },
	{ .compatible = "qcom,sc7180-mdss", .data = &sc7180_data },
	{ .compatible = "qcom,sc7280-mdss", .data = &sc7280_data },
	{ .compatible = "qcom,sc8180x-mdss", .data = &sc8180x_data },
	{ .compatible = "qcom,sc8280xp-mdss", .data = &sc8280xp_data },
	{ .compatible = "qcom,sm6115-mdss", .data = &sm6115_data },
	{ .compatible = "qcom,sm6125-mdss", .data = &sm6125_data },
	{ .compatible = "qcom,sm6350-mdss", .data = &sm6350_data },
	{ .compatible = "qcom,sm6375-mdss", .data = &sm6350_data },
	{ .compatible = "qcom,sm7150-mdss", .data = &sm7150_data },
	{ .compatible = "qcom,sm8150-mdss", .data = &sm8150_data },
	{ .compatible = "qcom,sm8250-mdss", .data = &sm8250_data },
	{ .compatible = "qcom,sm8350-mdss", .data = &sm8350_data },
	{ .compatible = "qcom,sm8450-mdss", .data = &sm8350_data },
	{ .compatible = "qcom,sm8550-mdss", .data = &sm8550_data },
	{ .compatible = "qcom,sm8650-mdss", .data = &sm8550_data },
	{ .compatible = "qcom,x1e80100-mdss", .data = &x1e80100_data },
	{}
};
MODULE_DEVICE_TABLE(of, mdss_dt_match);
static struct platform_driver mdss_platform_driver = {
	.probe = mdss_probe,
	.remove = mdss_remove,
	.driver = {
		.name = "msm-mdss",
		.of_match_table = mdss_dt_match,
		.pm = &mdss_pm_ops,
	},
};
void __init msm_mdss_register(void)
{
	platform_driver_register(&mdss_platform_driver);
}

void __exit msm_mdss_unregister(void)
{
	platform_driver_unregister(&mdss_platform_driver);
}