Linux 4.2.1
drivers/gpu/drm/msm/dsi/dsi_phy.c
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>

#include "dsi.h"
#include "dsi.xml.h"

#define dsi_phy_read(offset) msm_readl((offset))
#define dsi_phy_write(offset, data) msm_writel((data), (offset))
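
/* Per-generation PHY programming hooks, selected via the config table below. */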
struct dsi_phy_ops {
	int (*enable)(struct msm_dsi_phy *phy, bool is_dual_panel,
		const unsigned long bit_rate, const unsigned long esc_rate);
	int (*disable)(struct msm_dsi_phy *phy);
};

struct dsi_phy_cfg {
	enum msm_dsi_phy_type type;
	struct dsi_reg_config reg_cfg;
	struct dsi_phy_ops ops;
};
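
/*
 * MIPI D-PHY timing parameters, programmed into the PHY TIMING_CTRL
 * registers by the enable hook.
 */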
struct dsi_dphy_timing {
	u32 clk_pre;
	u32 clk_post;
	u32 clk_zero;
	u32 clk_trail;
	u32 clk_prepare;
	u32 hs_exit;
	u32 hs_zero;
	u32 hs_prepare;
	u32 hs_trail;
	u32 hs_rqst;
	u32 ta_go;
	u32 ta_sure;
	u32 ta_get;
};

struct msm_dsi_phy {
	struct platform_device *pdev;
	void __iomem *base;
	void __iomem *reg_base;
	int id;

	struct clk *ahb_clk;
	struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];

	struct dsi_dphy_timing timing;
	const struct dsi_phy_cfg *cfg;

	struct msm_dsi_pll *pll;
};
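
/*
 * Look up the supplies named in the per-PHY config and apply their voltage
 * ranges; the regulator loads are set later, at enable/disable time.
 */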
static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int i, ret;

	for (i = 0; i < num; i++)
		s[i].supply = regs[i].name;

	ret = devm_regulator_bulk_get(&phy->pdev->dev, num, s);
	if (ret < 0) {
		dev_err(dev, "%s: failed to init regulator, ret=%d\n",
			__func__, ret);
		return ret;
	}

	for (i = 0; i < num; i++) {
		if ((regs[i].min_voltage >= 0) && (regs[i].max_voltage >= 0)) {
			ret = regulator_set_voltage(s[i].consumer,
				regs[i].min_voltage, regs[i].max_voltage);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set voltage failed, %d\n",
					i, ret);
				return ret;
			}
		}
	}

	return 0;
}
static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	int num = phy->cfg->reg_cfg.num;
	int i;

	DBG("");
	for (i = num - 1; i >= 0; i--)
		if (regs[i].disable_load >= 0)
			regulator_set_load(s[i].consumer,
				regs[i].disable_load);

	regulator_bulk_disable(num, s);
}
static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
{
	struct regulator_bulk_data *s = phy->supplies;
	const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
	struct device *dev = &phy->pdev->dev;
	int num = phy->cfg->reg_cfg.num;
	int ret, i;

	DBG("");
	for (i = 0; i < num; i++) {
		if (regs[i].enable_load >= 0) {
			ret = regulator_set_load(s[i].consumer,
				regs[i].enable_load);
			if (ret < 0) {
				dev_err(dev,
					"regulator %d set op mode failed, %d\n",
					i, ret);
				goto fail;
			}
		}
	}

	ret = regulator_bulk_enable(num, s);
	if (ret < 0) {
		dev_err(dev, "regulator enable failed, %d\n", ret);
		goto fail;
	}

	return 0;

fail:
	for (i--; i >= 0; i--)
		regulator_set_load(s[i].consumer, regs[i].disable_load);
	return ret;
}
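
/* DIV_ROUND_UP for signed values: rounds away from zero when n is negative. */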
#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
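
/*
 * Interpolate "percent" percent of the way from tmin towards tmax, clamp
 * the result to min_result, and optionally force it even.
 */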
static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
				s32 min_result, bool even)
{
	s32 v;
	v = (tmax - tmin) * percent;
	v = S_DIV_ROUND_UP(v, 100) + tmin;
	if (even && (v & 0x1))
		return max_t(s32, min_result, v - 1);
	else
		return max_t(s32, min_result, v);
}
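
/*
 * clk_zero is derived from the 300 ns spec minimum for clk_prepare + clk_zero,
 * then padded so that hs_rqst + clk_prepare + clk_zero is a multiple of 8.
 */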
static void dsi_dphy_timing_calc_clk_zero(struct dsi_dphy_timing *timing,
					s32 ui, s32 coeff, s32 pcnt)
{
	s32 tmax, tmin, clk_z;
	s32 temp;

	/* reset */
	temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	if (tmin > 255) {
		tmax = 511;
		clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
	} else {
		tmax = 255;
		clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
	}

	/* adjust */
	temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
	timing->clk_zero = clk_z + 8 - temp;
}
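
/*
 * Derive all D-PHY timing fields from the lane bit rate and escape clock
 * rate.  ui and lpx are scaled by "coeff" (1000) so the nanosecond spec
 * limits can be handled with integer math; each field is then picked a
 * fixed percentage into its allowed [tmin, tmax] window via linear_inter().
 */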
static int dsi_dphy_timing_calc(struct dsi_dphy_timing *timing,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	s32 ui, lpx;
	s32 tmax, tmin;
	s32 pcnt0 = 10;
	s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
	s32 pcnt2 = 10;
	s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
	s32 coeff = 1000; /* Precision, should avoid overflow */
	s32 temp;

	if (!bit_rate || !esc_rate)
		return -EINVAL;

	ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
	lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);

	tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
	tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
	timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);

	temp = lpx / ui;
	if (temp & 0x1)
		timing->hs_rqst = temp;
	else
		timing->hs_rqst = max_t(s32, 0, temp - 2);

	/* Calculate clk_zero after clk_prepare and hs_rqst */
	dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
	timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	temp = 85 * coeff + 6 * ui;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 40 * coeff + 4 * ui;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);

	tmax = 255;
	temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
	temp = 145 * coeff + 10 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);

	temp = 105 * coeff + 12 * ui - 20 * coeff;
	tmax = S_DIV_ROUND_UP(temp, ui) - 2;
	temp = 60 * coeff + 4 * ui;
	tmin = DIV_ROUND_UP(temp, ui) - 2;
	timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);

	tmax = 255;
	tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
	timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);

	tmax = 63;
	temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
	temp = 60 * coeff + 52 * ui - 24 * ui - temp;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);

	tmax = 63;
	temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
	temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
	temp += 8 * ui + lpx;
	tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
	if (tmin > tmax) {
		temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false) >> 1;
		timing->clk_pre = temp >> 1;
		temp = (2 * tmax - tmin) * pcnt2;
	} else {
		timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
	}

	timing->ta_go = 3;
	timing->ta_sure = 0;
	timing->ta_get = 4;

	DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
		timing->clk_pre, timing->clk_post, timing->clk_zero,
		timing->clk_trail, timing->clk_prepare, timing->hs_exit,
		timing->hs_zero, timing->hs_prepare, timing->hs_trail,
		timing->hs_rqst);

	return 0;
}
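
/* Power the 28nm PHY regulator block up (enable) or down (disable). */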
static void dsi_28nm_phy_regulator_ctrl(struct msm_dsi_phy *phy, bool enable)
{
	void __iomem *base = phy->reg_base;

	if (!enable) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 0);
		return;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CAL_PWR_CFG, 1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_5, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_3, 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_2, 0x3);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_1, 0x9);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_0, 0x7);
	dsi_phy_write(base + REG_DSI_28nm_PHY_REGULATOR_CTRL_4, 0x20);
}
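
/*
 * Full 28nm PHY bring-up: calculate the D-PHY timings, power the PHY
 * regulator block, program the timing and strength registers, then the
 * per-lane and clock-lane configuration.  In a dual-panel setup only the
 * clock-master PHY drives GLBL_TEST_CTRL high.
 */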
static int dsi_28nm_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	struct dsi_dphy_timing *timing = &phy->timing;
	int i;
	void __iomem *base = phy->base;

	DBG("");

	if (dsi_dphy_timing_calc(timing, bit_rate, esc_rate)) {
		pr_err("%s: D-PHY timing calculation failed\n", __func__);
		return -EINVAL;
	}

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_0, 0xff);

	dsi_28nm_phy_regulator_ctrl(phy, true);

	dsi_phy_write(base + REG_DSI_28nm_PHY_LDO_CNTRL, 0x00);

	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_0,
		DSI_28nm_PHY_TIMING_CTRL_0_CLK_ZERO(timing->clk_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_1,
		DSI_28nm_PHY_TIMING_CTRL_1_CLK_TRAIL(timing->clk_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_2,
		DSI_28nm_PHY_TIMING_CTRL_2_CLK_PREPARE(timing->clk_prepare));
	if (timing->clk_zero & BIT(8))
		dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_3,
			DSI_28nm_PHY_TIMING_CTRL_3_CLK_ZERO_8);
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_4,
		DSI_28nm_PHY_TIMING_CTRL_4_HS_EXIT(timing->hs_exit));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_5,
		DSI_28nm_PHY_TIMING_CTRL_5_HS_ZERO(timing->hs_zero));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_6,
		DSI_28nm_PHY_TIMING_CTRL_6_HS_PREPARE(timing->hs_prepare));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_7,
		DSI_28nm_PHY_TIMING_CTRL_7_HS_TRAIL(timing->hs_trail));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_8,
		DSI_28nm_PHY_TIMING_CTRL_8_HS_RQST(timing->hs_rqst));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_9,
		DSI_28nm_PHY_TIMING_CTRL_9_TA_GO(timing->ta_go) |
		DSI_28nm_PHY_TIMING_CTRL_9_TA_SURE(timing->ta_sure));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_10,
		DSI_28nm_PHY_TIMING_CTRL_10_TA_GET(timing->ta_get));
	dsi_phy_write(base + REG_DSI_28nm_PHY_TIMING_CTRL_11,
		DSI_28nm_PHY_TIMING_CTRL_11_TRIG3_CMD(0));

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_1, 0x00);
	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	dsi_phy_write(base + REG_DSI_28nm_PHY_STRENGTH_1, 0x6);

	for (i = 0; i < 4; i++) {
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_0(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_1(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_2(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_3(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_DATAPATH(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_DEBUG_SEL(i), 0);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_0(i), 0x1);
		dsi_phy_write(base + REG_DSI_28nm_PHY_LN_TEST_STR_1(i), 0x97);
	}
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(0), 0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(1), 0x5);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(2), 0xa);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LN_CFG_4(3), 0xf);

	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_CFG_1, 0xc0);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR0, 0x1);
	dsi_phy_write(base + REG_DSI_28nm_PHY_LNCK_TEST_STR1, 0xbb);

	dsi_phy_write(base + REG_DSI_28nm_PHY_CTRL_0, 0x5f);

	if (is_dual_panel && (phy->id != DSI_CLOCK_MASTER))
		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x00);
	else
		dsi_phy_write(base + REG_DSI_28nm_PHY_GLBL_TEST_CTRL, 0x01);

	return 0;
}
static int dsi_28nm_phy_disable(struct msm_dsi_phy *phy)
{
	dsi_phy_write(phy->base + REG_DSI_28nm_PHY_CTRL_0, 0);
	dsi_28nm_phy_regulator_ctrl(phy, false);

	/*
	 * Wait for the register writes to complete in order to
	 * ensure that the phy is completely disabled
	 */
	wmb();

	return 0;
}
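
/* Runtime-PM and AHB clock handling needed around any PHY register access. */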
static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
{
	int ret;

	pm_runtime_get_sync(&phy->pdev->dev);

	ret = clk_prepare_enable(phy->ahb_clk);
	if (ret) {
		pr_err("%s: can't enable ahb clk, %d\n", __func__, ret);
		pm_runtime_put_sync(&phy->pdev->dev);
	}

	return ret;
}
static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
{
	clk_disable_unprepare(phy->ahb_clk);
	pm_runtime_put_sync(&phy->pdev->dev);
}
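
/*
 * Supported PHY generations.  Both 28nm variants use the same programming
 * sequence here and a single 1.8 V "vddio" supply (assuming the dsi_reg_entry
 * field order from dsi.h: name, min/max voltage in uV, enable/disable load
 * in uA).
 */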
static const struct dsi_phy_cfg dsi_phy_cfgs[MSM_DSI_PHY_MAX] = {
	[MSM_DSI_PHY_28NM_HPM] = {
		.type = MSM_DSI_PHY_28NM_HPM,
		.reg_cfg = {
			.num = 1,
			.regs = {
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
		.ops = {
			.enable = dsi_28nm_phy_enable,
			.disable = dsi_28nm_phy_disable,
		},
	},
	[MSM_DSI_PHY_28NM_LP] = {
		.type = MSM_DSI_PHY_28NM_LP,
		.reg_cfg = {
			.num = 1,
			.regs = {
				{"vddio", 1800000, 1800000, 100000, 100},
			},
		},
		.ops = {
			.enable = dsi_28nm_phy_enable,
			.disable = dsi_28nm_phy_disable,
		},
	},
};
static const struct of_device_id dsi_phy_dt_match[] = {
	{ .compatible = "qcom,dsi-phy-28nm-hpm",
	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_HPM],},
	{ .compatible = "qcom,dsi-phy-28nm-lp",
	  .data = &dsi_phy_cfgs[MSM_DSI_PHY_28NM_LP],},
	{}
};
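
/*
 * Probe maps the "dsi_phy" and "dsi_phy_regulator" register regions, gets
 * the "iface_clk" AHB clock and reads "qcom,dsi-phy-index".  An illustrative
 * device tree fragment is sketched below; the node name, addresses and clock
 * provider are placeholders, not taken from this file:
 *
 *	dsi_phy0: dsi-phy@... {
 *		compatible = "qcom,dsi-phy-28nm-hpm";
 *		reg-names = "dsi_phy", "dsi_phy_regulator";
 *		reg = <...>, <...>;
 *		clock-names = "iface_clk";
 *		clocks = <...>;
 *		qcom,dsi-phy-index = <0>;
 *		vddio-supply = <...>;
 *	};
 */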
static int dsi_phy_driver_probe(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy;
	const struct of_device_id *match;
	int ret;

	phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL);
	if (!phy)
		return -ENOMEM;

	match = of_match_node(dsi_phy_dt_match, pdev->dev.of_node);
	if (!match)
		return -ENODEV;

	phy->cfg = match->data;
	phy->pdev = pdev;

	ret = of_property_read_u32(pdev->dev.of_node,
				"qcom,dsi-phy-index", &phy->id);
	if (ret) {
		dev_err(&pdev->dev,
			"%s: PHY index not specified, ret=%d\n",
			__func__, ret);
		goto fail;
	}

	phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
	if (IS_ERR(phy->base)) {
		dev_err(&pdev->dev, "%s: failed to map phy base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}
	phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator", "DSI_PHY_REG");
	if (IS_ERR(phy->reg_base)) {
		dev_err(&pdev->dev,
			"%s: failed to map phy regulator base\n", __func__);
		ret = -ENOMEM;
		goto fail;
	}

	ret = dsi_phy_regulator_init(phy);
	if (ret) {
		dev_err(&pdev->dev, "%s: failed to init regulator\n", __func__);
		goto fail;
	}

	phy->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(phy->ahb_clk)) {
		pr_err("%s: Unable to get ahb clk\n", __func__);
		ret = PTR_ERR(phy->ahb_clk);
		goto fail;
	}

	/* PLL init will call into clk_register which requires
	 * register access, so we need to enable power and ahb clock.
	 */
	ret = dsi_phy_enable_resource(phy);
	if (ret)
		goto fail;

	phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
	if (!phy->pll)
		dev_info(&pdev->dev,
			"%s: pll init failed, need separate pll clk driver\n",
			__func__);

	dsi_phy_disable_resource(phy);

	platform_set_drvdata(pdev, phy);

	return 0;

fail:
	return ret;
}
static int dsi_phy_driver_remove(struct platform_device *pdev)
{
	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);

	if (phy && phy->pll) {
		msm_dsi_pll_destroy(phy->pll);
		phy->pll = NULL;
	}

	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver dsi_phy_platform_driver = {
	.probe  = dsi_phy_driver_probe,
	.remove = dsi_phy_driver_remove,
	.driver = {
		.name = "msm_dsi_phy",
		.of_match_table = dsi_phy_dt_match,
	},
};
void __init msm_dsi_phy_driver_register(void)
{
	platform_driver_register(&dsi_phy_platform_driver);
}

void __exit msm_dsi_phy_driver_unregister(void)
{
	platform_driver_unregister(&dsi_phy_platform_driver);
}
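
/*
 * Entry points used by the rest of the msm DSI driver: enable/disable the
 * PHY around link bring-up and expose the computed clk_pre/clk_post values
 * and the PHY-provided PLL.
 */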
int msm_dsi_phy_enable(struct msm_dsi_phy *phy, bool is_dual_panel,
	const unsigned long bit_rate, const unsigned long esc_rate)
{
	int ret;

	if (!phy || !phy->cfg->ops.enable)
		return -EINVAL;

	ret = dsi_phy_regulator_enable(phy);
	if (ret) {
		dev_err(&phy->pdev->dev, "%s: regulator enable failed, %d\n",
			__func__, ret);
		return ret;
	}

	return phy->cfg->ops.enable(phy, is_dual_panel, bit_rate, esc_rate);
}
int msm_dsi_phy_disable(struct msm_dsi_phy *phy)
{
	if (!phy || !phy->cfg->ops.disable)
		return -EINVAL;

	phy->cfg->ops.disable(phy);
	dsi_phy_regulator_disable(phy);

	return 0;
}
void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
	u32 *clk_pre, u32 *clk_post)
{
	if (!phy)
		return;
	if (clk_pre)
		*clk_pre = phy->timing.clk_pre;
	if (clk_post)
		*clk_post = phy->timing.clk_post;
}
struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
{
	if (!phy)
		return NULL;

	return phy->pll;
}