EDAC: i7core, sb_edac: Don't return NOTIFY_BAD from mce_decoder callback
[linux/fpc-iii.git] / drivers / gpu / drm / msm / dsi / phy / dsi_phy.c
blob91a95fb04a4a00394874e0ef2825ead71e2d74d4
/*
 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
14 #include <linux/platform_device.h>
16 #include "dsi_phy.h"
/*
 * Like DIV_ROUND_UP() but also correct for negative numerators: rounds
 * the quotient away from zero.  C integer division truncates toward
 * zero, so the pre-division adjustment must flip sign with @n.
 */
#define S_DIV_ROUND_UP(n, d)	\
	(((n) >= 0) ? (((n) + (d) - 1) / (d)) : (((n) - (d) + 1) / (d)))
21 static inline s32 linear_inter(s32 tmax, s32 tmin, s32 percent,
22 s32 min_result, bool even)
24 s32 v;
26 v = (tmax - tmin) * percent;
27 v = S_DIV_ROUND_UP(v, 100) + tmin;
28 if (even && (v & 0x1))
29 return max_t(s32, min_result, v - 1);
30 else
31 return max_t(s32, min_result, v);
34 static void dsi_dphy_timing_calc_clk_zero(struct msm_dsi_dphy_timing *timing,
35 s32 ui, s32 coeff, s32 pcnt)
37 s32 tmax, tmin, clk_z;
38 s32 temp;
40 /* reset */
41 temp = 300 * coeff - ((timing->clk_prepare >> 1) + 1) * 2 * ui;
42 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
43 if (tmin > 255) {
44 tmax = 511;
45 clk_z = linear_inter(2 * tmin, tmin, pcnt, 0, true);
46 } else {
47 tmax = 255;
48 clk_z = linear_inter(tmax, tmin, pcnt, 0, true);
51 /* adjust */
52 temp = (timing->hs_rqst + timing->clk_prepare + clk_z) & 0x7;
53 timing->clk_zero = clk_z + 8 - temp;
56 int msm_dsi_dphy_timing_calc(struct msm_dsi_dphy_timing *timing,
57 const unsigned long bit_rate, const unsigned long esc_rate)
59 s32 ui, lpx;
60 s32 tmax, tmin;
61 s32 pcnt0 = 10;
62 s32 pcnt1 = (bit_rate > 1200000000) ? 15 : 10;
63 s32 pcnt2 = 10;
64 s32 pcnt3 = (bit_rate > 180000000) ? 10 : 40;
65 s32 coeff = 1000; /* Precision, should avoid overflow */
66 s32 temp;
68 if (!bit_rate || !esc_rate)
69 return -EINVAL;
71 ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
72 lpx = mult_frac(NSEC_PER_MSEC, coeff, esc_rate / 1000);
74 tmax = S_DIV_ROUND_UP(95 * coeff, ui) - 2;
75 tmin = S_DIV_ROUND_UP(38 * coeff, ui) - 2;
76 timing->clk_prepare = linear_inter(tmax, tmin, pcnt0, 0, true);
78 temp = lpx / ui;
79 if (temp & 0x1)
80 timing->hs_rqst = temp;
81 else
82 timing->hs_rqst = max_t(s32, 0, temp - 2);
84 /* Calculate clk_zero after clk_prepare and hs_rqst */
85 dsi_dphy_timing_calc_clk_zero(timing, ui, coeff, pcnt2);
87 temp = 105 * coeff + 12 * ui - 20 * coeff;
88 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
89 tmin = S_DIV_ROUND_UP(60 * coeff, ui) - 2;
90 timing->clk_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
92 temp = 85 * coeff + 6 * ui;
93 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
94 temp = 40 * coeff + 4 * ui;
95 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
96 timing->hs_prepare = linear_inter(tmax, tmin, pcnt1, 0, true);
98 tmax = 255;
99 temp = ((timing->hs_prepare >> 1) + 1) * 2 * ui + 2 * ui;
100 temp = 145 * coeff + 10 * ui - temp;
101 tmin = S_DIV_ROUND_UP(temp, ui) - 2;
102 timing->hs_zero = linear_inter(tmax, tmin, pcnt2, 24, true);
104 temp = 105 * coeff + 12 * ui - 20 * coeff;
105 tmax = S_DIV_ROUND_UP(temp, ui) - 2;
106 temp = 60 * coeff + 4 * ui;
107 tmin = DIV_ROUND_UP(temp, ui) - 2;
108 timing->hs_trail = linear_inter(tmax, tmin, pcnt3, 0, true);
110 tmax = 255;
111 tmin = S_DIV_ROUND_UP(100 * coeff, ui) - 2;
112 timing->hs_exit = linear_inter(tmax, tmin, pcnt2, 0, true);
114 tmax = 63;
115 temp = ((timing->hs_exit >> 1) + 1) * 2 * ui;
116 temp = 60 * coeff + 52 * ui - 24 * ui - temp;
117 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
118 timing->clk_post = linear_inter(tmax, tmin, pcnt2, 0, false);
120 tmax = 63;
121 temp = ((timing->clk_prepare >> 1) + 1) * 2 * ui;
122 temp += ((timing->clk_zero >> 1) + 1) * 2 * ui;
123 temp += 8 * ui + lpx;
124 tmin = S_DIV_ROUND_UP(temp, 8 * ui) - 1;
125 if (tmin > tmax) {
126 temp = linear_inter(2 * tmax, tmin, pcnt2, 0, false);
127 timing->clk_pre = temp >> 1;
128 } else {
129 timing->clk_pre = linear_inter(tmax, tmin, pcnt2, 0, false);
132 timing->ta_go = 3;
133 timing->ta_sure = 0;
134 timing->ta_get = 4;
136 DBG("PHY timings: %d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
137 timing->clk_pre, timing->clk_post, timing->clk_zero,
138 timing->clk_trail, timing->clk_prepare, timing->hs_exit,
139 timing->hs_zero, timing->hs_prepare, timing->hs_trail,
140 timing->hs_rqst);
142 return 0;
145 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
146 u32 bit_mask)
148 int phy_id = phy->id;
149 u32 val;
151 if ((phy_id >= DSI_MAX) || (pll_id >= DSI_MAX))
152 return;
154 val = dsi_phy_read(phy->base + reg);
156 if (phy->cfg->src_pll_truthtable[phy_id][pll_id])
157 dsi_phy_write(phy->base + reg, val | bit_mask);
158 else
159 dsi_phy_write(phy->base + reg, val & (~bit_mask));
162 static int dsi_phy_regulator_init(struct msm_dsi_phy *phy)
164 struct regulator_bulk_data *s = phy->supplies;
165 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
166 struct device *dev = &phy->pdev->dev;
167 int num = phy->cfg->reg_cfg.num;
168 int i, ret;
170 for (i = 0; i < num; i++)
171 s[i].supply = regs[i].name;
173 ret = devm_regulator_bulk_get(dev, num, s);
174 if (ret < 0) {
175 dev_err(dev, "%s: failed to init regulator, ret=%d\n",
176 __func__, ret);
177 return ret;
180 for (i = 0; i < num; i++) {
181 if (regulator_can_change_voltage(s[i].consumer)) {
182 ret = regulator_set_voltage(s[i].consumer,
183 regs[i].min_voltage, regs[i].max_voltage);
184 if (ret < 0) {
185 dev_err(dev,
186 "regulator %d set voltage failed, %d\n",
187 i, ret);
188 return ret;
193 return 0;
196 static void dsi_phy_regulator_disable(struct msm_dsi_phy *phy)
198 struct regulator_bulk_data *s = phy->supplies;
199 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
200 int num = phy->cfg->reg_cfg.num;
201 int i;
203 DBG("");
204 for (i = num - 1; i >= 0; i--)
205 if (regs[i].disable_load >= 0)
206 regulator_set_load(s[i].consumer, regs[i].disable_load);
208 regulator_bulk_disable(num, s);
211 static int dsi_phy_regulator_enable(struct msm_dsi_phy *phy)
213 struct regulator_bulk_data *s = phy->supplies;
214 const struct dsi_reg_entry *regs = phy->cfg->reg_cfg.regs;
215 struct device *dev = &phy->pdev->dev;
216 int num = phy->cfg->reg_cfg.num;
217 int ret, i;
219 DBG("");
220 for (i = 0; i < num; i++) {
221 if (regs[i].enable_load >= 0) {
222 ret = regulator_set_load(s[i].consumer,
223 regs[i].enable_load);
224 if (ret < 0) {
225 dev_err(dev,
226 "regulator %d set op mode failed, %d\n",
227 i, ret);
228 goto fail;
233 ret = regulator_bulk_enable(num, s);
234 if (ret < 0) {
235 dev_err(dev, "regulator enable failed, %d\n", ret);
236 goto fail;
239 return 0;
241 fail:
242 for (i--; i >= 0; i--)
243 regulator_set_load(s[i].consumer, regs[i].disable_load);
244 return ret;
247 static int dsi_phy_enable_resource(struct msm_dsi_phy *phy)
249 struct device *dev = &phy->pdev->dev;
250 int ret;
252 pm_runtime_get_sync(dev);
254 ret = clk_prepare_enable(phy->ahb_clk);
255 if (ret) {
256 dev_err(dev, "%s: can't enable ahb clk, %d\n", __func__, ret);
257 pm_runtime_put_sync(dev);
260 return ret;
263 static void dsi_phy_disable_resource(struct msm_dsi_phy *phy)
265 clk_disable_unprepare(phy->ahb_clk);
266 pm_runtime_put_sync(&phy->pdev->dev);
269 static const struct of_device_id dsi_phy_dt_match[] = {
270 #ifdef CONFIG_DRM_MSM_DSI_28NM_PHY
271 { .compatible = "qcom,dsi-phy-28nm-hpm",
272 .data = &dsi_phy_28nm_hpm_cfgs },
273 { .compatible = "qcom,dsi-phy-28nm-lp",
274 .data = &dsi_phy_28nm_lp_cfgs },
275 #endif
276 #ifdef CONFIG_DRM_MSM_DSI_20NM_PHY
277 { .compatible = "qcom,dsi-phy-20nm",
278 .data = &dsi_phy_20nm_cfgs },
279 #endif
280 #ifdef CONFIG_DRM_MSM_DSI_28NM_8960_PHY
281 { .compatible = "qcom,dsi-phy-28nm-8960",
282 .data = &dsi_phy_28nm_8960_cfgs },
283 #endif
287 static int dsi_phy_driver_probe(struct platform_device *pdev)
289 struct msm_dsi_phy *phy;
290 struct device *dev = &pdev->dev;
291 const struct of_device_id *match;
292 int ret;
294 phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
295 if (!phy)
296 return -ENOMEM;
298 match = of_match_node(dsi_phy_dt_match, dev->of_node);
299 if (!match)
300 return -ENODEV;
302 phy->cfg = match->data;
303 phy->pdev = pdev;
305 ret = of_property_read_u32(dev->of_node,
306 "qcom,dsi-phy-index", &phy->id);
307 if (ret) {
308 dev_err(dev, "%s: PHY index not specified, %d\n",
309 __func__, ret);
310 goto fail;
313 phy->regulator_ldo_mode = of_property_read_bool(dev->of_node,
314 "qcom,dsi-phy-regulator-ldo-mode");
316 phy->base = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
317 if (IS_ERR(phy->base)) {
318 dev_err(dev, "%s: failed to map phy base\n", __func__);
319 ret = -ENOMEM;
320 goto fail;
323 phy->reg_base = msm_ioremap(pdev, "dsi_phy_regulator",
324 "DSI_PHY_REG");
325 if (IS_ERR(phy->reg_base)) {
326 dev_err(dev, "%s: failed to map phy regulator base\n",
327 __func__);
328 ret = -ENOMEM;
329 goto fail;
332 ret = dsi_phy_regulator_init(phy);
333 if (ret) {
334 dev_err(dev, "%s: failed to init regulator\n", __func__);
335 goto fail;
338 phy->ahb_clk = devm_clk_get(dev, "iface_clk");
339 if (IS_ERR(phy->ahb_clk)) {
340 dev_err(dev, "%s: Unable to get ahb clk\n", __func__);
341 ret = PTR_ERR(phy->ahb_clk);
342 goto fail;
345 /* PLL init will call into clk_register which requires
346 * register access, so we need to enable power and ahb clock.
348 ret = dsi_phy_enable_resource(phy);
349 if (ret)
350 goto fail;
352 phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
353 if (!phy->pll)
354 dev_info(dev,
355 "%s: pll init failed, need separate pll clk driver\n",
356 __func__);
358 dsi_phy_disable_resource(phy);
360 platform_set_drvdata(pdev, phy);
362 return 0;
364 fail:
365 return ret;
368 static int dsi_phy_driver_remove(struct platform_device *pdev)
370 struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
372 if (phy && phy->pll) {
373 msm_dsi_pll_destroy(phy->pll);
374 phy->pll = NULL;
377 platform_set_drvdata(pdev, NULL);
379 return 0;
382 static struct platform_driver dsi_phy_platform_driver = {
383 .probe = dsi_phy_driver_probe,
384 .remove = dsi_phy_driver_remove,
385 .driver = {
386 .name = "msm_dsi_phy",
387 .of_match_table = dsi_phy_dt_match,
391 void __init msm_dsi_phy_driver_register(void)
393 platform_driver_register(&dsi_phy_platform_driver);
396 void __exit msm_dsi_phy_driver_unregister(void)
398 platform_driver_unregister(&dsi_phy_platform_driver);
401 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
402 const unsigned long bit_rate, const unsigned long esc_rate)
404 struct device *dev = &phy->pdev->dev;
405 int ret;
407 if (!phy || !phy->cfg->ops.enable)
408 return -EINVAL;
410 ret = dsi_phy_regulator_enable(phy);
411 if (ret) {
412 dev_err(dev, "%s: regulator enable failed, %d\n",
413 __func__, ret);
414 return ret;
417 ret = phy->cfg->ops.enable(phy, src_pll_id, bit_rate, esc_rate);
418 if (ret) {
419 dev_err(dev, "%s: phy enable failed, %d\n", __func__, ret);
420 dsi_phy_regulator_disable(phy);
421 return ret;
424 return 0;
427 void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
429 if (!phy || !phy->cfg->ops.disable)
430 return;
432 phy->cfg->ops.disable(phy);
434 dsi_phy_regulator_disable(phy);
437 void msm_dsi_phy_get_clk_pre_post(struct msm_dsi_phy *phy,
438 u32 *clk_pre, u32 *clk_post)
440 if (!phy)
441 return;
443 if (clk_pre)
444 *clk_pre = phy->timing.clk_pre;
445 if (clk_post)
446 *clk_post = phy->timing.clk_post;
449 struct msm_dsi_pll *msm_dsi_phy_get_pll(struct msm_dsi_phy *phy)
451 if (!phy)
452 return NULL;
454 return phy->pll;