drivers/clk/qcom/clk-rcg2.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013, 2018, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/bug.h>
#include <linux/export.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/regmap.h>
#include <linux/math64.h>
#include <linux/slab.h>

#include <asm/div64.h>

#include "clk-rcg.h"
#include "common.h"
#define CMD_REG			0x0
#define CMD_UPDATE		BIT(0)
#define CMD_ROOT_EN		BIT(1)
#define CMD_DIRTY_CFG		BIT(4)
#define CMD_DIRTY_N		BIT(5)
#define CMD_DIRTY_M		BIT(6)
#define CMD_DIRTY_D		BIT(7)
#define CMD_ROOT_OFF		BIT(31)

#define CFG_REG			0x4
#define CFG_SRC_DIV_SHIFT	0
#define CFG_SRC_SEL_SHIFT	8
#define CFG_SRC_SEL_MASK	(0x7 << CFG_SRC_SEL_SHIFT)
#define CFG_MODE_SHIFT		12
#define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
#define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
#define CFG_HW_CLK_CTRL_MASK	BIT(20)

#define M_REG			0x8
#define N_REG			0xc
#define D_REG			0x10

#define RCG_CFG_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + CFG_REG)
#define RCG_M_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + M_REG)
#define RCG_N_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + N_REG)
#define RCG_D_OFFSET(rcg)	((rcg)->cmd_rcgr + (rcg)->cfg_off + D_REG)
/* Dynamic Frequency Scaling */
#define MAX_PERF_LEVEL		8
#define SE_CMD_DFSR_OFFSET	0x14
#define SE_CMD_DFS_EN		BIT(0)
#define SE_PERF_DFSR(level)	(0x1c + 0x4 * (level))
#define SE_PERF_M_DFSR(level)	(0x5c + 0x4 * (level))
#define SE_PERF_N_DFSR(level)	(0x9c + 0x4 * (level))
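/*
 * Each DFS performance level has its own shadow of the CFG, M and N words,
 * laid out at a 4-byte stride from the base offsets above (levels 0..7,
 * given a MAX_PERF_LEVEL of 8).
 */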
enum freq_policy {
	FLOOR,
	CEIL,
};
static int clk_rcg2_is_enabled(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cmd;
	int ret;

	ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
	if (ret)
		return ret;

	return (cmd & CMD_ROOT_OFF) == 0;
}
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 cfg;
	int i, ret;

	ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
	if (ret)
		goto err;

	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg)
			return i;

err:
	pr_debug("%s: Clock %s has invalid parent, using default.\n",
		 __func__, clk_hw_get_name(hw));
	return 0;
}
static int update_config(struct clk_rcg2 *rcg)
{
	int count, ret;
	u32 cmd;
	struct clk_hw *hw = &rcg->clkr.hw;
	const char *name = clk_hw_get_name(hw);

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_UPDATE, CMD_UPDATE);
	if (ret)
		return ret;

	/* Wait for update to take effect */
	for (count = 500; count > 0; count--) {
		ret = regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG, &cmd);
		if (ret)
			return ret;
		if (!(cmd & CMD_UPDATE))
			return 0;
		udelay(1);
	}

	WARN(1, "%s: rcg didn't update its configuration.", name);
	return -EBUSY;
}
static int clk_rcg2_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;
	u32 cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				 CFG_SRC_SEL_MASK, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
/*
 * Calculate m/n:d rate
 *
 *          parent_rate     m
 *   rate = ----------- x  ---
 *            hid_div       n
 */
static unsigned long
calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
{
	if (hid_div) {
		rate *= 2;
		rate /= hid_div + 1;
	}

	if (mode) {
		u64 tmp = rate;
		tmp *= m;
		do_div(tmp, n);
		rate = tmp;
	}

	return rate;
}
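/*
 * Worked example (illustrative numbers, not taken from a real clock): with
 * parent_rate = 19200000 and a CFG divider field of 3, the first step gives
 * 19200000 * 2 / (3 + 1) = 9600000, i.e. the field encodes a divide-by-2.
 * Adding M/N mode with m = 1 and n = 4 then yields
 * 9600000 * 1 / 4 = 2400000.
 */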
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;

	regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);

	if (rcg->mnd_width) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
		m &= mask;
		regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &n);
		/* The N register holds ~(n - m); undo that to recover n */
		n = ~n;
		n &= mask;
		n += m;
		mode = cfg & CFG_MODE_MASK;
		mode >>= CFG_MODE_SHIFT;
	}

	mask = BIT(rcg->hid_width) - 1;
	hid_div = cfg >> CFG_SRC_DIV_SHIFT;
	hid_div &= mask;

	return calc_rate(parent_rate, m, n, mode, hid_div);
}
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
				    struct clk_rate_request *req,
				    enum freq_policy policy)
{
	unsigned long clk_flags, rate = req->rate;
	struct clk_hw *p;
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int index;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(f, rate);
		break;
	case CEIL:
		f = qcom_find_freq(f, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	if (index < 0)
		return index;

	clk_flags = clk_hw_get_flags(hw);
	p = clk_hw_get_parent_by_index(hw, index);
	if (clk_flags & CLK_SET_RATE_PARENT) {
		rate = f->freq;
		if (f->pre_div) {
			if (!rate)
				rate = req->rate;
			rate /= 2;
			rate *= f->pre_div + 1;
		}

		if (f->n) {
			u64 tmp = rate;
			tmp = tmp * f->n;
			do_div(tmp, f->m);
			rate = tmp;
		}
	} else {
		rate = clk_hw_get_rate(p);
	}
	req->best_parent_hw = p;
	req->best_parent_rate = rate;
	req->rate = f->freq;

	return 0;
}
static int clk_rcg2_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, CEIL);
}

static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
					 struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
}
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	u32 cfg, mask;
	struct clk_hw *hw = &rcg->clkr.hw;
	int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	if (index < 0)
		return index;

	if (rcg->mnd_width && f->n) {
		mask = BIT(rcg->mnd_width) - 1;
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_M_OFFSET(rcg), mask, f->m);
		if (ret)
			return ret;

		/* N and D are programmed as one's complements of (n - m) and n */
		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_N_OFFSET(rcg), mask, ~(f->n - f->m));
		if (ret)
			return ret;

		ret = regmap_update_bits(rcg->clkr.regmap,
					 RCG_D_OFFSET(rcg), mask, ~f->n);
		if (ret)
			return ret;
	}

	mask = BIT(rcg->hid_width) - 1;
	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	if (rcg->mnd_width && f->n && (f->m != f->n))
		cfg |= CFG_MODE_DUAL_EDGE;
	return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
				  mask, cfg);
}
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
	int ret;

	ret = __clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return update_config(rcg);
}
static int __clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			       enum freq_policy policy)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	switch (policy) {
	case FLOOR:
		f = qcom_find_freq_floor(rcg->freq_tbl, rate);
		break;
	case CEIL:
		f = qcom_find_freq(rcg->freq_tbl, rate);
		break;
	default:
		return -EINVAL;
	}

	if (!f)
		return -EINVAL;

	return clk_rcg2_configure(rcg, f);
}
static int clk_rcg2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long parent_rate)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, CEIL);
}

static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}
const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
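/*
 * A minimal sketch of how a clock controller driver instantiates one of
 * these RCGs; the register offset, parent data and frequency below are
 * hypothetical, not taken from this file (F() is the usual per-driver
 * frequency table macro):
 *
 *	static const struct freq_tbl ftbl_example_clk_src[] = {
 *		F(19200000, P_XO, 1, 0, 0),
 *		{ }
 *	};
 *
 *	static struct clk_rcg2 example_clk_src = {
 *		.cmd_rcgr = 0x1000,
 *		.mnd_width = 0,
 *		.hid_width = 5,
 *		.parent_map = example_parent_map,
 *		.freq_tbl = ftbl_example_clk_src,
 *		.clkr.hw.init = &(struct clk_init_data){
 *			.name = "example_clk_src",
 *			.parent_names = example_parents,
 *			.num_parents = 1,
 *			.ops = &clk_rcg2_ops,
 *		},
 *	};
 */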
const struct clk_ops clk_rcg2_floor_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
struct frac_entry {
	int num;
	int den;
};

static const struct frac_entry frac_table_675m[] = {	/* link rate of 270M */
	{ 52, 295 },	/* 119 M */
	{ 11, 57 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 50 },	/* 148.50 M */
	{ 47, 206 },	/* 154 M */
	{ 31, 100 },	/* 205.25 M */
	{ 107, 269 },	/* 268.50 M */
	{ },
};
static const struct frac_entry frac_table_810m[] = {	/* link rate of 162M */
	{ 31, 211 },	/* 119 M */
	{ 32, 199 },	/* 130.25 M */
	{ 63, 307 },	/* 138.50 M */
	{ 11, 60 },	/* 148.50 M */
	{ 50, 263 },	/* 154 M */
	{ 31, 120 },	/* 205.25 M */
	{ 119, 359 },	/* 268.50 M */
	{ },
};
static int clk_edp_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 src_rate = parent_rate;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;

	if (src_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}

	return -EINVAL;
}
static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_edp_pixel_set_rate(hw, rate, parent_rate);
}
static int clk_edp_pixel_determine_rate(struct clk_hw *hw,
					struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	const struct frac_entry *frac;
	int delta = 100000;
	s64 request;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);

	/* Force the correct parent */
	req->best_parent_hw = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = clk_hw_get_rate(req->best_parent_hw);

	if (req->best_parent_rate == 810000000)
		frac = frac_table_810m;
	else
		frac = frac_table_675m;

	for (; frac->num; frac++) {
		request = req->rate;
		request *= frac->den;
		request = div_s64(request, frac->num);
		if ((req->best_parent_rate < (request - delta)) ||
		    (req->best_parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		hid_div >>= CFG_SRC_DIV_SHIFT;
		hid_div &= mask;

		req->rate = calc_rate(req->best_parent_rate,
				      frac->num, frac->den,
				      !!frac->den, hid_div);
		return 0;
	}

	return -EINVAL;
}
const struct clk_ops clk_edp_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_edp_pixel_set_rate,
	.set_rate_and_parent = clk_edp_pixel_set_rate_and_parent,
	.determine_rate = clk_edp_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
static int clk_byte_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f = rcg->freq_tbl;
	int index = qcom_find_src_index(hw, rcg->parent_map, f->src);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;

	if (req->rate == 0)
		return -EINVAL;

	req->best_parent_hw = p = clk_hw_get_parent_by_index(hw, index);
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, req->rate);

	div = DIV_ROUND_UP((2 * parent_rate), req->rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
static int clk_byte_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = *rcg->freq_tbl;
	unsigned long div;
	u32 mask = BIT(rcg->hid_width) - 1;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	return clk_rcg2_configure(rcg, &f);
}
static int clk_byte_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Parent index is set statically in frequency table */
	return clk_byte_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte_set_rate,
	.set_rate_and_parent = clk_byte_set_rate_and_parent,
	.determine_rate = clk_byte_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte_ops);
static int clk_byte2_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	unsigned long parent_rate, div;
	u32 mask = BIT(rcg->hid_width) - 1;
	struct clk_hw *p;
	unsigned long rate = req->rate;

	if (rate == 0)
		return -EINVAL;

	p = req->best_parent_hw;
	req->best_parent_rate = parent_rate = clk_hw_round_rate(p, rate);

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	req->rate = calc_rate(parent_rate, 0, 0, 0, div);

	return 0;
}
static int clk_byte2_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	unsigned long div;
	int i, num_parents = clk_hw_get_num_parents(hw);
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 cfg;

	div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
	div = min_t(u32, div, mask);

	f.pre_div = div;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	/* Keep whichever source the RCG is currently running from */
	for (i = 0; i < num_parents; i++) {
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			return clk_rcg2_configure(rcg, &f);
		}
	}

	return -EINVAL;
}
static int clk_byte2_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	/* Read the hardware to determine parent during set_rate */
	return clk_byte2_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_byte2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_byte2_set_rate,
	.set_rate_and_parent = clk_byte2_set_rate_and_parent,
	.determine_rate = clk_byte2_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_byte2_ops);
static const struct frac_entry frac_table_pixel[] = {
	{ 3, 8 },
	{ 2, 9 },
	{ 4, 9 },
	{ 1, 1 },
	{ },	/* terminator; the lookup loops stop on frac->num == 0 */
};
static int clk_pixel_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	unsigned long request, src_rate;
	int delta = 100000;
	const struct frac_entry *frac = frac_table_pixel;

	for (; frac->num; frac++) {
		request = (req->rate * frac->den) / frac->num;

		src_rate = clk_hw_round_rate(req->best_parent_hw, request);
		if ((src_rate < (request - delta)) ||
		    (src_rate > (request + delta)))
			continue;

		req->best_parent_rate = src_rate;
		req->rate = (src_rate * frac->num) / frac->den;
		return 0;
	}

	return -EINVAL;
}
static int clk_pixel_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct freq_tbl f = { 0 };
	const struct frac_entry *frac = frac_table_pixel;
	unsigned long request;
	int delta = 100000;
	u32 mask = BIT(rcg->hid_width) - 1;
	u32 hid_div, cfg;
	int i, num_parents = clk_hw_get_num_parents(hw);

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
	cfg &= CFG_SRC_SEL_MASK;
	cfg >>= CFG_SRC_SEL_SHIFT;

	for (i = 0; i < num_parents; i++)
		if (cfg == rcg->parent_map[i].cfg) {
			f.src = rcg->parent_map[i].src;
			break;
		}

	for (; frac->num; frac++) {
		request = (rate * frac->den) / frac->num;

		if ((parent_rate < (request - delta)) ||
		    (parent_rate > (request + delta)))
			continue;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
			    &hid_div);
		f.pre_div = hid_div;
		f.pre_div >>= CFG_SRC_DIV_SHIFT;
		f.pre_div &= mask;
		f.m = frac->num;
		f.n = frac->den;

		return clk_rcg2_configure(rcg, &f);
	}
	return -EINVAL;
}
static int clk_pixel_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	return clk_pixel_set_rate(hw, rate, parent_rate);
}
const struct clk_ops clk_pixel_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_pixel_set_rate,
	.set_rate_and_parent = clk_pixel_set_rate_and_parent,
	.determine_rate = clk_pixel_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_pixel_ops);
static int clk_gfx3d_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct clk_rate_request parent_req = { };
	struct clk_hw *p2, *p8, *p9, *xo;
	unsigned long p9_rate;
	int ret;

	xo = clk_hw_get_parent_by_index(hw, 0);
	if (req->rate == clk_hw_get_rate(xo)) {
		req->best_parent_hw = xo;
		return 0;
	}

	p9 = clk_hw_get_parent_by_index(hw, 2);
	p2 = clk_hw_get_parent_by_index(hw, 3);
	p8 = clk_hw_get_parent_by_index(hw, 4);

	/* PLL9 is a fixed rate PLL */
	p9_rate = clk_hw_get_rate(p9);

	parent_req.rate = req->rate = min(req->rate, p9_rate);
	if (req->rate == p9_rate) {
		req->rate = req->best_parent_rate = p9_rate;
		req->best_parent_hw = p9;
		return 0;
	}

	if (req->best_parent_hw == p9) {
		/* Are we going back to a previously used rate? */
		if (clk_hw_get_rate(p8) == req->rate)
			req->best_parent_hw = p8;
		else
			req->best_parent_hw = p2;
	} else if (req->best_parent_hw == p8) {
		req->best_parent_hw = p2;
	} else {
		req->best_parent_hw = p8;
	}

	ret = __clk_determine_rate(req->best_parent_hw, &parent_req);
	if (ret)
		return ret;

	req->rate = req->best_parent_rate = parent_req.rate;

	return 0;
}
static int clk_gfx3d_set_rate_and_parent(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate, u8 index)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;
	int ret;

	/* Just mux it, we don't use the division or m/n hardware */
	cfg = rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
	ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
	if (ret)
		return ret;

	return update_config(rcg);
}
static int clk_gfx3d_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	/*
	 * We should never get here; clk_gfx3d_determine_rate() should always
	 * make us use a different parent than what we're currently using, so
	 * clk_gfx3d_set_rate_and_parent() should always be called.
	 */
	return 0;
}
const struct clk_ops clk_gfx3d_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.set_rate = clk_gfx3d_set_rate,
	.set_rate_and_parent = clk_gfx3d_set_rate_and_parent,
	.determine_rate = clk_gfx3d_determine_rate,
};
EXPORT_SYMBOL_GPL(clk_gfx3d_ops);
static int clk_rcg2_set_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const char *name = clk_hw_get_name(hw);
	int ret, count;

	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				 CMD_ROOT_EN, CMD_ROOT_EN);
	if (ret)
		return ret;

	/* wait for RCG to turn ON */
	for (count = 500; count > 0; count--) {
		if (clk_rcg2_is_enabled(hw))
			return 0;

		udelay(1);
	}

	pr_err("%s: RCG did not turn on\n", name);
	return -ETIMEDOUT;
}
static int clk_rcg2_clear_force_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);

	return regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
				  CMD_ROOT_EN, 0);
}
static int
clk_rcg2_shared_force_enable_clear(struct clk_hw *hw, const struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = clk_rcg2_configure(rcg, f);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
				    unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	const struct freq_tbl *f;

	f = qcom_find_freq(rcg->freq_tbl, rate);
	if (!f)
		return -EINVAL;

	/*
	 * In case clock is disabled, update the CFG, M, N and D registers
	 * and don't hit the update bit of CMD register.
	 */
	if (!__clk_is_enabled(hw->clk))
		return __clk_rcg2_configure(rcg, f);

	return clk_rcg2_shared_force_enable_clear(hw, f);
}
static int clk_rcg2_shared_set_rate_and_parent(struct clk_hw *hw,
		unsigned long rate, unsigned long parent_rate, u8 index)
{
	return clk_rcg2_shared_set_rate(hw, rate, parent_rate);
}
static int clk_rcg2_shared_enable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	/*
	 * Set the update bit because required configuration has already
	 * been written in clk_rcg2_shared_set_rate()
	 */
	ret = clk_rcg2_set_force_enable(hw);
	if (ret)
		return ret;

	ret = update_config(rcg);
	if (ret)
		return ret;

	return clk_rcg2_clear_force_enable(hw);
}
static void clk_rcg2_shared_disable(struct clk_hw *hw)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 cfg;

	/*
	 * Store current configuration as switching to safe source would clear
	 * the SRC and DIV of CFG register
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);

	/*
	 * Park the RCG at a safe configuration - sourced off of safe source.
	 * Force enable and disable the RCG while configuring it to safeguard
	 * against any update signal coming from the downstream clock.
	 * The current parent is still prepared and enabled at this point, and
	 * the safe source is always on while application processor subsystem
	 * is online. Therefore, the RCG can safely switch its parent.
	 */
	clk_rcg2_set_force_enable(hw);

	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG,
		     rcg->safe_src_index << CFG_SRC_SEL_SHIFT);

	update_config(rcg);

	clk_rcg2_clear_force_enable(hw);

	/* Write back the stored configuration corresponding to current rate */
	regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg);
}
const struct clk_ops clk_rcg2_shared_ops = {
	.enable = clk_rcg2_shared_enable,
	.disable = clk_rcg2_shared_disable,
	.get_parent = clk_rcg2_get_parent,
	.set_parent = clk_rcg2_set_parent,
	.recalc_rate = clk_rcg2_recalc_rate,
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_shared_set_rate,
	.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
};
EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops);
/* Common APIs to be used for DFS based RCGR */
static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l,
				       struct freq_tbl *f)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	struct clk_hw *p;
	unsigned long prate = 0;
	u32 val, mask, cfg, mode, src;
	int i, num_parents;

	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg);

	mask = BIT(rcg->hid_width) - 1;
	f->pre_div = 1;
	if (cfg & mask)
		f->pre_div = cfg & mask;

	/*
	 * Extract the source select field into its own variable; cfg must
	 * stay intact because the MODE field is read from it below.
	 */
	src = cfg & CFG_SRC_SEL_MASK;
	src >>= CFG_SRC_SEL_SHIFT;

	num_parents = clk_hw_get_num_parents(hw);
	for (i = 0; i < num_parents; i++) {
		if (src == rcg->parent_map[i].cfg) {
			f->src = rcg->parent_map[i].src;
			p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i);
			prate = clk_hw_get_rate(p);
		}
	}

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l),
			    &val);
		val &= mask;
		f->m = val;

		regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l),
			    &val);
		val = ~val;
		val &= mask;
		val += f->m;
		f->n = val;
	}

	f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div);
}
static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg)
{
	struct freq_tbl *freq_tbl;
	int i;

	/* Allocate space for 1 extra since table is NULL terminated */
	freq_tbl = kcalloc(MAX_PERF_LEVEL + 1, sizeof(*freq_tbl), GFP_KERNEL);
	if (!freq_tbl)
		return -ENOMEM;
	rcg->freq_tbl = freq_tbl;

	for (i = 0; i < MAX_PERF_LEVEL; i++)
		clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i);

	return 0;
}
static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	int ret;

	if (!rcg->freq_tbl) {
		ret = clk_rcg2_dfs_populate_freq_table(rcg);
		if (ret) {
			pr_err("Failed to update DFS tables for %s\n",
			       clk_hw_get_name(hw));
			return ret;
		}
	}

	return clk_rcg2_determine_rate(hw, req);
}
static unsigned long
clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 level, mask, cfg, m = 0, n = 0, mode, pre_div;

	regmap_read(rcg->clkr.regmap,
		    rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level);
	level &= GENMASK(4, 1);
	level >>= 1;

	if (rcg->freq_tbl)
		return rcg->freq_tbl[level].freq;

	/*
	 * Assume that parent_rate is actually the parent because
	 * we can't do any better at figuring it out when the table
	 * hasn't been populated yet. We only populate the table
	 * in determine_rate because we can't guarantee the parents
	 * will be registered with the framework until then.
	 */
	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level),
		    &cfg);

	mask = BIT(rcg->hid_width) - 1;
	pre_div = 1;
	if (cfg & mask)
		pre_div = cfg & mask;

	mode = cfg & CFG_MODE_MASK;
	mode >>= CFG_MODE_SHIFT;
	if (mode) {
		mask = BIT(rcg->mnd_width) - 1;
		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m);
		m &= mask;

		regmap_read(rcg->clkr.regmap,
			    rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n);
		n = ~n;
		n &= mask;
		n += m;
	}

	return calc_rate(parent_rate, m, n, mode, pre_div);
}
static const struct clk_ops clk_rcg2_dfs_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
	.determine_rate = clk_rcg2_dfs_determine_rate,
	.recalc_rate = clk_rcg2_dfs_recalc_rate,
};
static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data,
			       struct regmap *regmap)
{
	struct clk_rcg2 *rcg = data->rcg;
	struct clk_init_data *init = data->init;
	u32 val;
	int ret;

	ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val);
	if (ret)
		return -EINVAL;

	if (!(val & SE_CMD_DFS_EN))
		return 0;

	/*
	 * Rate changes with consumer writing a register in
	 * their own I/O region
	 */
	init->flags |= CLK_GET_RATE_NOCACHE;
	init->ops = &clk_rcg2_dfs_ops;

	rcg->freq_tbl = NULL;

	return 0;
}
int qcom_cc_register_rcg_dfs(struct regmap *regmap,
			     const struct clk_rcg_dfs_data *rcgs, size_t len)
{
	int i, ret;

	for (i = 0; i < len; i++) {
		ret = clk_rcg2_enable_dfs(&rcgs[i], regmap);
		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);
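/*
 * A minimal usage sketch (the names are illustrative, not from this file):
 * a controller driver lists its DFS-capable RCGs and hands them over after
 * obtaining its regmap.
 *
 *	static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
 *		{ .rcg = &qup0_s0_clk_src, .init = &qup0_s0_init },
 *	};
 *
 *	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
 *				       ARRAY_SIZE(gcc_dfs_clocks));
 */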