Linux 4.18.10
linux/fpc-iii.git: drivers/clk/sunxi-ng/ccu_nm.c
blob: 4e2073307f34013e215fd827cc7048f6d4608bc6

/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>

#include "ccu_frac.h"
#include "ccu_gate.h"
#include "ccu_nm.h"

struct _ccu_nm {
        unsigned long   n, min_n, max_n;
        unsigned long   m, min_m, max_m;
};
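
/*
 * Walk the allowed N and M ranges and pick the pair whose resulting
 * rate gets closest to the requested rate without exceeding it.
 */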
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
                             struct _ccu_nm *nm)
{
        unsigned long best_rate = 0;
        unsigned long best_n = 0, best_m = 0;
        unsigned long _n, _m;

        for (_n = nm->min_n; _n <= nm->max_n; _n++) {
                for (_m = nm->min_m; _m <= nm->max_m; _m++) {
                        unsigned long tmp_rate = parent * _n / _m;

                        /* Never overshoot the requested rate */
                        if (tmp_rate > rate)
                                continue;

                        if ((rate - tmp_rate) < (rate - best_rate)) {
                                best_rate = tmp_rate;
                                best_n = _n;
                                best_m = _m;
                        }
                }
        }

        nm->n = best_n;
        nm->m = best_m;
}
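
/* The gate callbacks simply defer to the common gate helper using the enable bits. */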
static void ccu_nm_disable(struct clk_hw *hw)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);

        return ccu_gate_helper_disable(&nm->common, nm->enable);
}

static int ccu_nm_enable(struct clk_hw *hw)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);

        return ccu_gate_helper_enable(&nm->common, nm->enable);
}

static int ccu_nm_is_enabled(struct clk_hw *hw)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);

        return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
}
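
/*
 * Read back the current rate: fractional mode takes precedence; otherwise
 * N and M are decoded from the factor register (with sigma-delta modulation
 * handled by the SDM helper) and the fixed post-divider is applied last.
 */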
static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        unsigned long rate;
        unsigned long n, m;
        u32 reg;

        if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac)) {
                rate = ccu_frac_helper_read_rate(&nm->common, &nm->frac);

                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        rate /= nm->fixed_post_div;

                return rate;
        }

        reg = readl(nm->common.base + nm->common.reg);

        /* Decode the N and M fields, treating a zero factor as 1 */
        n = reg >> nm->n.shift;
        n &= (1 << nm->n.width) - 1;
        n += nm->n.offset;
        if (!n)
                n++;

        m = reg >> nm->m.shift;
        m &= (1 << nm->m.width) - 1;
        m += nm->m.offset;
        if (!m)
                m++;

        if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
                rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
        else
                rate = parent_rate * n / m;

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= nm->fixed_post_div;

        return rate;
}
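
/*
 * Round a requested rate: clamp to the minimum supported rate, return it
 * unchanged when the fractional or sigma-delta helpers can produce it
 * exactly, and fall back to the best integer N/M pair otherwise.
 */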
static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *parent_rate)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        struct _ccu_nm _nm;

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate *= nm->fixed_post_div;

        if (rate < nm->min_rate) {
                rate = nm->min_rate;
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        rate /= nm->fixed_post_div;
                return rate;
        }

        if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        rate /= nm->fixed_post_div;
                return rate;
        }

        if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
                if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                        rate /= nm->fixed_post_div;
                return rate;
        }

        _nm.min_n = nm->n.min ?: 1;
        _nm.max_n = nm->n.max ?: 1 << nm->n.width;
        _nm.min_m = 1;
        _nm.max_m = nm->m.max ?: 1 << nm->m.width;

        ccu_nm_find_best(*parent_rate, rate, &_nm);
        rate = *parent_rate * _nm.n / _nm.m;

        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= nm->fixed_post_div;

        return rate;
}
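
/*
 * Program the factor register for the requested rate. Fractional rates are
 * handed to the frac helper (after forcing M to 0), sigma-delta rates take
 * their factors from the SDM helper, and anything else uses the best N/M
 * pair found by ccu_nm_find_best(). The register update happens under the
 * CCU spinlock, and we wait for the PLL to lock before returning.
 */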
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct ccu_nm *nm = hw_to_ccu_nm(hw);
        struct _ccu_nm _nm;
        unsigned long flags;
        u32 reg;

        /* Adjust the target rate according to the fixed post-divider */
        if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * nm->fixed_post_div;

        if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
                spin_lock_irqsave(nm->common.lock, flags);

                /* most SoCs require M to be 0 if fractional mode is used */
                reg = readl(nm->common.base + nm->common.reg);
                reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
                writel(reg, nm->common.base + nm->common.reg);

                spin_unlock_irqrestore(nm->common.lock, flags);

                ccu_frac_helper_enable(&nm->common, &nm->frac);

                return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
                                                rate, nm->lock);
        } else {
                ccu_frac_helper_disable(&nm->common, &nm->frac);
        }

        _nm.min_n = nm->n.min ?: 1;
        _nm.max_n = nm->n.max ?: 1 << nm->n.width;
        _nm.min_m = 1;
        _nm.max_m = nm->m.max ?: 1 << nm->m.width;

        if (ccu_sdm_helper_has_rate(&nm->common, &nm->sdm, rate)) {
                ccu_sdm_helper_enable(&nm->common, &nm->sdm, rate);

                /* Sigma delta modulation requires specific N and M factors */
                ccu_sdm_helper_get_factors(&nm->common, &nm->sdm, rate,
                                           &_nm.m, &_nm.n);
        } else {
                ccu_sdm_helper_disable(&nm->common, &nm->sdm);
                ccu_nm_find_best(parent_rate, rate, &_nm);
        }

        spin_lock_irqsave(nm->common.lock, flags);

        reg = readl(nm->common.base + nm->common.reg);
        reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
        reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

        reg |= (_nm.n - nm->n.offset) << nm->n.shift;
        reg |= (_nm.m - nm->m.offset) << nm->m.shift;
        writel(reg, nm->common.base + nm->common.reg);

        spin_unlock_irqrestore(nm->common.lock, flags);

        ccu_helper_wait_for_lock(&nm->common, nm->lock);

        return 0;
}
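
/*
 * Clock operations for NM-style PLLs; the per-SoC sunxi-ng drivers reference
 * this table through the NM clock definitions in ccu_nm.h.
 */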
const struct clk_ops ccu_nm_ops = {
        .disable        = ccu_nm_disable,
        .enable         = ccu_nm_enable,
        .is_enabled     = ccu_nm_is_enabled,

        .recalc_rate    = ccu_nm_recalc_rate,
        .round_rate     = ccu_nm_round_rate,
        .set_rate       = ccu_nm_set_rate,
};