drivers/clk/sunxi-ng/ccu_mp.c

/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mp.h"
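
/*
 * Brute-force search for the best M and P factors: P is tried over
 * powers of two and M linearly, and the combination whose output rate
 * is closest to the requested rate without exceeding it wins. Ties
 * keep the first match found, e.g. a 24 MHz parent and a 6 MHz request
 * settle on P = 1, M = 4 rather than P = 2, M = 2 (illustrative
 * numbers, not taken from this driver).
 */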
static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
                             unsigned int max_m, unsigned int max_p,
                             unsigned int *m, unsigned int *p)
{
        unsigned long best_rate = 0;
        unsigned int best_m = 0, best_p = 0;
        unsigned int _m, _p;

        for (_p = 1; _p <= max_p; _p <<= 1) {
                for (_m = 1; _m <= max_m; _m++) {
                        unsigned long tmp_rate = parent / _p / _m;

                        if (tmp_rate > rate)
                                continue;

                        if ((rate - tmp_rate) < (rate - best_rate)) {
                                best_rate = tmp_rate;
                                best_m = _m;
                                best_p = _p;
                        }
                }
        }

        *m = best_m;
        *p = best_p;
}

static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
                                       struct clk_hw *hw,
                                       unsigned long *parent_rate,
                                       unsigned long rate,
                                       void *data)
{
        struct ccu_mp *cmp = data;
        unsigned int max_m, max_p;
        unsigned int m, p;

        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate *= cmp->fixed_post_div;

        max_m = cmp->m.max ?: 1 << cmp->m.width;
        /* The P field encodes log2(P), so the largest divider is 2^(2^width - 1) */
        max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

        ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
        rate = *parent_rate / p / m;

        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= cmp->fixed_post_div;

        return rate;
}

static void ccu_mp_disable(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        ccu_gate_helper_disable(&cmp->common, cmp->enable);
}

static int ccu_mp_enable(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_gate_helper_enable(&cmp->common, cmp->enable);
}

static int ccu_mp_is_enabled(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_gate_helper_is_enabled(&cmp->common, cmp->enable);
}

static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);
        unsigned long rate;
        unsigned int m, p;
        u32 reg;

        /* Adjust parent_rate according to pre-dividers */
        parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
                                                  parent_rate);

        reg = readl(cmp->common.base + cmp->common.reg);

        m = reg >> cmp->m.shift;
        m &= (1 << cmp->m.width) - 1;
        m += cmp->m.offset;
        if (!m)
                m++;

        p = reg >> cmp->p.shift;
        p &= (1 << cmp->p.width) - 1;

        /* The P field holds log2 of the divider, hence the shift */
        rate = (parent_rate >> p) / m;
        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= cmp->fixed_post_div;

        return rate;
}

static int ccu_mp_determine_rate(struct clk_hw *hw,
                                 struct clk_rate_request *req)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_mux_helper_determine_rate(&cmp->common, &cmp->mux,
                                             req, ccu_mp_round_rate, cmp);
}

static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);
        unsigned long flags;
        unsigned int max_m, max_p;
        unsigned int m, p;
        u32 reg;

        /* Adjust parent_rate according to pre-dividers */
        parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
                                                  parent_rate);

        max_m = cmp->m.max ?: 1 << cmp->m.width;
        max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

        /* Adjust target rate according to post-dividers */
        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * cmp->fixed_post_div;

        ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);

        spin_lock_irqsave(cmp->common.lock, flags);

        reg = readl(cmp->common.base + cmp->common.reg);
        reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
        reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
        reg |= (m - cmp->m.offset) << cmp->m.shift;
        reg |= ilog2(p) << cmp->p.shift;

        writel(reg, cmp->common.base + cmp->common.reg);

        spin_unlock_irqrestore(cmp->common.lock, flags);

        return 0;
}

static u8 ccu_mp_get_parent(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_mux_helper_get_parent(&cmp->common, &cmp->mux);
}

static int ccu_mp_set_parent(struct clk_hw *hw, u8 index)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_mux_helper_set_parent(&cmp->common, &cmp->mux, index);
}

const struct clk_ops ccu_mp_ops = {
        .disable        = ccu_mp_disable,
        .enable         = ccu_mp_enable,
        .is_enabled     = ccu_mp_is_enabled,

        .get_parent     = ccu_mp_get_parent,
        .set_parent     = ccu_mp_set_parent,

        .determine_rate = ccu_mp_determine_rate,
        .recalc_rate    = ccu_mp_recalc_rate,
        .set_rate       = ccu_mp_set_rate,
};
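
/*
 * Usage sketch (illustrative, not part of this file): SoC CCU drivers
 * normally do not reference these ops directly. They describe an
 * M-and-P clock with the helper macros in ccu_mp.h, which hook the
 * resulting clk_hw up to ccu_mp_ops. Assuming the
 * SUNXI_CCU_MP_WITH_MUX_GATE macro, a declaration looks roughly like
 * the following, where "parents" is a parent-name array and the
 * register offset and field positions are purely illustrative:
 *
 *	static SUNXI_CCU_MP_WITH_MUX_GATE(mmc0_clk, "mmc0", parents, 0x088,
 *					  0, 4,		M factor
 *					  16, 2,	P factor
 *					  24, 2,	mux
 *					  BIT(31),	gate
 *					  0);
 */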

/*
 * Support for MMC timing mode switching
 *
 * The MMC clocks on some SoCs support switching between old and
 * new timing modes. A platform specific API is provided to query
 * and set the timing mode on supported SoCs.
 *
 * In addition, a special class of ccu_mp_ops is provided, which
 * takes into account the timing mode switch. When the new timing
 * mode is active, the clock output rate is halved. This new class
 * is a wrapper around the generic ccu_mp_ops. When clock rates
 * are passed through to ccu_mp_ops callbacks, they are doubled
 * if the new timing mode bit is set, to account for the post
 * divider. Conversely, when clock rates are passed back, they
 * are halved if the mode bit is set.
 */
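
/*
 * Worked example (illustrative numbers): with the new timing mode bit
 * set, a request for 50 MHz is doubled to 100 MHz before being handed
 * to the generic MP logic, which programs the dividers for 100 MHz;
 * the extra /2 post-divider of the new timing mode then brings the
 * output back to the requested 50 MHz. recalc_rate performs the
 * inverse and halves the rate derived from the register.
 */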

static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw,
                                            unsigned long parent_rate)
{
        unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate);
        struct ccu_common *cm = hw_to_ccu_common(hw);
        u32 val = readl(cm->base + cm->reg);

        if (val & CCU_MMC_NEW_TIMING_MODE)
                return rate / 2;

        return rate;
}

static int ccu_mp_mmc_determine_rate(struct clk_hw *hw,
                                     struct clk_rate_request *req)
{
        struct ccu_common *cm = hw_to_ccu_common(hw);
        u32 val = readl(cm->base + cm->reg);
        int ret;

        /* adjust the requested clock rate */
        if (val & CCU_MMC_NEW_TIMING_MODE) {
                req->rate *= 2;
                req->min_rate *= 2;
                req->max_rate *= 2;
        }

        ret = ccu_mp_determine_rate(hw, req);

        /* re-adjust the requested clock rate back */
        if (val & CCU_MMC_NEW_TIMING_MODE) {
                req->rate /= 2;
                req->min_rate /= 2;
                req->max_rate /= 2;
        }

        return ret;
}

static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
{
        struct ccu_common *cm = hw_to_ccu_common(hw);
        u32 val = readl(cm->base + cm->reg);

        if (val & CCU_MMC_NEW_TIMING_MODE)
                rate *= 2;

        return ccu_mp_set_rate(hw, rate, parent_rate);
}

const struct clk_ops ccu_mp_mmc_ops = {
        .disable        = ccu_mp_disable,
        .enable         = ccu_mp_enable,
        .is_enabled     = ccu_mp_is_enabled,

        .get_parent     = ccu_mp_get_parent,
        .set_parent     = ccu_mp_set_parent,

        .determine_rate = ccu_mp_mmc_determine_rate,
        .recalc_rate    = ccu_mp_mmc_recalc_rate,
        .set_rate       = ccu_mp_mmc_set_rate,
};