// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 NVIDIA CORPORATION.  All rights reserved.
 *
 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 * Copyright (C) 2011 Richard Zhao, Linaro <richard.zhao@linaro.org>
 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/types.h>

#include "clk.h"
/* Divider occupies the low byte of the register. */
#define DIV_MASK GENMASK(7, 0)
/* Mux selector field; NOTE(review): shift value 29 assumed from the Tegra
 * SDMMC clock layout — confirm against the TRM. */
#define MUX_SHIFT 29
#define MUX_MASK GENMASK(MUX_SHIFT + 2, MUX_SHIFT)
/* Fixed multiplier used by the fractional-divider rate formula below. */
#define SDMMC_MUL 2

#define get_max_div(d) DIV_MASK
#define get_div_field(val) ((val) & DIV_MASK)
#define get_mux_field(val) (((val) & MUX_MASK) >> MUX_SHIFT)
/* Parent clock names, in the logical order the index tables below map from. */
static const char * const mux_sdmmc_parents[] = {
	"pll_p", "pll_c4_out2", "pll_c4_out0", "pll_c4_out1", "clk_m"
};
33 static const u8 mux_lj_idx
[] = {
34 [0] = 0, [1] = 1, [2] = 2, [3] = 5, [4] = 6
37 static const u8 mux_non_lj_idx
[] = {
38 [0] = 0, [1] = 3, [2] = 7, [3] = 4, [4] = 6
41 static u8
clk_sdmmc_mux_get_parent(struct clk_hw
*hw
)
43 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
48 num_parents
= clk_hw_get_num_parents(hw
);
50 val
= readl_relaxed(sdmmc_mux
->reg
);
51 src
= get_mux_field(val
);
52 if (get_div_field(val
))
53 mux_idx
= mux_non_lj_idx
;
57 for (i
= 0; i
< num_parents
; i
++) {
58 if (mux_idx
[i
] == src
)
62 WARN(1, "Unknown parent selector %d\n", src
);
67 static int clk_sdmmc_mux_set_parent(struct clk_hw
*hw
, u8 index
)
69 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
73 val
= readl_relaxed(sdmmc_mux
->reg
);
74 if (get_div_field(val
))
75 index
= mux_non_lj_idx
[index
];
77 index
= mux_lj_idx
[index
];
80 val
|= index
<< MUX_SHIFT
;
82 writel(val
, sdmmc_mux
->reg
);
87 static unsigned long clk_sdmmc_mux_recalc_rate(struct clk_hw
*hw
,
88 unsigned long parent_rate
)
90 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
93 u64 rate
= parent_rate
;
95 val
= readl_relaxed(sdmmc_mux
->reg
);
96 div
= get_div_field(val
);
107 static int clk_sdmmc_mux_determine_rate(struct clk_hw
*hw
,
108 struct clk_rate_request
*req
)
110 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
112 unsigned long output_rate
= req
->best_parent_rate
;
114 req
->rate
= max(req
->rate
, req
->min_rate
);
115 req
->rate
= min(req
->rate
, req
->max_rate
);
120 div
= div_frac_get(req
->rate
, output_rate
, 8, 1, sdmmc_mux
->div_flags
);
124 if (sdmmc_mux
->div_flags
& TEGRA_DIVIDER_ROUND_UP
)
125 req
->rate
= DIV_ROUND_UP(output_rate
* SDMMC_MUL
,
128 req
->rate
= output_rate
* SDMMC_MUL
/ (div
+ SDMMC_MUL
);
133 static int clk_sdmmc_mux_set_rate(struct clk_hw
*hw
, unsigned long rate
,
134 unsigned long parent_rate
)
136 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
138 unsigned long flags
= 0;
142 div
= div_frac_get(rate
, parent_rate
, 8, 1, sdmmc_mux
->div_flags
);
147 spin_lock_irqsave(sdmmc_mux
->lock
, flags
);
149 src
= clk_sdmmc_mux_get_parent(hw
);
151 src
= mux_non_lj_idx
[src
];
153 src
= mux_lj_idx
[src
];
155 val
= src
<< MUX_SHIFT
;
157 writel(val
, sdmmc_mux
->reg
);
158 fence_udelay(2, sdmmc_mux
->reg
);
161 spin_unlock_irqrestore(sdmmc_mux
->lock
, flags
);
166 static int clk_sdmmc_mux_is_enabled(struct clk_hw
*hw
)
168 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
169 const struct clk_ops
*gate_ops
= sdmmc_mux
->gate_ops
;
170 struct clk_hw
*gate_hw
= &sdmmc_mux
->gate
.hw
;
172 __clk_hw_set_clk(gate_hw
, hw
);
174 return gate_ops
->is_enabled(gate_hw
);
177 static int clk_sdmmc_mux_enable(struct clk_hw
*hw
)
179 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
180 const struct clk_ops
*gate_ops
= sdmmc_mux
->gate_ops
;
181 struct clk_hw
*gate_hw
= &sdmmc_mux
->gate
.hw
;
183 __clk_hw_set_clk(gate_hw
, hw
);
185 return gate_ops
->enable(gate_hw
);
188 static void clk_sdmmc_mux_disable(struct clk_hw
*hw
)
190 struct tegra_sdmmc_mux
*sdmmc_mux
= to_clk_sdmmc_mux(hw
);
191 const struct clk_ops
*gate_ops
= sdmmc_mux
->gate_ops
;
192 struct clk_hw
*gate_hw
= &sdmmc_mux
->gate
.hw
;
194 gate_ops
->disable(gate_hw
);
/*
 * Restore mux selection and divider after a context loss (e.g. resume),
 * using the cached parent and rates kept by the clk framework. Bails out
 * with a WARN if the framework cannot report a valid parent index.
 */
static void clk_sdmmc_mux_restore_context(struct clk_hw *hw)
{
	struct clk_hw *parent = clk_hw_get_parent(hw);
	unsigned long parent_rate = clk_hw_get_rate(parent);
	unsigned long rate = clk_hw_get_rate(hw);
	int parent_id;

	parent_id = clk_hw_get_parent_index(hw);
	if (WARN_ON(parent_id < 0))
		return;

	clk_sdmmc_mux_set_parent(hw, parent_id);
	clk_sdmmc_mux_set_rate(hw, rate, parent_rate);
}
212 static const struct clk_ops tegra_clk_sdmmc_mux_ops
= {
213 .get_parent
= clk_sdmmc_mux_get_parent
,
214 .set_parent
= clk_sdmmc_mux_set_parent
,
215 .determine_rate
= clk_sdmmc_mux_determine_rate
,
216 .recalc_rate
= clk_sdmmc_mux_recalc_rate
,
217 .set_rate
= clk_sdmmc_mux_set_rate
,
218 .is_enabled
= clk_sdmmc_mux_is_enabled
,
219 .enable
= clk_sdmmc_mux_enable
,
220 .disable
= clk_sdmmc_mux_disable
,
221 .restore_context
= clk_sdmmc_mux_restore_context
,
224 struct clk
*tegra_clk_register_sdmmc_mux_div(const char *name
,
225 void __iomem
*clk_base
, u32 offset
, u32 clk_num
, u8 div_flags
,
226 unsigned long flags
, void *lock
)
229 struct clk_init_data init
;
230 const struct tegra_clk_periph_regs
*bank
;
231 struct tegra_sdmmc_mux
*sdmmc_mux
;
233 init
.ops
= &tegra_clk_sdmmc_mux_ops
;
236 init
.parent_names
= mux_sdmmc_parents
;
237 init
.num_parents
= ARRAY_SIZE(mux_sdmmc_parents
);
239 bank
= get_reg_bank(clk_num
);
241 return ERR_PTR(-EINVAL
);
243 sdmmc_mux
= kzalloc(sizeof(*sdmmc_mux
), GFP_KERNEL
);
245 return ERR_PTR(-ENOMEM
);
247 /* Data in .init is copied by clk_register(), so stack variable OK */
248 sdmmc_mux
->hw
.init
= &init
;
249 sdmmc_mux
->reg
= clk_base
+ offset
;
250 sdmmc_mux
->lock
= lock
;
251 sdmmc_mux
->gate
.clk_base
= clk_base
;
252 sdmmc_mux
->gate
.regs
= bank
;
253 sdmmc_mux
->gate
.enable_refcnt
= periph_clk_enb_refcnt
;
254 sdmmc_mux
->gate
.clk_num
= clk_num
;
255 sdmmc_mux
->gate
.flags
= TEGRA_PERIPH_ON_APB
;
256 sdmmc_mux
->div_flags
= div_flags
;
257 sdmmc_mux
->gate_ops
= &tegra_clk_periph_gate_ops
;
259 clk
= clk_register(NULL
, &sdmmc_mux
->hw
);
265 sdmmc_mux
->gate
.hw
.clk
= clk
;