/*
 * Copyright (c) 2016, Linaro Limited
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mfd/qcom_rpm.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <dt-bindings/mfd/qcom-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>

#define QCOM_RPM_MISC_CLK_TYPE		0x306b6c63
#define QCOM_RPM_SCALING_ENABLE_ID	0x2

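/*
 * Each RPM-managed clock is modelled as a pair of struct clk_rpm instances:
 * a normal clock that votes in both the RPM "active" and "sleep" sets, and
 * an active-only peer that only cares about the rate while the CPU
 * subsystem is awake. The two peers point at each other so that prepare,
 * unprepare and set_rate can aggregate both votes before talking to the
 * RPM. The macros below generate such a pair for every exported clock.
 */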
#define DEFINE_CLK_RPM(_platform, _name, _active, r_id)		\
	static struct clk_rpm _platform##_##_active;			\
	static struct clk_rpm _platform##_##_name = {			\
		.rpm_clk_id = (r_id),					\
		.peer = &_platform##_##_active,				\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_ops,				\
			.name = #_name,					\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpm _platform##_##_active = {			\
		.rpm_clk_id = (r_id),					\
		.peer = &_platform##_##_name,				\
		.active_only = true,					\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_ops,				\
			.name = #_active,				\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	}

#define DEFINE_CLK_RPM_PXO_BRANCH(_platform, _name, _active, r_id, r)	\
	static struct clk_rpm _platform##_##_active;			\
	static struct clk_rpm _platform##_##_name = {			\
		.rpm_clk_id = (r_id),					\
		.branch = true,						\
		.rate = (r),						\
		.peer = &_platform##_##_active,				\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_name,					\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpm _platform##_##_active = {			\
		.rpm_clk_id = (r_id),					\
		.branch = true,						\
		.rate = (r),						\
		.active_only = true,					\
		.peer = &_platform##_##_name,				\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_active,				\
			.parent_names = (const char *[]){ "pxo_board" }, \
			.num_parents = 1,				\
		},							\
	}

#define DEFINE_CLK_RPM_CXO_BRANCH(_platform, _name, _active, r_id, r)	\
	static struct clk_rpm _platform##_##_active;			\
	static struct clk_rpm _platform##_##_name = {			\
		.rpm_clk_id = (r_id),					\
		.branch = true,						\
		.rate = (r),						\
		.peer = &_platform##_##_active,				\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_name,					\
			.parent_names = (const char *[]){ "cxo_board" }, \
			.num_parents = 1,				\
		},							\
	};								\
	static struct clk_rpm _platform##_##_active = {			\
		.rpm_clk_id = (r_id),					\
		.branch = true,						\
		.rate = (r),						\
		.active_only = true,					\
		.peer = &_platform##_##_name,				\
		.hw.init = &(struct clk_init_data){			\
			.ops = &clk_rpm_branch_ops,			\
			.name = #_active,				\
			.parent_names = (const char *[]){ "cxo_board" }, \
			.num_parents = 1,				\
		},							\
	}

#define to_clk_rpm(_hw) container_of(_hw, struct clk_rpm, hw)

struct clk_rpm {
	const int rpm_clk_id;
	const bool active_only;
	bool enabled;
	bool branch;
	struct clk_rpm *peer;
	struct clk_hw hw;
	unsigned long rate;
	struct qcom_rpm *rpm;
};

struct rpm_cc {
	struct qcom_rpm *rpm;
	struct clk_rpm **clks;
	size_t num_clks;
};

struct rpm_clk_desc {
	struct clk_rpm **clks;
	size_t num_clks;
};

static DEFINE_MUTEX(rpm_clk_lock);

static int clk_rpm_handoff(struct clk_rpm *r)
{
	int ret;
	u32 value = INT_MAX;

	ret = qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;
	ret = qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			     r->rpm_clk_id, &value, 1);
	if (ret)
		return ret;

	return 0;
}

static int clk_rpm_set_rate_active(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_ACTIVE_STATE,
			      r->rpm_clk_id, &value, 1);
}

static int clk_rpm_set_rate_sleep(struct clk_rpm *r, unsigned long rate)
{
	u32 value = DIV_ROUND_UP(rate, 1000); /* to kHz */

	return qcom_rpm_write(r->rpm, QCOM_RPM_SLEEP_STATE,
			      r->rpm_clk_id, &value, 1);
}

static void to_active_sleep(struct clk_rpm *r, unsigned long rate,
			    unsigned long *active, unsigned long *sleep)
{
	*active = rate;

	/*
	 * Active-only clocks don't care what the rate is during sleep. So,
	 * they vote for zero.
	 */
	if (r->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}

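/*
 * Preparing a clock sends this clock's vote, combined with its enabled
 * peer's vote (the higher of the two rates wins), to both the active and
 * the sleep set of the RPM. Branch clocks are reduced to a 0/1 on/off vote.
 */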
static int clk_rpm_prepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Don't send requests to the RPM if the rate has not been set. */
	if (!r->rate)
		goto out;

	to_active_sleep(r, r->rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);

	if (r->branch)
		active_rate = !!active_rate;

	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);

	if (r->branch)
		sleep_rate = !!sleep_rate;

	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		/* Undo the active set vote and restore it */
		ret = clk_rpm_set_rate_active(r, peer_rate);

out:
	if (!ret)
		r->enabled = true;

	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static void clk_rpm_unprepare(struct clk_hw *hw)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	unsigned long active_rate, sleep_rate;
	int ret;

	mutex_lock(&rpm_clk_lock);

	if (!r->rate)
		goto out;

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate, &peer_rate,
				&peer_sleep_rate);

	active_rate = r->branch ? !!peer_rate : peer_rate;
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = r->branch ? !!peer_sleep_rate : peer_sleep_rate;
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->enabled = false;

out:
	mutex_unlock(&rpm_clk_lock);
}

static int clk_rpm_set_rate(struct clk_hw *hw,
			    unsigned long rate, unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);
	struct clk_rpm *peer = r->peer;
	unsigned long active_rate, sleep_rate;
	unsigned long this_rate = 0, this_sleep_rate = 0;
	unsigned long peer_rate = 0, peer_sleep_rate = 0;
	int ret = 0;

	mutex_lock(&rpm_clk_lock);

	/* Only send requests to the RPM while the clock is enabled. */
	if (!r->enabled)
		goto out;

	to_active_sleep(r, rate, &this_rate, &this_sleep_rate);

	/* Take peer clock's rate into account only if it's enabled. */
	if (peer->enabled)
		to_active_sleep(peer, peer->rate,
				&peer_rate, &peer_sleep_rate);

	active_rate = max(this_rate, peer_rate);
	ret = clk_rpm_set_rate_active(r, active_rate);
	if (ret)
		goto out;

	sleep_rate = max(this_sleep_rate, peer_sleep_rate);
	ret = clk_rpm_set_rate_sleep(r, sleep_rate);
	if (ret)
		goto out;

	r->rate = rate;

out:
	mutex_unlock(&rpm_clk_lock);

	return ret;
}

static long clk_rpm_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *parent_rate)
{
	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate is requested.
	 */
	return rate;
}

static unsigned long clk_rpm_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_rpm *r = to_clk_rpm(hw);

	/*
	 * RPM handles rate rounding and we don't have a way to
	 * know what the rate will be, so just return whatever
	 * rate was last set.
	 */
	return r->rate;
}

static const struct clk_ops clk_rpm_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.set_rate = clk_rpm_set_rate,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

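/*
 * Branch clocks are plain on/off votes towards the RPM: there is no
 * .set_rate callback, so only the prepare/unprepare votes and the fixed
 * rate they were declared with are ever communicated.
 */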
static const struct clk_ops clk_rpm_branch_ops = {
	.prepare = clk_rpm_prepare,
	.unprepare = clk_rpm_unprepare,
	.round_rate = clk_rpm_round_rate,
	.recalc_rate = clk_rpm_recalc_rate,
};

DEFINE_CLK_RPM(apq8064, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK);
DEFINE_CLK_RPM(apq8064, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK);
DEFINE_CLK_RPM(apq8064, mmfab_clk, mmfab_a_clk, QCOM_RPM_MM_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, mmfpb_clk, mmfpb_a_clk, QCOM_RPM_MMFPB_CLK);
DEFINE_CLK_RPM(apq8064, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK);
DEFINE_CLK_RPM(apq8064, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK);
DEFINE_CLK_RPM(apq8064, qdss_clk, qdss_a_clk, QCOM_RPM_QDSS_CLK);

static struct clk_rpm *apq8064_clks[] = {
	[RPM_APPS_FABRIC_CLK] = &apq8064_afab_clk,
	[RPM_APPS_FABRIC_A_CLK] = &apq8064_afab_a_clk,
	[RPM_CFPB_CLK] = &apq8064_cfpb_clk,
	[RPM_CFPB_A_CLK] = &apq8064_cfpb_a_clk,
	[RPM_DAYTONA_FABRIC_CLK] = &apq8064_daytona_clk,
	[RPM_DAYTONA_FABRIC_A_CLK] = &apq8064_daytona_a_clk,
	[RPM_EBI1_CLK] = &apq8064_ebi1_clk,
	[RPM_EBI1_A_CLK] = &apq8064_ebi1_a_clk,
	[RPM_MM_FABRIC_CLK] = &apq8064_mmfab_clk,
	[RPM_MM_FABRIC_A_CLK] = &apq8064_mmfab_a_clk,
	[RPM_MMFPB_CLK] = &apq8064_mmfpb_clk,
	[RPM_MMFPB_A_CLK] = &apq8064_mmfpb_a_clk,
	[RPM_SYS_FABRIC_CLK] = &apq8064_sfab_clk,
	[RPM_SYS_FABRIC_A_CLK] = &apq8064_sfab_a_clk,
	[RPM_SFPB_CLK] = &apq8064_sfpb_clk,
	[RPM_SFPB_A_CLK] = &apq8064_sfpb_a_clk,
	[RPM_QDSS_CLK] = &apq8064_qdss_clk,
	[RPM_QDSS_A_CLK] = &apq8064_qdss_a_clk,
};

static const struct rpm_clk_desc rpm_clk_apq8064 = {
	.clks = apq8064_clks,
	.num_clks = ARRAY_SIZE(apq8064_clks),
};

static const struct of_device_id rpm_clk_match_table[] = {
	{ .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 },
	{ }
};
MODULE_DEVICE_TABLE(of, rpm_clk_match_table);

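/*
 * OF clock provider callback: the single DT cell is an index into the
 * per-SoC clock table (see dt-bindings/clock/qcom,rpmcc.h); holes in that
 * table are reported as -ENOENT.
 */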
static struct clk_hw *qcom_rpm_clk_hw_get(struct of_phandle_args *clkspec,
					  void *data)
{
	struct rpm_cc *rcc = data;
	unsigned int idx = clkspec->args[0];

	if (idx >= rcc->num_clks) {
		pr_err("%s: invalid index %u\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
}

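/*
 * Probe fetches the RPM handle from the parent MFD device, hands each clock
 * off to the RPM (an initial vote in both the active and sleep sets),
 * registers the clk_hws and finally installs the OF clock provider.
 */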
static int rpm_clk_probe(struct platform_device *pdev)
{
	struct rpm_cc *rcc;
	int ret;
	size_t num_clks, i;
	struct qcom_rpm *rpm;
	struct clk_rpm **rpm_clks;
	const struct rpm_clk_desc *desc;

	rpm = dev_get_drvdata(pdev->dev.parent);
	if (!rpm) {
		dev_err(&pdev->dev, "Unable to retrieve handle to RPM\n");
		return -ENODEV;
	}

	desc = of_device_get_match_data(&pdev->dev);
	if (!desc)
		return -EINVAL;

	rpm_clks = desc->clks;
	num_clks = desc->num_clks;

	rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
	if (!rcc)
		return -ENOMEM;

	rcc->clks = rpm_clks;
	rcc->num_clks = num_clks;

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		rpm_clks[i]->rpm = rpm;

		ret = clk_rpm_handoff(rpm_clks[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < num_clks; i++) {
		if (!rpm_clks[i])
			continue;

		ret = devm_clk_hw_register(&pdev->dev, &rpm_clks[i]->hw);
		if (ret)
			goto err;
	}

	ret = of_clk_add_hw_provider(pdev->dev.of_node, qcom_rpm_clk_hw_get,
				     rcc);
	if (ret)
		goto err;

	return 0;

err:
	dev_err(&pdev->dev, "Error registering RPM Clock driver (%d)\n", ret);
	return ret;
}

static int rpm_clk_remove(struct platform_device *pdev)
{
	of_clk_del_provider(pdev->dev.of_node);

	return 0;
}

static struct platform_driver rpm_clk_driver = {
	.driver = {
		.name = "qcom-clk-rpm",
		.of_match_table = rpm_clk_match_table,
	},
	.probe = rpm_clk_probe,
	.remove = rpm_clk_remove,
};

static int __init rpm_clk_init(void)
{
	return platform_driver_register(&rpm_clk_driver);
}
core_initcall(rpm_clk_init);

static void __exit rpm_clk_exit(void)
{
	platform_driver_unregister(&rpm_clk_driver);
}
module_exit(rpm_clk_exit);

MODULE_DESCRIPTION("Qualcomm RPM Clock Controller Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qcom-clk-rpm");