// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 */

#include <linux/bitfield.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/interconnect.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/units.h>

#define LUT_MAX_ENTRIES			40U
#define LUT_SRC				GENMASK(31, 30)
#define LUT_L_VAL			GENMASK(7, 0)
#define LUT_CORE_COUNT			GENMASK(18, 16)
#define LUT_VOLT			GENMASK(11, 0)
#define CLK_HW_DIV			2
#define LUT_TURBO_IND			1

#define GT_IRQ_STATUS			BIT(2)

#define MAX_FREQ_DOMAINS		4
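
/*
 * Layout assumed by the LUT decode below: each frequency domain exposes up
 * to LUT_MAX_ENTRIES rows, lut_row_size bytes apart. The frequency word of
 * a row carries the clock source select (LUT_SRC), the XO multiplier
 * (LUT_L_VAL) and the core count (LUT_CORE_COUNT); the matching voltage
 * word carries LUT_VOLT in mV. See qcom_cpufreq_hw_read_lut().
 */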

struct qcom_cpufreq_soc_data {
	u32 reg_enable;
	u32 reg_domain_state;
	u32 reg_dcvs_ctrl;
	u32 reg_freq_lut;
	u32 reg_volt_lut;
	u32 reg_current_vote;
	u32 reg_intr_clr;
	u32 reg_perf_state;
	u8 lut_row_size;
};

struct qcom_cpufreq_data {
	void __iomem *base;

	/*
	 * Mutex to synchronize between de-init sequence and re-starting LMh
	 * polling/interrupts
	 */
	struct mutex throttle_lock;
	int throttle_irq;
	char irq_name[15];
	bool cancel_throttle;
	struct delayed_work throttle_work;
	struct cpufreq_policy *policy;
	struct clk_hw cpu_clk;

	bool per_core_dcvs;
};

static struct {
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
} qcom_cpufreq;

static unsigned long cpu_hw_rate, xo_rate;
static bool icc_scaling_enabled;

static int qcom_cpufreq_set_bw(struct cpufreq_policy *policy,
			       unsigned long freq_khz)
{
	unsigned long freq_hz = freq_khz * 1000;
	struct dev_pm_opp *opp;
	struct device *dev;
	int ret;

	dev = get_cpu_device(policy->cpu);
	if (!dev)
		return -ENODEV;

	opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	ret = dev_pm_opp_set_opp(dev, opp);
	dev_pm_opp_put(opp);

	return ret;
}

static int qcom_cpufreq_update_opp(struct device *cpu_dev,
				   unsigned long freq_khz,
				   unsigned long volt)
{
	unsigned long freq_hz = freq_khz * 1000;
	int ret;

	/* Skip voltage update if the opp table is not available */
	if (!icc_scaling_enabled)
		return dev_pm_opp_add(cpu_dev, freq_hz, volt);

	ret = dev_pm_opp_adjust_voltage(cpu_dev, freq_hz, volt, volt, volt);
	if (ret) {
		dev_err(cpu_dev, "Voltage update failed freq=%ld\n", freq_khz);
		return ret;
	}

	return dev_pm_opp_enable(cpu_dev, freq_hz);
}

static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
					unsigned int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
	unsigned long freq = policy->freq_table[index].frequency;
	unsigned int i;

	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	if (icc_scaling_enabled)
		qcom_cpufreq_set_bw(policy, freq);

	return 0;
}

static unsigned long qcom_lmh_get_throttle_freq(struct qcom_cpufreq_data *data)
{
	unsigned int lval;

	if (qcom_cpufreq.soc_data->reg_current_vote)
		lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_current_vote) & 0x3ff;
	else
		lval = readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_domain_state) & 0xff;

	return lval * xo_rate;
}
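
/*
 * Worked example, assuming the 19.2 MHz XO commonly used on these SoCs:
 * an L-value of 80 read back from the domain-state (or current-vote)
 * register decodes to 80 * 19200000 Hz = 1.536 GHz as the throttle ceiling.
 */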

/* Get the frequency requested by the cpufreq core for the CPU */
static unsigned int qcom_cpufreq_get_freq(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	const struct qcom_cpufreq_soc_data *soc_data;
	struct cpufreq_policy *policy;
	unsigned int index;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;
	soc_data = qcom_cpufreq.soc_data;

	index = readl_relaxed(data->base + soc_data->reg_perf_state);
	index = min(index, LUT_MAX_ENTRIES - 1);

	return policy->freq_table[index].frequency;
}

static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
{
	struct qcom_cpufreq_data *data;
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy)
		return 0;

	data = policy->driver_data;

	if (data->throttle_irq >= 0)
		return qcom_lmh_get_throttle_freq(data) / HZ_PER_KHZ;

	return qcom_cpufreq_get_freq(cpu);
}

static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
						unsigned int target_freq)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;
	unsigned int index;
	unsigned int i;

	index = policy->cached_resolved_idx;
	writel_relaxed(index, data->base + soc_data->reg_perf_state);

	if (data->per_core_dcvs)
		for (i = 1; i < cpumask_weight(policy->related_cpus); i++)
			writel_relaxed(index, data->base + soc_data->reg_perf_state + i * 4);

	return policy->freq_table[index].frequency;
}
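
/*
 * With per-core DCVS (bit 0 of reg_dcvs_ctrl set at init), each CPU in the
 * domain has its own perf_state register at a 4-byte stride, which is why
 * both the target_index and fast_switch paths above mirror the selected
 * index to every related CPU.
 */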

static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
				    struct cpufreq_policy *policy)
{
	u32 data, src, lval, i, core_count, prev_freq = 0, freq;
	u32 volt;
	struct cpufreq_frequency_table *table;
	struct dev_pm_opp *opp;
	unsigned long rate;
	int ret;
	struct qcom_cpufreq_data *drv_data = policy->driver_data;
	const struct qcom_cpufreq_soc_data *soc_data = qcom_cpufreq.soc_data;

	table = kcalloc(LUT_MAX_ENTRIES + 1, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (!ret) {
		/* Disable all opps and cross-validate against LUT later */
		icc_scaling_enabled = true;
		for (rate = 0; ; rate++) {
			opp = dev_pm_opp_find_freq_ceil(cpu_dev, &rate);
			if (IS_ERR(opp))
				break;

			dev_pm_opp_put(opp);
			dev_pm_opp_disable(cpu_dev, rate);
		}
	} else if (ret != -ENODEV) {
		dev_err(cpu_dev, "Invalid opp table in device tree\n");
		kfree(table);
		return ret;
	} else {
		policy->fast_switch_possible = true;
		icc_scaling_enabled = false;
	}

	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
		data = readl_relaxed(drv_data->base + soc_data->reg_freq_lut +
				     i * soc_data->lut_row_size);
		src = FIELD_GET(LUT_SRC, data);
		lval = FIELD_GET(LUT_L_VAL, data);
		core_count = FIELD_GET(LUT_CORE_COUNT, data);

		data = readl_relaxed(drv_data->base + soc_data->reg_volt_lut +
				     i * soc_data->lut_row_size);
		volt = FIELD_GET(LUT_VOLT, data) * 1000;

		if (src)
			freq = xo_rate * lval / 1000;
		else
			freq = cpu_hw_rate / 1000;

		if (freq != prev_freq && core_count != LUT_TURBO_IND) {
			if (!qcom_cpufreq_update_opp(cpu_dev, freq, volt)) {
				table[i].frequency = freq;
				dev_dbg(cpu_dev, "index=%d freq=%d, core_count %d\n", i,
					freq, core_count);
			} else {
				dev_warn(cpu_dev, "failed to update OPP for freq=%d\n", freq);
				table[i].frequency = CPUFREQ_ENTRY_INVALID;
			}
		} else if (core_count == LUT_TURBO_IND) {
			table[i].frequency = CPUFREQ_ENTRY_INVALID;
		}

		/*
		 * Two of the same frequencies with the same core counts means
		 * end of table
		 */
		if (i > 0 && prev_freq == freq) {
			struct cpufreq_frequency_table *prev = &table[i - 1];

			/*
			 * Only treat the last frequency that might be a boost
			 * as the boost frequency
			 */
			if (prev->frequency == CPUFREQ_ENTRY_INVALID) {
				if (!qcom_cpufreq_update_opp(cpu_dev, prev_freq, volt)) {
					prev->frequency = prev_freq;
					prev->flags = CPUFREQ_BOOST_FREQ;
				} else {
					dev_warn(cpu_dev, "failed to update OPP for freq=%d\n",
						 freq);
				}
			}

			break;
		}

		prev_freq = freq;
	}

	table[i].frequency = CPUFREQ_TABLE_END;
	policy->freq_table = table;
	dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);

	return 0;
}

static void qcom_get_related_cpus(int index, struct cpumask *m)
{
	struct device_node *cpu_np;
	struct of_phandle_args args;
	int cpu, ret;

	for_each_possible_cpu(cpu) {
		cpu_np = of_cpu_device_node_get(cpu);
		if (!cpu_np)
			continue;

		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
						 "#freq-domain-cells", 0,
						 &args);
		of_node_put(cpu_np);
		if (ret < 0)
			continue;

		if (index == args.args[0])
			cpumask_set_cpu(cpu, m);
	}
}
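
/*
 * Illustrative device tree shape this parsing assumes (node names and the
 * unit address are examples, not taken from this file):
 *
 *	cpu@0 {
 *		qcom,freq-domain = <&cpufreq_hw 0>;
 *	};
 *
 *	cpufreq_hw: cpufreq@18591000 {
 *		compatible = "qcom,cpufreq-epss";
 *		#freq-domain-cells = <1>;
 *	};
 *
 * Every CPU whose phandle argument matches the domain index lands in the
 * same cpufreq policy.
 */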

static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data)
{
	struct cpufreq_policy *policy = data->policy;
	int cpu = cpumask_first(policy->related_cpus);
	struct device *dev = get_cpu_device(cpu);
	unsigned long freq_hz, throttled_freq;
	struct dev_pm_opp *opp;

	/*
	 * Get the h/w throttled frequency, normalize it using the
	 * registered opp table and use it to calculate thermal pressure.
	 */
	freq_hz = qcom_lmh_get_throttle_freq(data);

	opp = dev_pm_opp_find_freq_floor(dev, &freq_hz);
	if (IS_ERR(opp) && PTR_ERR(opp) == -ERANGE)
		opp = dev_pm_opp_find_freq_ceil(dev, &freq_hz);

	if (IS_ERR(opp))
		dev_warn(dev, "Can't find the OPP for throttling: %pe!\n", opp);
	else
		dev_pm_opp_put(opp);

	throttled_freq = freq_hz / HZ_PER_KHZ;

	/* Update HW pressure (the boost frequencies are accepted) */
	arch_update_hw_pressure(policy->related_cpus, throttled_freq);

	/*
	 * In the unlikely case policy is unregistered do not enable
	 * polling or h/w interrupt
	 */
	mutex_lock(&data->throttle_lock);
	if (data->cancel_throttle)
		goto out;

	/*
	 * If h/w throttled frequency is higher than what cpufreq has requested
	 * for, then stop polling and switch back to interrupt mechanism.
	 */
	if (throttled_freq >= qcom_cpufreq_get_freq(cpu))
		enable_irq(data->throttle_irq);
	else
		mod_delayed_work(system_highpri_wq, &data->throttle_work,
				 msecs_to_jiffies(10));

out:
	mutex_unlock(&data->throttle_lock);
}
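
/*
 * Summary of the LMh flow: the throttle interrupt fires once and is masked
 * (see qcom_lmh_dcvs_handle_irq() below), after which the deferrable work
 * re-evaluates the throttled frequency every 10 ms via the function above;
 * once the h/w ceiling is back at or above the requested frequency, polling
 * stops and the interrupt is re-enabled.
 */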

static void qcom_lmh_dcvs_poll(struct work_struct *work)
{
	struct qcom_cpufreq_data *data;

	data = container_of(work, struct qcom_cpufreq_data, throttle_work.work);
	qcom_lmh_dcvs_notify(data);
}

static irqreturn_t qcom_lmh_dcvs_handle_irq(int irq, void *data)
{
	struct qcom_cpufreq_data *c_data = data;

	/* Disable interrupt and enable polling */
	disable_irq_nosync(c_data->throttle_irq);
	schedule_delayed_work(&c_data->throttle_work, 0);

	if (qcom_cpufreq.soc_data->reg_intr_clr)
		writel_relaxed(GT_IRQ_STATUS,
			       c_data->base + qcom_cpufreq.soc_data->reg_intr_clr);

	return IRQ_HANDLED;
}

static const struct qcom_cpufreq_soc_data qcom_soc_data = {
	.reg_enable = 0x0,
	.reg_dcvs_ctrl = 0xbc,
	.reg_freq_lut = 0x110,
	.reg_volt_lut = 0x114,
	.reg_current_vote = 0x704,
	.reg_perf_state = 0x920,
	.lut_row_size = 32,
};

static const struct qcom_cpufreq_soc_data epss_soc_data = {
	.reg_enable = 0x0,
	.reg_domain_state = 0x20,
	.reg_dcvs_ctrl = 0xb0,
	.reg_freq_lut = 0x100,
	.reg_volt_lut = 0x200,
	.reg_intr_clr = 0x308,
	.reg_perf_state = 0x320,
	.lut_row_size = 4,
};

static const struct of_device_id qcom_cpufreq_hw_match[] = {
	{ .compatible = "qcom,cpufreq-hw", .data = &qcom_soc_data },
	{ .compatible = "qcom,cpufreq-epss", .data = &epss_soc_data },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_cpufreq_hw_match);

static int qcom_cpufreq_hw_lmh_init(struct cpufreq_policy *policy, int index)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	/*
	 * Look for LMh interrupt. If no interrupt line is specified /
	 * if there is an error, allow cpufreq to be enabled as usual.
	 */
	data->throttle_irq = platform_get_irq_optional(pdev, index);
	if (data->throttle_irq == -ENXIO)
		return 0;
	if (data->throttle_irq < 0)
		return data->throttle_irq;

	data->cancel_throttle = false;
	data->policy = policy;

	mutex_init(&data->throttle_lock);
	INIT_DEFERRABLE_WORK(&data->throttle_work, qcom_lmh_dcvs_poll);

	snprintf(data->irq_name, sizeof(data->irq_name), "dcvsh-irq-%u", policy->cpu);
	ret = request_threaded_irq(data->throttle_irq, NULL, qcom_lmh_dcvs_handle_irq,
				   IRQF_ONESHOT | IRQF_NO_AUTOEN, data->irq_name, data);
	if (ret) {
		dev_err(&pdev->dev, "Error registering %s: %d\n", data->irq_name, ret);
		return 0;
	}

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return 0;
}

static int qcom_cpufreq_hw_cpu_online(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;
	struct platform_device *pdev = cpufreq_get_driver_data();
	int ret;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = false;
	mutex_unlock(&data->throttle_lock);

	ret = irq_set_affinity_and_hint(data->throttle_irq, policy->cpus);
	if (ret)
		dev_err(&pdev->dev, "Failed to set CPU affinity of %s[%d]\n",
			data->irq_name, data->throttle_irq);

	return ret;
}

static int qcom_cpufreq_hw_cpu_offline(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq <= 0)
		return 0;

	mutex_lock(&data->throttle_lock);
	data->cancel_throttle = true;
	mutex_unlock(&data->throttle_lock);

	cancel_delayed_work_sync(&data->throttle_work);
	irq_set_affinity_and_hint(data->throttle_irq, NULL);
	disable_irq_nosync(data->throttle_irq);

	return 0;
}

static void qcom_cpufreq_hw_lmh_exit(struct qcom_cpufreq_data *data)
{
	if (data->throttle_irq <= 0)
		return;

	free_irq(data->throttle_irq, data);
}

static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
{
	struct platform_device *pdev = cpufreq_get_driver_data();
	struct device *dev = &pdev->dev;
	struct of_phandle_args args;
	struct device_node *cpu_np;
	struct device *cpu_dev;
	struct qcom_cpufreq_data *data;
	int ret, index;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
		       policy->cpu);
		return -ENODEV;
	}

	cpu_np = of_cpu_device_node_get(policy->cpu);
	if (!cpu_np)
		return -EINVAL;

	ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
					 "#freq-domain-cells", 0, &args);
	of_node_put(cpu_np);
	if (ret)
		return ret;

	index = args.args[0];
	data = &qcom_cpufreq.data[index];

	/* HW should be in enabled state to proceed */
	if (!(readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_enable) & 0x1)) {
		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
		return -ENODEV;
	}

	if (readl_relaxed(data->base + qcom_cpufreq.soc_data->reg_dcvs_ctrl) & 0x1)
		data->per_core_dcvs = true;

	qcom_get_related_cpus(index, policy->cpus);

	policy->driver_data = data;
	policy->dvfs_possible_from_any_cpu = true;

	ret = qcom_cpufreq_hw_read_lut(cpu_dev, policy);
	if (ret) {
		dev_err(dev, "Domain-%d failed to read LUT\n", index);
		return ret;
	}

	ret = dev_pm_opp_get_opp_count(cpu_dev);
	if (ret <= 0) {
		dev_err(cpu_dev, "Failed to add OPPs\n");
		return -ENODEV;
	}

	if (policy_has_boost_freq(policy)) {
		ret = cpufreq_enable_boost_support();
		if (ret)
			dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
	}

	return qcom_cpufreq_hw_lmh_init(policy, index);
}

static void qcom_cpufreq_hw_cpu_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);
	struct qcom_cpufreq_data *data = policy->driver_data;

	dev_pm_opp_remove_all_dynamic(cpu_dev);
	dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
	qcom_cpufreq_hw_lmh_exit(data);
	kfree(policy->freq_table);
}

static void qcom_cpufreq_ready(struct cpufreq_policy *policy)
{
	struct qcom_cpufreq_data *data = policy->driver_data;

	if (data->throttle_irq >= 0)
		enable_irq(data->throttle_irq);
}
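
/*
 * Note: the throttle interrupt is requested with IRQF_NO_AUTOEN in
 * qcom_cpufreq_hw_lmh_init(), so the ->ready() callback above is the first
 * point at which it is actually enabled, once the policy is fully set up.
 */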

static struct freq_attr *qcom_cpufreq_hw_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&cpufreq_freq_attr_scaling_boost_freqs,
	NULL
};

static struct cpufreq_driver cpufreq_qcom_hw_driver = {
	.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK |
			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
			  CPUFREQ_IS_COOLING_DEV,
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= qcom_cpufreq_hw_target_index,
	.get		= qcom_cpufreq_hw_get,
	.init		= qcom_cpufreq_hw_cpu_init,
	.exit		= qcom_cpufreq_hw_cpu_exit,
	.online		= qcom_cpufreq_hw_cpu_online,
	.offline	= qcom_cpufreq_hw_cpu_offline,
	.register_em	= cpufreq_register_em_with_opp,
	.fast_switch	= qcom_cpufreq_hw_fast_switch,
	.name		= "qcom-cpufreq-hw",
	.attr		= qcom_cpufreq_hw_attr,
	.ready		= qcom_cpufreq_ready,
};

static unsigned long qcom_cpufreq_hw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct qcom_cpufreq_data *data = container_of(hw, struct qcom_cpufreq_data, cpu_clk);

	return qcom_lmh_get_throttle_freq(data);
}

static const struct clk_ops qcom_cpufreq_hw_clk_ops = {
	.recalc_rate = qcom_cpufreq_hw_recalc_rate,
};
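
/*
 * Each frequency domain is also exposed as a clk_hw whose rate is the h/w
 * throttle frequency. CLK_GET_RATE_NOCACHE is set when the clock is
 * registered in probe below, so every clk_get_rate() call re-reads the
 * register rather than returning a cached value.
 */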

static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
{
	struct clk_hw_onecell_data *clk_data;
	struct device *dev = &pdev->dev;
	struct device *cpu_dev;
	struct clk *clk;
	int ret, i, num_domains;

	clk = clk_get(dev, "xo");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	xo_rate = clk_get_rate(clk);
	clk_put(clk);

	clk = clk_get(dev, "alternate");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
	clk_put(clk);

	cpufreq_qcom_hw_driver.driver_data = pdev;

	/* Check for optional interconnect paths on CPU0 */
	cpu_dev = get_cpu_device(0);
	if (!cpu_dev)
		return -EPROBE_DEFER;

	ret = dev_pm_opp_of_find_icc_paths(cpu_dev, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to find icc paths\n");

	for (num_domains = 0; num_domains < MAX_FREQ_DOMAINS; num_domains++)
		if (!platform_get_resource(pdev, IORESOURCE_MEM, num_domains))
			break;

	qcom_cpufreq.data = devm_kzalloc(dev, sizeof(struct qcom_cpufreq_data) * num_domains,
					 GFP_KERNEL);
	if (!qcom_cpufreq.data)
		return -ENOMEM;

	qcom_cpufreq.soc_data = of_device_get_match_data(dev);
	if (!qcom_cpufreq.soc_data)
		return -EINVAL;

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_domains), GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = num_domains;

	for (i = 0; i < num_domains; i++) {
		struct qcom_cpufreq_data *data = &qcom_cpufreq.data[i];
		struct clk_init_data clk_init = {};
		void __iomem *base;

		base = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(base)) {
			dev_err(dev, "Failed to map resource index %d\n", i);
			return PTR_ERR(base);
		}

		data->base = base;

		/* Register CPU clock for each frequency domain */
		clk_init.name = kasprintf(GFP_KERNEL, "qcom_cpufreq%d", i);
		if (!clk_init.name)
			return -ENOMEM;

		clk_init.flags = CLK_GET_RATE_NOCACHE;
		clk_init.ops = &qcom_cpufreq_hw_clk_ops;
		data->cpu_clk.init = &clk_init;

		ret = devm_clk_hw_register(dev, &data->cpu_clk);
		if (ret < 0) {
			dev_err(dev, "Failed to register clock %d: %d\n", i, ret);
			kfree(clk_init.name);
			return ret;
		}

		clk_data->hws[i] = &data->cpu_clk;
		kfree(clk_init.name);
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
	if (ret < 0) {
		dev_err(dev, "Failed to add clock provider\n");
		return ret;
	}

	ret = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
	if (ret) {
		dev_err(dev, "CPUFreq HW driver failed to register\n");
		return ret;
	}

	dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");

	return 0;
}

static void qcom_cpufreq_hw_driver_remove(struct platform_device *pdev)
{
	cpufreq_unregister_driver(&cpufreq_qcom_hw_driver);
}

static struct platform_driver qcom_cpufreq_hw_driver = {
	.probe = qcom_cpufreq_hw_driver_probe,
	.remove_new = qcom_cpufreq_hw_driver_remove,
	.driver = {
		.name = "qcom-cpufreq-hw",
		.of_match_table = qcom_cpufreq_hw_match,
	},
};

static int __init qcom_cpufreq_hw_init(void)
{
	return platform_driver_register(&qcom_cpufreq_hw_driver);
}
postcore_initcall(qcom_cpufreq_hw_init);

static void __exit qcom_cpufreq_hw_exit(void)
{
	platform_driver_unregister(&qcom_cpufreq_hw_driver);
}
module_exit(qcom_cpufreq_hw_exit);

MODULE_DESCRIPTION("QCOM CPUFREQ HW Driver");
MODULE_LICENSE("GPL v2");