1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
4 * Copyright (c) 2019, Linaro Limited
7 #include <linux/module.h>
9 #include <linux/debugfs.h>
10 #include <linux/string.h>
11 #include <linux/kernel.h>
12 #include <linux/list.h>
13 #include <linux/init.h>
15 #include <linux/bitops.h>
16 #include <linux/slab.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_opp.h>
22 #include <linux/interrupt.h>
23 #include <linux/regmap.h>
24 #include <linux/mfd/syscon.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/clk.h>
27 #include <linux/nvmem-consumer.h>
29 /* Register Offsets for RB-CPR and Bit Definitions */
31 /* RBCPR Version Register */
32 #define REG_RBCPR_VERSION 0
33 #define RBCPR_VER_2 0x02
34 #define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
36 /* RBCPR Gate Count and Target Registers */
37 #define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
39 #define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
40 #define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
41 #define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
42 #define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
44 /* RBCPR Timer Control */
45 #define REG_RBCPR_TIMER_INTERVAL 0x44
46 #define REG_RBIF_TIMER_ADJUST 0x4c
48 #define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
49 #define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
50 #define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
51 #define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
52 #define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
53 #define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
55 /* RBCPR Config Register */
56 #define REG_RBIF_LIMIT 0x48
57 #define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
58 #define RBIF_LIMIT_CEILING_SHIFT 6
59 #define RBIF_LIMIT_FLOOR_BITS 6
60 #define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
62 #define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
63 #define RBIF_LIMIT_FLOOR_DEFAULT 0
65 #define REG_RBIF_SW_VLEVEL 0x94
66 #define RBIF_SW_VLEVEL_DEFAULT 0x20
68 #define REG_RBCPR_STEP_QUOT 0x80
69 #define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
70 #define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
71 #define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
73 /* RBCPR Control Register */
74 #define REG_RBCPR_CTL 0x90
76 #define RBCPR_CTL_LOOP_EN BIT(0)
77 #define RBCPR_CTL_TIMER_EN BIT(3)
78 #define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
79 #define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
80 #define RBCPR_CTL_COUNT_MODE BIT(10)
81 #define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
82 #define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
83 #define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
84 #define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
86 /* RBCPR Ack/Nack Response */
87 #define REG_RBIF_CONT_ACK_CMD 0x98
88 #define REG_RBIF_CONT_NACK_CMD 0x9c
90 /* RBCPR Result status Register */
91 #define REG_RBCPR_RESULT_0 0xa0
93 #define RBCPR_RESULT0_BUSY_SHIFT 19
94 #define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
95 #define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
96 #define RBCPR_RESULT0_ERROR_SHIFT 6
97 #define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
98 #define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
99 #define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
100 #define RBCPR_RESULT0_STEP_UP_SHIFT 1
102 /* RBCPR Interrupt Control Register */
103 #define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
104 #define REG_RBIF_IRQ_CLEAR 0x110
105 #define REG_RBIF_IRQ_STATUS 0x114
107 #define CPR_INT_DONE BIT(0)
108 #define CPR_INT_MIN BIT(1)
109 #define CPR_INT_DOWN BIT(2)
110 #define CPR_INT_MID BIT(3)
111 #define CPR_INT_UP BIT(4)
112 #define CPR_INT_MAX BIT(5)
113 #define CPR_INT_CLAMP BIT(6)
114 #define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
115 CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
116 #define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
118 #define CPR_NUM_RING_OSC 8
120 /* CPR eFuse parameters */
121 #define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
123 #define CPR_FUSE_MIN_QUOT_DIFF 50
125 #define FUSE_REVISION_UNKNOWN (-1)
127 enum voltage_change_dir
{
137 char *quotient_offset
;
140 struct fuse_corner_data
{
150 /* fuse quot_offset */
151 int quot_offset_scale
;
152 int quot_offset_adjust
;
156 int init_voltage_step
;
157 int init_voltage_width
;
158 struct fuse_corner_data
*fuse_corner_data
;
162 unsigned int fuse_corner
;
167 unsigned int num_fuse_corners
;
171 unsigned int timer_delay_us
;
172 unsigned int timer_cons_up
;
173 unsigned int timer_cons_down
;
174 unsigned int up_threshold
;
175 unsigned int down_threshold
;
176 unsigned int idle_clocks
;
177 unsigned int gcnt_us
;
178 unsigned int vdd_apc_step_up_limit
;
179 unsigned int vdd_apc_step_down_limit
;
180 unsigned int clamp_timer_interval
;
182 struct cpr_fuses cpr_fuses
;
183 bool reduce_to_fuse_uV
;
184 bool reduce_to_corner_uV
;
188 unsigned int enable_reg
;
191 struct reg_sequence
*config
;
192 struct reg_sequence
*settings
;
193 int num_regs_per_fuse
;
196 struct cpr_acc_desc
{
197 const struct cpr_desc
*cpr_desc
;
198 const struct acc_desc
*acc_desc
;
207 const struct reg_sequence
*accs
;
209 unsigned long max_freq
;
222 struct fuse_corner
*fuse_corner
;
226 unsigned int num_corners
;
227 unsigned int ref_clk_khz
;
229 struct generic_pm_domain pd
;
231 struct device
*attached_cpu_dev
;
234 struct corner
*corner
;
235 struct regulator
*vdd_apc
;
242 struct fuse_corner
*fuse_corners
;
243 struct corner
*corners
;
245 const struct cpr_desc
*desc
;
246 const struct acc_desc
*acc_desc
;
247 const struct cpr_fuse
*cpr_fuses
;
249 struct dentry
*debugfs
;
252 static bool cpr_is_allowed(struct cpr_drv
*drv
)
254 return !drv
->loop_disabled
;
257 static void cpr_write(struct cpr_drv
*drv
, u32 offset
, u32 value
)
259 writel_relaxed(value
, drv
->base
+ offset
);
262 static u32
cpr_read(struct cpr_drv
*drv
, u32 offset
)
264 return readl_relaxed(drv
->base
+ offset
);
268 cpr_masked_write(struct cpr_drv
*drv
, u32 offset
, u32 mask
, u32 value
)
272 val
= readl_relaxed(drv
->base
+ offset
);
275 writel_relaxed(val
, drv
->base
+ offset
);
278 static void cpr_irq_clr(struct cpr_drv
*drv
)
280 cpr_write(drv
, REG_RBIF_IRQ_CLEAR
, CPR_INT_ALL
);
283 static void cpr_irq_clr_nack(struct cpr_drv
*drv
)
286 cpr_write(drv
, REG_RBIF_CONT_NACK_CMD
, 1);
289 static void cpr_irq_clr_ack(struct cpr_drv
*drv
)
292 cpr_write(drv
, REG_RBIF_CONT_ACK_CMD
, 1);
295 static void cpr_irq_set(struct cpr_drv
*drv
, u32 int_bits
)
297 cpr_write(drv
, REG_RBIF_IRQ_EN(0), int_bits
);
300 static void cpr_ctl_modify(struct cpr_drv
*drv
, u32 mask
, u32 value
)
302 cpr_masked_write(drv
, REG_RBCPR_CTL
, mask
, value
);
305 static void cpr_ctl_enable(struct cpr_drv
*drv
, struct corner
*corner
)
308 const struct cpr_desc
*desc
= drv
->desc
;
310 /* Program Consecutive Up & Down */
311 val
= desc
->timer_cons_down
<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT
;
312 val
|= desc
->timer_cons_up
<< RBIF_TIMER_ADJ_CONS_UP_SHIFT
;
313 mask
= RBIF_TIMER_ADJ_CONS_UP_MASK
| RBIF_TIMER_ADJ_CONS_DOWN_MASK
;
314 cpr_masked_write(drv
, REG_RBIF_TIMER_ADJUST
, mask
, val
);
315 cpr_masked_write(drv
, REG_RBCPR_CTL
,
316 RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
|
317 RBCPR_CTL_SW_AUTO_CONT_ACK_EN
,
319 cpr_irq_set(drv
, corner
->save_irq
);
321 if (cpr_is_allowed(drv
) && corner
->max_uV
> corner
->min_uV
)
322 val
= RBCPR_CTL_LOOP_EN
;
325 cpr_ctl_modify(drv
, RBCPR_CTL_LOOP_EN
, val
);
328 static void cpr_ctl_disable(struct cpr_drv
*drv
)
331 cpr_ctl_modify(drv
, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
|
332 RBCPR_CTL_SW_AUTO_CONT_ACK_EN
, 0);
333 cpr_masked_write(drv
, REG_RBIF_TIMER_ADJUST
,
334 RBIF_TIMER_ADJ_CONS_UP_MASK
|
335 RBIF_TIMER_ADJ_CONS_DOWN_MASK
, 0);
337 cpr_write(drv
, REG_RBIF_CONT_ACK_CMD
, 1);
338 cpr_write(drv
, REG_RBIF_CONT_NACK_CMD
, 1);
339 cpr_ctl_modify(drv
, RBCPR_CTL_LOOP_EN
, 0);
342 static bool cpr_ctl_is_enabled(struct cpr_drv
*drv
)
346 reg_val
= cpr_read(drv
, REG_RBCPR_CTL
);
347 return reg_val
& RBCPR_CTL_LOOP_EN
;
350 static bool cpr_ctl_is_busy(struct cpr_drv
*drv
)
354 reg_val
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
355 return reg_val
& RBCPR_RESULT0_BUSY_MASK
;
358 static void cpr_corner_save(struct cpr_drv
*drv
, struct corner
*corner
)
360 corner
->save_ctl
= cpr_read(drv
, REG_RBCPR_CTL
);
361 corner
->save_irq
= cpr_read(drv
, REG_RBIF_IRQ_EN(0));
364 static void cpr_corner_restore(struct cpr_drv
*drv
, struct corner
*corner
)
366 u32 gcnt
, ctl
, irq
, ro_sel
, step_quot
;
367 struct fuse_corner
*fuse
= corner
->fuse_corner
;
368 const struct cpr_desc
*desc
= drv
->desc
;
371 ro_sel
= fuse
->ring_osc_idx
;
373 gcnt
|= fuse
->quot
- corner
->quot_adjust
;
375 /* Program the step quotient and idle clocks */
376 step_quot
= desc
->idle_clocks
<< RBCPR_STEP_QUOT_IDLE_CLK_SHIFT
;
377 step_quot
|= fuse
->step_quot
& RBCPR_STEP_QUOT_STEPQUOT_MASK
;
378 cpr_write(drv
, REG_RBCPR_STEP_QUOT
, step_quot
);
380 /* Clear the target quotient value and gate count of all ROs */
381 for (i
= 0; i
< CPR_NUM_RING_OSC
; i
++)
382 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(i
), 0);
384 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(ro_sel
), gcnt
);
385 ctl
= corner
->save_ctl
;
386 cpr_write(drv
, REG_RBCPR_CTL
, ctl
);
387 irq
= corner
->save_irq
;
388 cpr_irq_set(drv
, irq
);
389 dev_dbg(drv
->dev
, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt
,
393 static void cpr_set_acc(struct regmap
*tcsr
, struct fuse_corner
*f
,
394 struct fuse_corner
*end
)
400 for (f
+= 1; f
<= end
; f
++)
401 regmap_multi_reg_write(tcsr
, f
->accs
, f
->num_accs
);
403 for (f
-= 1; f
>= end
; f
--)
404 regmap_multi_reg_write(tcsr
, f
->accs
, f
->num_accs
);
408 static int cpr_pre_voltage(struct cpr_drv
*drv
,
409 struct fuse_corner
*fuse_corner
,
410 enum voltage_change_dir dir
)
412 struct fuse_corner
*prev_fuse_corner
= drv
->corner
->fuse_corner
;
414 if (drv
->tcsr
&& dir
== DOWN
)
415 cpr_set_acc(drv
->tcsr
, prev_fuse_corner
, fuse_corner
);
420 static int cpr_post_voltage(struct cpr_drv
*drv
,
421 struct fuse_corner
*fuse_corner
,
422 enum voltage_change_dir dir
)
424 struct fuse_corner
*prev_fuse_corner
= drv
->corner
->fuse_corner
;
426 if (drv
->tcsr
&& dir
== UP
)
427 cpr_set_acc(drv
->tcsr
, prev_fuse_corner
, fuse_corner
);
432 static int cpr_scale_voltage(struct cpr_drv
*drv
, struct corner
*corner
,
433 int new_uV
, enum voltage_change_dir dir
)
436 struct fuse_corner
*fuse_corner
= corner
->fuse_corner
;
438 ret
= cpr_pre_voltage(drv
, fuse_corner
, dir
);
442 ret
= regulator_set_voltage(drv
->vdd_apc
, new_uV
, new_uV
);
444 dev_err_ratelimited(drv
->dev
, "failed to set apc voltage %d\n",
449 ret
= cpr_post_voltage(drv
, fuse_corner
, dir
);
456 static unsigned int cpr_get_cur_perf_state(struct cpr_drv
*drv
)
458 return drv
->corner
? drv
->corner
- drv
->corners
+ 1 : 0;
461 static int cpr_scale(struct cpr_drv
*drv
, enum voltage_change_dir dir
)
463 u32 val
, error_steps
, reg_mask
;
464 int last_uV
, new_uV
, step_uV
, ret
;
465 struct corner
*corner
;
466 const struct cpr_desc
*desc
= drv
->desc
;
468 if (dir
!= UP
&& dir
!= DOWN
)
471 step_uV
= regulator_get_linear_step(drv
->vdd_apc
);
475 corner
= drv
->corner
;
477 val
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
479 error_steps
= val
>> RBCPR_RESULT0_ERROR_STEPS_SHIFT
;
480 error_steps
&= RBCPR_RESULT0_ERROR_STEPS_MASK
;
481 last_uV
= corner
->last_uV
;
484 if (desc
->clamp_timer_interval
&&
485 error_steps
< desc
->up_threshold
) {
487 * Handle the case where another measurement started
488 * after the interrupt was triggered due to a core
489 * exiting from power collapse.
491 error_steps
= max(desc
->up_threshold
,
492 desc
->vdd_apc_step_up_limit
);
495 if (last_uV
>= corner
->max_uV
) {
496 cpr_irq_clr_nack(drv
);
498 /* Maximize the UP threshold */
499 reg_mask
= RBCPR_CTL_UP_THRESHOLD_MASK
;
500 reg_mask
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
502 cpr_ctl_modify(drv
, reg_mask
, val
);
504 /* Disable UP interrupt */
505 cpr_irq_set(drv
, CPR_INT_DEFAULT
& ~CPR_INT_UP
);
510 if (error_steps
> desc
->vdd_apc_step_up_limit
)
511 error_steps
= desc
->vdd_apc_step_up_limit
;
513 /* Calculate new voltage */
514 new_uV
= last_uV
+ error_steps
* step_uV
;
515 new_uV
= min(new_uV
, corner
->max_uV
);
518 "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
519 new_uV
, last_uV
, cpr_get_cur_perf_state(drv
));
520 } else if (dir
== DOWN
) {
521 if (desc
->clamp_timer_interval
&&
522 error_steps
< desc
->down_threshold
) {
524 * Handle the case where another measurement started
525 * after the interrupt was triggered due to a core
526 * exiting from power collapse.
528 error_steps
= max(desc
->down_threshold
,
529 desc
->vdd_apc_step_down_limit
);
532 if (last_uV
<= corner
->min_uV
) {
533 cpr_irq_clr_nack(drv
);
535 /* Enable auto nack down */
536 reg_mask
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
537 val
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
539 cpr_ctl_modify(drv
, reg_mask
, val
);
541 /* Disable DOWN interrupt */
542 cpr_irq_set(drv
, CPR_INT_DEFAULT
& ~CPR_INT_DOWN
);
547 if (error_steps
> desc
->vdd_apc_step_down_limit
)
548 error_steps
= desc
->vdd_apc_step_down_limit
;
550 /* Calculate new voltage */
551 new_uV
= last_uV
- error_steps
* step_uV
;
552 new_uV
= max(new_uV
, corner
->min_uV
);
555 "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
556 new_uV
, last_uV
, cpr_get_cur_perf_state(drv
));
559 ret
= cpr_scale_voltage(drv
, corner
, new_uV
, dir
);
561 cpr_irq_clr_nack(drv
);
564 drv
->corner
->last_uV
= new_uV
;
567 /* Disable auto nack down */
568 reg_mask
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
570 } else if (dir
== DOWN
) {
571 /* Restore default threshold for UP */
572 reg_mask
= RBCPR_CTL_UP_THRESHOLD_MASK
;
573 reg_mask
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
574 val
= desc
->up_threshold
;
575 val
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
578 cpr_ctl_modify(drv
, reg_mask
, val
);
580 /* Re-enable default interrupts */
581 cpr_irq_set(drv
, CPR_INT_DEFAULT
);
584 cpr_irq_clr_ack(drv
);
589 static irqreturn_t
cpr_irq_handler(int irq
, void *dev
)
591 struct cpr_drv
*drv
= dev
;
592 const struct cpr_desc
*desc
= drv
->desc
;
593 irqreturn_t ret
= IRQ_HANDLED
;
596 mutex_lock(&drv
->lock
);
598 val
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
599 if (drv
->flags
& FLAGS_IGNORE_1ST_IRQ_STATUS
)
600 val
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
602 dev_dbg(drv
->dev
, "IRQ_STATUS = %#02x\n", val
);
604 if (!cpr_ctl_is_enabled(drv
)) {
605 dev_dbg(drv
->dev
, "CPR is disabled\n");
607 } else if (cpr_ctl_is_busy(drv
) && !desc
->clamp_timer_interval
) {
608 dev_dbg(drv
->dev
, "CPR measurement is not ready\n");
609 } else if (!cpr_is_allowed(drv
)) {
610 val
= cpr_read(drv
, REG_RBCPR_CTL
);
611 dev_err_ratelimited(drv
->dev
,
612 "Interrupt broken? RBCPR_CTL = %#02x\n",
617 * Following sequence of handling is as per each IRQ's
620 if (val
& CPR_INT_UP
) {
622 } else if (val
& CPR_INT_DOWN
) {
623 cpr_scale(drv
, DOWN
);
624 } else if (val
& CPR_INT_MIN
) {
625 cpr_irq_clr_nack(drv
);
626 } else if (val
& CPR_INT_MAX
) {
627 cpr_irq_clr_nack(drv
);
628 } else if (val
& CPR_INT_MID
) {
629 /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
630 dev_dbg(drv
->dev
, "IRQ occurred for Mid Flag\n");
633 "IRQ occurred for unknown flag (%#08x)\n", val
);
636 /* Save register values for the corner */
637 cpr_corner_save(drv
, drv
->corner
);
640 mutex_unlock(&drv
->lock
);
645 static int cpr_enable(struct cpr_drv
*drv
)
649 ret
= regulator_enable(drv
->vdd_apc
);
653 mutex_lock(&drv
->lock
);
655 if (cpr_is_allowed(drv
) && drv
->corner
) {
657 cpr_corner_restore(drv
, drv
->corner
);
658 cpr_ctl_enable(drv
, drv
->corner
);
661 mutex_unlock(&drv
->lock
);
666 static int cpr_disable(struct cpr_drv
*drv
)
670 mutex_lock(&drv
->lock
);
672 if (cpr_is_allowed(drv
)) {
673 cpr_ctl_disable(drv
);
677 mutex_unlock(&drv
->lock
);
679 ret
= regulator_disable(drv
->vdd_apc
);
686 static int cpr_config(struct cpr_drv
*drv
)
690 struct corner
*corner
;
691 const struct cpr_desc
*desc
= drv
->desc
;
693 /* Disable interrupt and CPR */
694 cpr_write(drv
, REG_RBIF_IRQ_EN(0), 0);
695 cpr_write(drv
, REG_RBCPR_CTL
, 0);
697 /* Program the default HW ceiling, floor and vlevel */
698 val
= (RBIF_LIMIT_CEILING_DEFAULT
& RBIF_LIMIT_CEILING_MASK
)
699 << RBIF_LIMIT_CEILING_SHIFT
;
700 val
|= RBIF_LIMIT_FLOOR_DEFAULT
& RBIF_LIMIT_FLOOR_MASK
;
701 cpr_write(drv
, REG_RBIF_LIMIT
, val
);
702 cpr_write(drv
, REG_RBIF_SW_VLEVEL
, RBIF_SW_VLEVEL_DEFAULT
);
705 * Clear the target quotient value and gate count of all
708 for (i
= 0; i
< CPR_NUM_RING_OSC
; i
++)
709 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(i
), 0);
711 /* Init and save gcnt */
712 gcnt
= (drv
->ref_clk_khz
* desc
->gcnt_us
) / 1000;
713 gcnt
= gcnt
& RBCPR_GCNT_TARGET_GCNT_MASK
;
714 gcnt
<<= RBCPR_GCNT_TARGET_GCNT_SHIFT
;
717 /* Program the delay count for the timer */
718 val
= (drv
->ref_clk_khz
* desc
->timer_delay_us
) / 1000;
719 cpr_write(drv
, REG_RBCPR_TIMER_INTERVAL
, val
);
720 dev_dbg(drv
->dev
, "Timer count: %#0x (for %d us)\n", val
,
721 desc
->timer_delay_us
);
723 /* Program Consecutive Up & Down */
724 val
= desc
->timer_cons_down
<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT
;
725 val
|= desc
->timer_cons_up
<< RBIF_TIMER_ADJ_CONS_UP_SHIFT
;
726 val
|= desc
->clamp_timer_interval
<< RBIF_TIMER_ADJ_CLAMP_INT_SHIFT
;
727 cpr_write(drv
, REG_RBIF_TIMER_ADJUST
, val
);
729 /* Program the control register */
730 val
= desc
->up_threshold
<< RBCPR_CTL_UP_THRESHOLD_SHIFT
;
731 val
|= desc
->down_threshold
<< RBCPR_CTL_DN_THRESHOLD_SHIFT
;
732 val
|= RBCPR_CTL_TIMER_EN
| RBCPR_CTL_COUNT_MODE
;
733 val
|= RBCPR_CTL_SW_AUTO_CONT_ACK_EN
;
734 cpr_write(drv
, REG_RBCPR_CTL
, val
);
736 for (i
= 0; i
< drv
->num_corners
; i
++) {
737 corner
= &drv
->corners
[i
];
738 corner
->save_ctl
= val
;
739 corner
->save_irq
= CPR_INT_DEFAULT
;
742 cpr_irq_set(drv
, CPR_INT_DEFAULT
);
744 val
= cpr_read(drv
, REG_RBCPR_VERSION
);
745 if (val
<= RBCPR_VER_2
)
746 drv
->flags
|= FLAGS_IGNORE_1ST_IRQ_STATUS
;
751 static int cpr_set_performance_state(struct generic_pm_domain
*domain
,
754 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
755 struct corner
*corner
, *end
;
756 enum voltage_change_dir dir
;
759 mutex_lock(&drv
->lock
);
761 dev_dbg(drv
->dev
, "%s: setting perf state: %u (prev state: %u)\n",
762 __func__
, state
, cpr_get_cur_perf_state(drv
));
765 * Determine new corner we're going to.
766 * Remove one since lowest performance state is 1.
768 corner
= drv
->corners
+ state
- 1;
769 end
= &drv
->corners
[drv
->num_corners
- 1];
770 if (corner
> end
|| corner
< drv
->corners
) {
775 /* Determine direction */
776 if (drv
->corner
> corner
)
778 else if (drv
->corner
< corner
)
783 if (cpr_is_allowed(drv
))
784 new_uV
= corner
->last_uV
;
788 if (cpr_is_allowed(drv
))
789 cpr_ctl_disable(drv
);
791 ret
= cpr_scale_voltage(drv
, corner
, new_uV
, dir
);
795 if (cpr_is_allowed(drv
)) {
797 if (drv
->corner
!= corner
)
798 cpr_corner_restore(drv
, corner
);
799 cpr_ctl_enable(drv
, corner
);
802 drv
->corner
= corner
;
805 mutex_unlock(&drv
->lock
);
810 static int cpr_read_efuse(struct device
*dev
, const char *cname
, u32
*data
)
812 struct nvmem_cell
*cell
;
819 cell
= nvmem_cell_get(dev
, cname
);
821 if (PTR_ERR(cell
) != -EPROBE_DEFER
)
822 dev_err(dev
, "undefined cell %s\n", cname
);
823 return PTR_ERR(cell
);
826 ret
= nvmem_cell_read(cell
, &len
);
827 nvmem_cell_put(cell
);
829 dev_err(dev
, "can't read cell %s\n", cname
);
833 for (i
= 0; i
< len
; i
++)
834 *data
|= ret
[i
] << (8 * i
);
837 dev_dbg(dev
, "efuse read(%s) = %x, bytes %zd\n", cname
, *data
, len
);
843 cpr_populate_ring_osc_idx(struct cpr_drv
*drv
)
845 struct fuse_corner
*fuse
= drv
->fuse_corners
;
846 struct fuse_corner
*end
= fuse
+ drv
->desc
->num_fuse_corners
;
847 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
851 for (; fuse
< end
; fuse
++, fuses
++) {
852 ret
= cpr_read_efuse(drv
->dev
, fuses
->ring_osc
,
856 fuse
->ring_osc_idx
= data
;
862 static int cpr_read_fuse_uV(const struct cpr_desc
*desc
,
863 const struct fuse_corner_data
*fdata
,
864 const char *init_v_efuse
,
868 int step_size_uV
, steps
, uV
;
872 ret
= cpr_read_efuse(drv
->dev
, init_v_efuse
, &bits
);
876 steps
= bits
& ~BIT(desc
->cpr_fuses
.init_voltage_width
- 1);
877 /* Not two's complement.. instead highest bit is sign bit */
878 if (bits
& BIT(desc
->cpr_fuses
.init_voltage_width
- 1))
881 step_size_uV
= desc
->cpr_fuses
.init_voltage_step
;
883 uV
= fdata
->ref_uV
+ steps
* step_size_uV
;
884 return DIV_ROUND_UP(uV
, step_volt
) * step_volt
;
887 static int cpr_fuse_corner_init(struct cpr_drv
*drv
)
889 const struct cpr_desc
*desc
= drv
->desc
;
890 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
891 const struct acc_desc
*acc_desc
= drv
->acc_desc
;
893 unsigned int step_volt
;
894 struct fuse_corner_data
*fdata
;
895 struct fuse_corner
*fuse
, *end
;
897 const struct reg_sequence
*accs
;
900 accs
= acc_desc
->settings
;
902 step_volt
= regulator_get_linear_step(drv
->vdd_apc
);
906 /* Populate fuse_corner members */
907 fuse
= drv
->fuse_corners
;
908 end
= &fuse
[desc
->num_fuse_corners
- 1];
909 fdata
= desc
->cpr_fuses
.fuse_corner_data
;
911 for (i
= 0; fuse
<= end
; fuse
++, fuses
++, i
++, fdata
++) {
913 * Update SoC voltages: platforms might choose a different
914 * regulators than the one used to characterize the algorithms
915 * (ie, init_voltage_step).
917 fdata
->min_uV
= roundup(fdata
->min_uV
, step_volt
);
918 fdata
->max_uV
= roundup(fdata
->max_uV
, step_volt
);
921 uV
= cpr_read_fuse_uV(desc
, fdata
, fuses
->init_voltage
,
926 fuse
->min_uV
= fdata
->min_uV
;
927 fuse
->max_uV
= fdata
->max_uV
;
928 fuse
->uV
= clamp(uV
, fuse
->min_uV
, fuse
->max_uV
);
932 * Allow the highest fuse corner's PVS voltage to
933 * define the ceiling voltage for that corner in order
934 * to support SoC's in which variable ceiling values
937 end
->max_uV
= max(end
->max_uV
, end
->uV
);
940 /* Populate target quotient by scaling */
941 ret
= cpr_read_efuse(drv
->dev
, fuses
->quotient
, &fuse
->quot
);
945 fuse
->quot
*= fdata
->quot_scale
;
946 fuse
->quot
+= fdata
->quot_offset
;
947 fuse
->quot
+= fdata
->quot_adjust
;
948 fuse
->step_quot
= desc
->step_quot
[fuse
->ring_osc_idx
];
950 /* Populate acc settings */
952 fuse
->num_accs
= acc_desc
->num_regs_per_fuse
;
953 accs
+= acc_desc
->num_regs_per_fuse
;
957 * Restrict all fuse corner PVS voltages based upon per corner
958 * ceiling and floor voltages.
960 for (fuse
= drv
->fuse_corners
, i
= 0; fuse
<= end
; fuse
++, i
++) {
961 if (fuse
->uV
> fuse
->max_uV
)
962 fuse
->uV
= fuse
->max_uV
;
963 else if (fuse
->uV
< fuse
->min_uV
)
964 fuse
->uV
= fuse
->min_uV
;
966 ret
= regulator_is_supported_voltage(drv
->vdd_apc
,
971 "min uV: %d (fuse corner: %d) not supported by regulator\n",
976 ret
= regulator_is_supported_voltage(drv
->vdd_apc
,
981 "max uV: %d (fuse corner: %d) not supported by regulator\n",
987 "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
988 i
, fuse
->min_uV
, fuse
->uV
, fuse
->max_uV
,
989 fuse
->ring_osc_idx
, fuse
->quot
, fuse
->step_quot
);
995 static int cpr_calculate_scaling(const char *quot_offset
,
997 const struct fuse_corner_data
*fdata
,
998 const struct corner
*corner
)
1001 unsigned long freq_diff
;
1003 const struct fuse_corner
*fuse
, *prev_fuse
;
1006 fuse
= corner
->fuse_corner
;
1007 prev_fuse
= fuse
- 1;
1010 ret
= cpr_read_efuse(drv
->dev
, quot_offset
, "_diff
);
1014 quot_diff
*= fdata
->quot_offset_scale
;
1015 quot_diff
+= fdata
->quot_offset_adjust
;
1017 quot_diff
= fuse
->quot
- prev_fuse
->quot
;
1020 freq_diff
= fuse
->max_freq
- prev_fuse
->max_freq
;
1021 freq_diff
/= 1000000; /* Convert to MHz */
1022 scaling
= 1000 * quot_diff
/ freq_diff
;
1023 return min(scaling
, fdata
->max_quot_scale
);
1026 static int cpr_interpolate(const struct corner
*corner
, int step_volt
,
1027 const struct fuse_corner_data
*fdata
)
1029 unsigned long f_high
, f_low
, f_diff
;
1030 int uV_high
, uV_low
, uV
;
1031 u64 temp
, temp_limit
;
1032 const struct fuse_corner
*fuse
, *prev_fuse
;
1034 fuse
= corner
->fuse_corner
;
1035 prev_fuse
= fuse
- 1;
1037 f_high
= fuse
->max_freq
;
1038 f_low
= prev_fuse
->max_freq
;
1040 uV_low
= prev_fuse
->uV
;
1041 f_diff
= fuse
->max_freq
- corner
->freq
;
1044 * Don't interpolate in the wrong direction. This could happen
1045 * if the adjusted fuse voltage overlaps with the previous fuse's
1048 if (f_high
<= f_low
|| uV_high
<= uV_low
|| f_high
<= corner
->freq
)
1051 temp
= f_diff
* (uV_high
- uV_low
);
1052 do_div(temp
, f_high
- f_low
);
1055 * max_volt_scale has units of uV/MHz while freq values
1056 * have units of Hz. Divide by 1000000 to convert to.
1058 temp_limit
= f_diff
* fdata
->max_volt_scale
;
1059 do_div(temp_limit
, 1000000);
1061 uV
= uV_high
- min(temp
, temp_limit
);
1062 return roundup(uV
, step_volt
);
/*
 * Read "qcom,opp-fuse-level" from an OPP's device-tree node; returns 0
 * (and logs an error) when the property is absent.
 *
 * NOTE(review): the of_node_put() and final return were lost in
 * extraction and have been restored.
 */
static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
{
	struct device_node *np;
	unsigned int fuse_corner = 0;

	np = dev_pm_opp_get_of_node(opp);
	if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
		pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
		       __func__);

	of_node_put(np);

	return fuse_corner;
}
1080 static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp
*ref
,
1081 struct device
*cpu_dev
)
1084 struct device_node
*ref_np
;
1085 struct device_node
*desc_np
;
1086 struct device_node
*child_np
= NULL
;
1087 struct device_node
*child_req_np
= NULL
;
1089 desc_np
= dev_pm_opp_of_get_opp_desc_node(cpu_dev
);
1093 ref_np
= dev_pm_opp_get_of_node(ref
);
1098 of_node_put(child_req_np
);
1099 child_np
= of_get_next_available_child(desc_np
, child_np
);
1100 child_req_np
= of_parse_phandle(child_np
, "required-opps", 0);
1101 } while (child_np
&& child_req_np
!= ref_np
);
1103 if (child_np
&& child_req_np
== ref_np
)
1104 of_property_read_u64(child_np
, "opp-hz", &rate
);
1106 of_node_put(child_req_np
);
1107 of_node_put(child_np
);
1108 of_node_put(ref_np
);
1110 of_node_put(desc_np
);
1112 return (unsigned long) rate
;
1115 static int cpr_corner_init(struct cpr_drv
*drv
)
1117 const struct cpr_desc
*desc
= drv
->desc
;
1118 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
1119 int i
, level
, scaling
= 0;
1120 unsigned int fnum
, fc
;
1121 const char *quot_offset
;
1122 struct fuse_corner
*fuse
, *prev_fuse
;
1123 struct corner
*corner
, *end
;
1124 struct corner_data
*cdata
;
1125 const struct fuse_corner_data
*fdata
;
1127 unsigned long freq_diff
, freq_diff_mhz
;
1129 int step_volt
= regulator_get_linear_step(drv
->vdd_apc
);
1130 struct dev_pm_opp
*opp
;
1135 corner
= drv
->corners
;
1136 end
= &corner
[drv
->num_corners
- 1];
1138 cdata
= devm_kcalloc(drv
->dev
, drv
->num_corners
,
1139 sizeof(struct corner_data
),
1145 * Store maximum frequency for each fuse corner based on the frequency
1148 for (level
= 1; level
<= drv
->num_corners
; level
++) {
1149 opp
= dev_pm_opp_find_level_exact(&drv
->pd
.dev
, level
);
1152 fc
= cpr_get_fuse_corner(opp
);
1154 dev_pm_opp_put(opp
);
1158 freq
= cpr_get_opp_hz_for_req(opp
, drv
->attached_cpu_dev
);
1160 dev_pm_opp_put(opp
);
1163 cdata
[level
- 1].fuse_corner
= fnum
;
1164 cdata
[level
- 1].freq
= freq
;
1166 fuse
= &drv
->fuse_corners
[fnum
];
1167 dev_dbg(drv
->dev
, "freq: %lu level: %u fuse level: %u\n",
1168 freq
, dev_pm_opp_get_level(opp
) - 1, fnum
);
1169 if (freq
> fuse
->max_freq
)
1170 fuse
->max_freq
= freq
;
1171 dev_pm_opp_put(opp
);
1175 * Get the quotient adjustment scaling factor, according to:
1177 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
1178 * / (freq(corner_N) - freq(corner_N-1)), max_factor)
1180 * QUOT(corner_N): quotient read from fuse for fuse corner N
1181 * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
1182 * freq(corner_N): max frequency in MHz supported by fuse corner N
1183 * freq(corner_N-1): max frequency in MHz supported by fuse corner
1186 * Then walk through the corners mapped to each fuse corner
1187 * and calculate the quotient adjustment for each one using the
1188 * following formula:
1190 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
1192 * freq_max: max frequency in MHz supported by the fuse corner
1193 * freq_corner: frequency in MHz corresponding to the corner
1194 * scaling: calculated from above equation
1205 * +--------------- +----------------
1206 * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
1213 for (apply_scaling
= false, i
= 0; corner
<= end
; corner
++, i
++) {
1214 fnum
= cdata
[i
].fuse_corner
;
1215 fdata
= &desc
->cpr_fuses
.fuse_corner_data
[fnum
];
1216 quot_offset
= fuses
[fnum
].quotient_offset
;
1217 fuse
= &drv
->fuse_corners
[fnum
];
1219 prev_fuse
= &drv
->fuse_corners
[fnum
- 1];
1223 corner
->fuse_corner
= fuse
;
1224 corner
->freq
= cdata
[i
].freq
;
1225 corner
->uV
= fuse
->uV
;
1227 if (prev_fuse
&& cdata
[i
- 1].freq
== prev_fuse
->max_freq
) {
1228 scaling
= cpr_calculate_scaling(quot_offset
, drv
,
1233 apply_scaling
= true;
1234 } else if (corner
->freq
== fuse
->max_freq
) {
1235 /* This is a fuse corner; don't scale anything */
1236 apply_scaling
= false;
1239 if (apply_scaling
) {
1240 freq_diff
= fuse
->max_freq
- corner
->freq
;
1241 freq_diff_mhz
= freq_diff
/ 1000000;
1242 corner
->quot_adjust
= scaling
* freq_diff_mhz
/ 1000;
1244 corner
->uV
= cpr_interpolate(corner
, step_volt
, fdata
);
1247 corner
->max_uV
= fuse
->max_uV
;
1248 corner
->min_uV
= fuse
->min_uV
;
1249 corner
->uV
= clamp(corner
->uV
, corner
->min_uV
, corner
->max_uV
);
1250 corner
->last_uV
= corner
->uV
;
1252 /* Reduce the ceiling voltage if needed */
1253 if (desc
->reduce_to_corner_uV
&& corner
->uV
< corner
->max_uV
)
1254 corner
->max_uV
= corner
->uV
;
1255 else if (desc
->reduce_to_fuse_uV
&& fuse
->uV
< corner
->max_uV
)
1256 corner
->max_uV
= max(corner
->min_uV
, fuse
->uV
);
1258 dev_dbg(drv
->dev
, "corner %d: [%d %d %d] quot %d\n", i
,
1259 corner
->min_uV
, corner
->uV
, corner
->max_uV
,
1260 fuse
->quot
- corner
->quot_adjust
);
1266 static const struct cpr_fuse
*cpr_get_fuses(struct cpr_drv
*drv
)
1268 const struct cpr_desc
*desc
= drv
->desc
;
1269 struct cpr_fuse
*fuses
;
1272 fuses
= devm_kcalloc(drv
->dev
, desc
->num_fuse_corners
,
1273 sizeof(struct cpr_fuse
),
1276 return ERR_PTR(-ENOMEM
);
1278 for (i
= 0; i
< desc
->num_fuse_corners
; i
++) {
1281 snprintf(tbuf
, 32, "cpr_ring_osc%d", i
+ 1);
1282 fuses
[i
].ring_osc
= devm_kstrdup(drv
->dev
, tbuf
, GFP_KERNEL
);
1283 if (!fuses
[i
].ring_osc
)
1284 return ERR_PTR(-ENOMEM
);
1286 snprintf(tbuf
, 32, "cpr_init_voltage%d", i
+ 1);
1287 fuses
[i
].init_voltage
= devm_kstrdup(drv
->dev
, tbuf
,
1289 if (!fuses
[i
].init_voltage
)
1290 return ERR_PTR(-ENOMEM
);
1292 snprintf(tbuf
, 32, "cpr_quotient%d", i
+ 1);
1293 fuses
[i
].quotient
= devm_kstrdup(drv
->dev
, tbuf
, GFP_KERNEL
);
1294 if (!fuses
[i
].quotient
)
1295 return ERR_PTR(-ENOMEM
);
1297 snprintf(tbuf
, 32, "cpr_quotient_offset%d", i
+ 1);
1298 fuses
[i
].quotient_offset
= devm_kstrdup(drv
->dev
, tbuf
,
1300 if (!fuses
[i
].quotient_offset
)
1301 return ERR_PTR(-ENOMEM
);
1307 static void cpr_set_loop_allowed(struct cpr_drv
*drv
)
1309 drv
->loop_disabled
= false;
1312 static int cpr_init_parameters(struct cpr_drv
*drv
)
1314 const struct cpr_desc
*desc
= drv
->desc
;
1317 clk
= clk_get(drv
->dev
, "ref");
1319 return PTR_ERR(clk
);
1321 drv
->ref_clk_khz
= clk_get_rate(clk
) / 1000;
1324 if (desc
->timer_cons_up
> RBIF_TIMER_ADJ_CONS_UP_MASK
||
1325 desc
->timer_cons_down
> RBIF_TIMER_ADJ_CONS_DOWN_MASK
||
1326 desc
->up_threshold
> RBCPR_CTL_UP_THRESHOLD_MASK
||
1327 desc
->down_threshold
> RBCPR_CTL_DN_THRESHOLD_MASK
||
1328 desc
->idle_clocks
> RBCPR_STEP_QUOT_IDLE_CLK_MASK
||
1329 desc
->clamp_timer_interval
> RBIF_TIMER_ADJ_CLAMP_INT_MASK
)
1332 dev_dbg(drv
->dev
, "up threshold = %u, down threshold = %u\n",
1333 desc
->up_threshold
, desc
->down_threshold
);
1338 static int cpr_find_initial_corner(struct cpr_drv
*drv
)
1341 const struct corner
*end
;
1342 struct corner
*iter
;
1345 if (!drv
->cpu_clk
) {
1346 dev_err(drv
->dev
, "cannot get rate from NULL clk\n");
1350 end
= &drv
->corners
[drv
->num_corners
- 1];
1351 rate
= clk_get_rate(drv
->cpu_clk
);
1354 * Some bootloaders set a CPU clock frequency that is not defined
1355 * in the OPP table. When running at an unlisted frequency,
1356 * cpufreq_online() will change to the OPP which has the lowest
1357 * frequency, at or above the unlisted frequency.
1358 * Since cpufreq_online() always "rounds up" in the case of an
1359 * unlisted frequency, this function always "rounds down" in case
1360 * of an unlisted frequency. That way, when cpufreq_online()
1361 * triggers the first ever call to cpr_set_performance_state(),
1362 * it will correctly determine the direction as UP.
1364 for (iter
= drv
->corners
; iter
<= end
; iter
++) {
1365 if (iter
->freq
> rate
)
1368 if (iter
->freq
== rate
) {
1372 if (iter
->freq
< rate
)
1377 dev_err(drv
->dev
, "boot up corner not found\n");
1381 dev_dbg(drv
->dev
, "boot up perf state: %u\n", i
);
1386 static const struct cpr_desc qcs404_cpr_desc
= {
1387 .num_fuse_corners
= 3,
1388 .min_diff_quot
= CPR_FUSE_MIN_QUOT_DIFF
,
1389 .step_quot
= (int []){ 25, 25, 25, },
1390 .timer_delay_us
= 5000,
1392 .timer_cons_down
= 2,
1394 .down_threshold
= 3,
1397 .vdd_apc_step_up_limit
= 1,
1398 .vdd_apc_step_down_limit
= 1,
1400 .init_voltage_step
= 8000,
1401 .init_voltage_width
= 6,
1402 .fuse_corner_data
= (struct fuse_corner_data
[]){
1408 .max_volt_scale
= 0,
1409 .max_quot_scale
= 0,
1413 .quot_offset_scale
= 5,
1414 .quot_offset_adjust
= 0,
1421 .max_volt_scale
= 2000,
1422 .max_quot_scale
= 1400,
1426 .quot_offset_scale
= 5,
1427 .quot_offset_adjust
= 0,
1434 .max_volt_scale
= 2000,
1435 .max_quot_scale
= 1400,
1439 .quot_offset_scale
= 5,
1440 .quot_offset_adjust
= 0,
1446 static const struct acc_desc qcs404_acc_desc
= {
1447 .settings
= (struct reg_sequence
[]){
1448 { 0xb120, 0x1041040 },
1455 .config
= (struct reg_sequence
[]){
1459 .num_regs_per_fuse
= 2,
1462 static const struct cpr_acc_desc qcs404_cpr_acc_desc
= {
1463 .cpr_desc
= &qcs404_cpr_desc
,
1464 .acc_desc
= &qcs404_acc_desc
,
/*
 * genpd .opp_to_performance_state callback: the performance state is
 * simply the OPP's level.
 */
static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
					      struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}
1473 static int cpr_power_off(struct generic_pm_domain
*domain
)
1475 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1477 return cpr_disable(drv
);
1480 static int cpr_power_on(struct generic_pm_domain
*domain
)
1482 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1484 return cpr_enable(drv
);
1487 static int cpr_pd_attach_dev(struct generic_pm_domain
*domain
,
1490 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1491 const struct acc_desc
*acc_desc
= drv
->acc_desc
;
1494 mutex_lock(&drv
->lock
);
1496 dev_dbg(drv
->dev
, "attach callback for: %s\n", dev_name(dev
));
1499 * This driver only supports scaling voltage for a CPU cluster
1500 * where all CPUs in the cluster share a single regulator.
1501 * Therefore, save the struct device pointer only for the first
1502 * CPU device that gets attached. There is no need to do any
1503 * additional initialization when further CPUs get attached.
1505 if (drv
->attached_cpu_dev
)
1509 * cpr_scale_voltage() requires the direction (if we are changing
1510 * to a higher or lower OPP). The first time
1511 * cpr_set_performance_state() is called, there is no previous
1512 * performance state defined. Therefore, we call
1513 * cpr_find_initial_corner() that gets the CPU clock frequency
1514 * set by the bootloader, so that we can determine the direction
1515 * the first time cpr_set_performance_state() is called.
1517 drv
->cpu_clk
= devm_clk_get(dev
, NULL
);
1518 if (IS_ERR(drv
->cpu_clk
)) {
1519 ret
= PTR_ERR(drv
->cpu_clk
);
1520 if (ret
!= -EPROBE_DEFER
)
1521 dev_err(drv
->dev
, "could not get cpu clk: %d\n", ret
);
1524 drv
->attached_cpu_dev
= dev
;
1526 dev_dbg(drv
->dev
, "using cpu clk from: %s\n",
1527 dev_name(drv
->attached_cpu_dev
));
1530 * Everything related to (virtual) corners has to be initialized
1531 * here, when attaching to the power domain, since we need to know
1532 * the maximum frequency for each fuse corner, and this is only
1533 * available after the cpufreq driver has attached to us.
1534 * The reason for this is that we need to know the highest
1535 * frequency associated with each fuse corner.
1537 ret
= dev_pm_opp_get_opp_count(&drv
->pd
.dev
);
1539 dev_err(drv
->dev
, "could not get OPP count\n");
1542 drv
->num_corners
= ret
;
1544 if (drv
->num_corners
< 2) {
1545 dev_err(drv
->dev
, "need at least 2 OPPs to use CPR\n");
1550 dev_dbg(drv
->dev
, "number of OPPs: %d\n", drv
->num_corners
);
1552 drv
->corners
= devm_kcalloc(drv
->dev
, drv
->num_corners
,
1553 sizeof(*drv
->corners
),
1555 if (!drv
->corners
) {
1560 ret
= cpr_corner_init(drv
);
1564 cpr_set_loop_allowed(drv
);
1566 ret
= cpr_init_parameters(drv
);
1570 /* Configure CPR HW but keep it disabled */
1571 ret
= cpr_config(drv
);
1575 ret
= cpr_find_initial_corner(drv
);
1579 if (acc_desc
->config
)
1580 regmap_multi_reg_write(drv
->tcsr
, acc_desc
->config
,
1581 acc_desc
->num_regs_per_fuse
);
1583 /* Enable ACC if required */
1584 if (acc_desc
->enable_mask
)
1585 regmap_update_bits(drv
->tcsr
, acc_desc
->enable_reg
,
1586 acc_desc
->enable_mask
,
1587 acc_desc
->enable_mask
);
1590 mutex_unlock(&drv
->lock
);
1595 static int cpr_debug_info_show(struct seq_file
*s
, void *unused
)
1597 u32 gcnt
, ro_sel
, ctl
, irq_status
, reg
, error_steps
;
1598 u32 step_dn
, step_up
, error
, error_lt0
, busy
;
1599 struct cpr_drv
*drv
= s
->private;
1600 struct fuse_corner
*fuse_corner
;
1601 struct corner
*corner
;
1603 corner
= drv
->corner
;
1604 fuse_corner
= corner
->fuse_corner
;
1606 seq_printf(s
, "corner, current_volt = %d uV\n",
1609 ro_sel
= fuse_corner
->ring_osc_idx
;
1610 gcnt
= cpr_read(drv
, REG_RBCPR_GCNT_TARGET(ro_sel
));
1611 seq_printf(s
, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel
, gcnt
);
1613 ctl
= cpr_read(drv
, REG_RBCPR_CTL
);
1614 seq_printf(s
, "rbcpr_ctl = %#02X\n", ctl
);
1616 irq_status
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
1617 seq_printf(s
, "rbcpr_irq_status = %#02X\n", irq_status
);
1619 reg
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
1620 seq_printf(s
, "rbcpr_result_0 = %#02X\n", reg
);
1622 step_dn
= reg
& 0x01;
1623 step_up
= (reg
>> RBCPR_RESULT0_STEP_UP_SHIFT
) & 0x01;
1624 seq_printf(s
, " [step_dn = %u", step_dn
);
1626 seq_printf(s
, ", step_up = %u", step_up
);
1628 error_steps
= (reg
>> RBCPR_RESULT0_ERROR_STEPS_SHIFT
)
1629 & RBCPR_RESULT0_ERROR_STEPS_MASK
;
1630 seq_printf(s
, ", error_steps = %u", error_steps
);
1632 error
= (reg
>> RBCPR_RESULT0_ERROR_SHIFT
) & RBCPR_RESULT0_ERROR_MASK
;
1633 seq_printf(s
, ", error = %u", error
);
1635 error_lt0
= (reg
>> RBCPR_RESULT0_ERROR_LT0_SHIFT
) & 0x01;
1636 seq_printf(s
, ", error_lt_0 = %u", error_lt0
);
1638 busy
= (reg
>> RBCPR_RESULT0_BUSY_SHIFT
) & 0x01;
1639 seq_printf(s
, ", busy = %u]\n", busy
);
1643 DEFINE_SHOW_ATTRIBUTE(cpr_debug_info
);
1645 static void cpr_debugfs_init(struct cpr_drv
*drv
)
1647 drv
->debugfs
= debugfs_create_dir("qcom_cpr", NULL
);
1649 debugfs_create_file("debug_info", 0444, drv
->debugfs
,
1650 drv
, &cpr_debug_info_fops
);
1653 static int cpr_probe(struct platform_device
*pdev
)
1655 struct resource
*res
;
1656 struct device
*dev
= &pdev
->dev
;
1657 struct cpr_drv
*drv
;
1659 const struct cpr_acc_desc
*data
;
1660 struct device_node
*np
;
1661 u32 cpr_rev
= FUSE_REVISION_UNKNOWN
;
1663 data
= of_device_get_match_data(dev
);
1664 if (!data
|| !data
->cpr_desc
|| !data
->acc_desc
)
1667 drv
= devm_kzalloc(dev
, sizeof(*drv
), GFP_KERNEL
);
1671 drv
->desc
= data
->cpr_desc
;
1672 drv
->acc_desc
= data
->acc_desc
;
1674 drv
->fuse_corners
= devm_kcalloc(dev
, drv
->desc
->num_fuse_corners
,
1675 sizeof(*drv
->fuse_corners
),
1677 if (!drv
->fuse_corners
)
1680 np
= of_parse_phandle(dev
->of_node
, "acc-syscon", 0);
1684 drv
->tcsr
= syscon_node_to_regmap(np
);
1686 if (IS_ERR(drv
->tcsr
))
1687 return PTR_ERR(drv
->tcsr
);
1689 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1690 drv
->base
= devm_ioremap_resource(dev
, res
);
1691 if (IS_ERR(drv
->base
))
1692 return PTR_ERR(drv
->base
);
1694 irq
= platform_get_irq(pdev
, 0);
1698 drv
->vdd_apc
= devm_regulator_get(dev
, "vdd-apc");
1699 if (IS_ERR(drv
->vdd_apc
))
1700 return PTR_ERR(drv
->vdd_apc
);
1703 * Initialize fuse corners, since it simply depends
1704 * on data in efuses.
1705 * Everything related to (virtual) corners has to be
1706 * initialized after attaching to the power domain,
1707 * since it depends on the CPU's OPP table.
1709 ret
= cpr_read_efuse(dev
, "cpr_fuse_revision", &cpr_rev
);
1713 drv
->cpr_fuses
= cpr_get_fuses(drv
);
1714 if (IS_ERR(drv
->cpr_fuses
))
1715 return PTR_ERR(drv
->cpr_fuses
);
1717 ret
= cpr_populate_ring_osc_idx(drv
);
1721 ret
= cpr_fuse_corner_init(drv
);
1725 mutex_init(&drv
->lock
);
1727 ret
= devm_request_threaded_irq(dev
, irq
, NULL
,
1729 IRQF_ONESHOT
| IRQF_TRIGGER_RISING
,
1734 drv
->pd
.name
= devm_kstrdup_const(dev
, dev
->of_node
->full_name
,
1739 drv
->pd
.power_off
= cpr_power_off
;
1740 drv
->pd
.power_on
= cpr_power_on
;
1741 drv
->pd
.set_performance_state
= cpr_set_performance_state
;
1742 drv
->pd
.opp_to_performance_state
= cpr_get_performance_state
;
1743 drv
->pd
.attach_dev
= cpr_pd_attach_dev
;
1745 ret
= pm_genpd_init(&drv
->pd
, NULL
, true);
1749 ret
= of_genpd_add_provider_simple(dev
->of_node
, &drv
->pd
);
1753 platform_set_drvdata(pdev
, drv
);
1754 cpr_debugfs_init(drv
);
1759 static int cpr_remove(struct platform_device
*pdev
)
1761 struct cpr_drv
*drv
= platform_get_drvdata(pdev
);
1763 if (cpr_is_allowed(drv
)) {
1764 cpr_ctl_disable(drv
);
1765 cpr_irq_set(drv
, 0);
1768 of_genpd_del_provider(pdev
->dev
.of_node
);
1769 pm_genpd_remove(&drv
->pd
);
1771 debugfs_remove_recursive(drv
->debugfs
);
1776 static const struct of_device_id cpr_match_table
[] = {
1777 { .compatible
= "qcom,qcs404-cpr", .data
= &qcs404_cpr_acc_desc
},
1780 MODULE_DEVICE_TABLE(of
, cpr_match_table
);
1782 static struct platform_driver cpr_driver
= {
1784 .remove
= cpr_remove
,
1787 .of_match_table
= cpr_match_table
,
1790 module_platform_driver(cpr_driver
);
1792 MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
1793 MODULE_LICENSE("GPL v2");