1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 * Copyright (c) 2019, Linaro Limited
 */
7 #include <linux/module.h>
9 #include <linux/debugfs.h>
10 #include <linux/string.h>
11 #include <linux/kernel.h>
12 #include <linux/list.h>
13 #include <linux/init.h>
15 #include <linux/bitops.h>
16 #include <linux/slab.h>
18 #include <linux/of_device.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_domain.h>
21 #include <linux/pm_opp.h>
22 #include <linux/interrupt.h>
23 #include <linux/regmap.h>
24 #include <linux/mfd/syscon.h>
25 #include <linux/regulator/consumer.h>
26 #include <linux/clk.h>
27 #include <linux/nvmem-consumer.h>
29 /* Register Offsets for RB-CPR and Bit Definitions */
31 /* RBCPR Version Register */
32 #define REG_RBCPR_VERSION 0
33 #define RBCPR_VER_2 0x02
34 #define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
36 /* RBCPR Gate Count and Target Registers */
37 #define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
39 #define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
40 #define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
41 #define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
42 #define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
44 /* RBCPR Timer Control */
45 #define REG_RBCPR_TIMER_INTERVAL 0x44
46 #define REG_RBIF_TIMER_ADJUST 0x4c
48 #define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
49 #define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
50 #define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
51 #define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
52 #define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
53 #define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
55 /* RBCPR Config Register */
56 #define REG_RBIF_LIMIT 0x48
57 #define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
58 #define RBIF_LIMIT_CEILING_SHIFT 6
59 #define RBIF_LIMIT_FLOOR_BITS 6
60 #define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
62 #define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
63 #define RBIF_LIMIT_FLOOR_DEFAULT 0
65 #define REG_RBIF_SW_VLEVEL 0x94
66 #define RBIF_SW_VLEVEL_DEFAULT 0x20
68 #define REG_RBCPR_STEP_QUOT 0x80
69 #define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
70 #define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
71 #define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
73 /* RBCPR Control Register */
74 #define REG_RBCPR_CTL 0x90
76 #define RBCPR_CTL_LOOP_EN BIT(0)
77 #define RBCPR_CTL_TIMER_EN BIT(3)
78 #define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
79 #define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
80 #define RBCPR_CTL_COUNT_MODE BIT(10)
81 #define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
82 #define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
83 #define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
84 #define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
86 /* RBCPR Ack/Nack Response */
87 #define REG_RBIF_CONT_ACK_CMD 0x98
88 #define REG_RBIF_CONT_NACK_CMD 0x9c
90 /* RBCPR Result status Register */
91 #define REG_RBCPR_RESULT_0 0xa0
93 #define RBCPR_RESULT0_BUSY_SHIFT 19
94 #define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
95 #define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
96 #define RBCPR_RESULT0_ERROR_SHIFT 6
97 #define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
98 #define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
99 #define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
100 #define RBCPR_RESULT0_STEP_UP_SHIFT 1
102 /* RBCPR Interrupt Control Register */
103 #define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
104 #define REG_RBIF_IRQ_CLEAR 0x110
105 #define REG_RBIF_IRQ_STATUS 0x114
107 #define CPR_INT_DONE BIT(0)
108 #define CPR_INT_MIN BIT(1)
109 #define CPR_INT_DOWN BIT(2)
110 #define CPR_INT_MID BIT(3)
111 #define CPR_INT_UP BIT(4)
112 #define CPR_INT_MAX BIT(5)
113 #define CPR_INT_CLAMP BIT(6)
114 #define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
115 CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
116 #define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
118 #define CPR_NUM_RING_OSC 8
120 /* CPR eFuse parameters */
121 #define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
123 #define CPR_FUSE_MIN_QUOT_DIFF 50
125 #define FUSE_REVISION_UNKNOWN (-1)
127 enum voltage_change_dir
{
137 char *quotient_offset
;
140 struct fuse_corner_data
{
150 /* fuse quot_offset */
151 int quot_offset_scale
;
152 int quot_offset_adjust
;
156 int init_voltage_step
;
157 int init_voltage_width
;
158 struct fuse_corner_data
*fuse_corner_data
;
162 unsigned int fuse_corner
;
167 unsigned int num_fuse_corners
;
171 unsigned int timer_delay_us
;
172 unsigned int timer_cons_up
;
173 unsigned int timer_cons_down
;
174 unsigned int up_threshold
;
175 unsigned int down_threshold
;
176 unsigned int idle_clocks
;
177 unsigned int gcnt_us
;
178 unsigned int vdd_apc_step_up_limit
;
179 unsigned int vdd_apc_step_down_limit
;
180 unsigned int clamp_timer_interval
;
182 struct cpr_fuses cpr_fuses
;
183 bool reduce_to_fuse_uV
;
184 bool reduce_to_corner_uV
;
188 unsigned int enable_reg
;
191 struct reg_sequence
*config
;
192 struct reg_sequence
*settings
;
193 int num_regs_per_fuse
;
196 struct cpr_acc_desc
{
197 const struct cpr_desc
*cpr_desc
;
198 const struct acc_desc
*acc_desc
;
207 const struct reg_sequence
*accs
;
209 unsigned long max_freq
;
222 struct fuse_corner
*fuse_corner
;
226 unsigned int num_corners
;
227 unsigned int ref_clk_khz
;
229 struct generic_pm_domain pd
;
231 struct device
*attached_cpu_dev
;
234 struct corner
*corner
;
235 struct regulator
*vdd_apc
;
242 struct fuse_corner
*fuse_corners
;
243 struct corner
*corners
;
245 const struct cpr_desc
*desc
;
246 const struct acc_desc
*acc_desc
;
247 const struct cpr_fuse
*cpr_fuses
;
249 struct dentry
*debugfs
;
252 static bool cpr_is_allowed(struct cpr_drv
*drv
)
254 return !drv
->loop_disabled
;
257 static void cpr_write(struct cpr_drv
*drv
, u32 offset
, u32 value
)
259 writel_relaxed(value
, drv
->base
+ offset
);
262 static u32
cpr_read(struct cpr_drv
*drv
, u32 offset
)
264 return readl_relaxed(drv
->base
+ offset
);
268 cpr_masked_write(struct cpr_drv
*drv
, u32 offset
, u32 mask
, u32 value
)
272 val
= readl_relaxed(drv
->base
+ offset
);
275 writel_relaxed(val
, drv
->base
+ offset
);
278 static void cpr_irq_clr(struct cpr_drv
*drv
)
280 cpr_write(drv
, REG_RBIF_IRQ_CLEAR
, CPR_INT_ALL
);
283 static void cpr_irq_clr_nack(struct cpr_drv
*drv
)
286 cpr_write(drv
, REG_RBIF_CONT_NACK_CMD
, 1);
289 static void cpr_irq_clr_ack(struct cpr_drv
*drv
)
292 cpr_write(drv
, REG_RBIF_CONT_ACK_CMD
, 1);
295 static void cpr_irq_set(struct cpr_drv
*drv
, u32 int_bits
)
297 cpr_write(drv
, REG_RBIF_IRQ_EN(0), int_bits
);
300 static void cpr_ctl_modify(struct cpr_drv
*drv
, u32 mask
, u32 value
)
302 cpr_masked_write(drv
, REG_RBCPR_CTL
, mask
, value
);
305 static void cpr_ctl_enable(struct cpr_drv
*drv
, struct corner
*corner
)
308 const struct cpr_desc
*desc
= drv
->desc
;
310 /* Program Consecutive Up & Down */
311 val
= desc
->timer_cons_down
<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT
;
312 val
|= desc
->timer_cons_up
<< RBIF_TIMER_ADJ_CONS_UP_SHIFT
;
313 mask
= RBIF_TIMER_ADJ_CONS_UP_MASK
| RBIF_TIMER_ADJ_CONS_DOWN_MASK
;
314 cpr_masked_write(drv
, REG_RBIF_TIMER_ADJUST
, mask
, val
);
315 cpr_masked_write(drv
, REG_RBCPR_CTL
,
316 RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
|
317 RBCPR_CTL_SW_AUTO_CONT_ACK_EN
,
319 cpr_irq_set(drv
, corner
->save_irq
);
321 if (cpr_is_allowed(drv
) && corner
->max_uV
> corner
->min_uV
)
322 val
= RBCPR_CTL_LOOP_EN
;
325 cpr_ctl_modify(drv
, RBCPR_CTL_LOOP_EN
, val
);
328 static void cpr_ctl_disable(struct cpr_drv
*drv
)
331 cpr_ctl_modify(drv
, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
|
332 RBCPR_CTL_SW_AUTO_CONT_ACK_EN
, 0);
333 cpr_masked_write(drv
, REG_RBIF_TIMER_ADJUST
,
334 RBIF_TIMER_ADJ_CONS_UP_MASK
|
335 RBIF_TIMER_ADJ_CONS_DOWN_MASK
, 0);
337 cpr_write(drv
, REG_RBIF_CONT_ACK_CMD
, 1);
338 cpr_write(drv
, REG_RBIF_CONT_NACK_CMD
, 1);
339 cpr_ctl_modify(drv
, RBCPR_CTL_LOOP_EN
, 0);
342 static bool cpr_ctl_is_enabled(struct cpr_drv
*drv
)
346 reg_val
= cpr_read(drv
, REG_RBCPR_CTL
);
347 return reg_val
& RBCPR_CTL_LOOP_EN
;
350 static bool cpr_ctl_is_busy(struct cpr_drv
*drv
)
354 reg_val
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
355 return reg_val
& RBCPR_RESULT0_BUSY_MASK
;
358 static void cpr_corner_save(struct cpr_drv
*drv
, struct corner
*corner
)
360 corner
->save_ctl
= cpr_read(drv
, REG_RBCPR_CTL
);
361 corner
->save_irq
= cpr_read(drv
, REG_RBIF_IRQ_EN(0));
364 static void cpr_corner_restore(struct cpr_drv
*drv
, struct corner
*corner
)
366 u32 gcnt
, ctl
, irq
, ro_sel
, step_quot
;
367 struct fuse_corner
*fuse
= corner
->fuse_corner
;
368 const struct cpr_desc
*desc
= drv
->desc
;
371 ro_sel
= fuse
->ring_osc_idx
;
373 gcnt
|= fuse
->quot
- corner
->quot_adjust
;
375 /* Program the step quotient and idle clocks */
376 step_quot
= desc
->idle_clocks
<< RBCPR_STEP_QUOT_IDLE_CLK_SHIFT
;
377 step_quot
|= fuse
->step_quot
& RBCPR_STEP_QUOT_STEPQUOT_MASK
;
378 cpr_write(drv
, REG_RBCPR_STEP_QUOT
, step_quot
);
380 /* Clear the target quotient value and gate count of all ROs */
381 for (i
= 0; i
< CPR_NUM_RING_OSC
; i
++)
382 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(i
), 0);
384 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(ro_sel
), gcnt
);
385 ctl
= corner
->save_ctl
;
386 cpr_write(drv
, REG_RBCPR_CTL
, ctl
);
387 irq
= corner
->save_irq
;
388 cpr_irq_set(drv
, irq
);
389 dev_dbg(drv
->dev
, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt
,
393 static void cpr_set_acc(struct regmap
*tcsr
, struct fuse_corner
*f
,
394 struct fuse_corner
*end
)
400 for (f
+= 1; f
<= end
; f
++)
401 regmap_multi_reg_write(tcsr
, f
->accs
, f
->num_accs
);
403 for (f
-= 1; f
>= end
; f
--)
404 regmap_multi_reg_write(tcsr
, f
->accs
, f
->num_accs
);
408 static int cpr_pre_voltage(struct cpr_drv
*drv
,
409 struct fuse_corner
*fuse_corner
,
410 enum voltage_change_dir dir
)
412 struct fuse_corner
*prev_fuse_corner
= drv
->corner
->fuse_corner
;
414 if (drv
->tcsr
&& dir
== DOWN
)
415 cpr_set_acc(drv
->tcsr
, prev_fuse_corner
, fuse_corner
);
420 static int cpr_post_voltage(struct cpr_drv
*drv
,
421 struct fuse_corner
*fuse_corner
,
422 enum voltage_change_dir dir
)
424 struct fuse_corner
*prev_fuse_corner
= drv
->corner
->fuse_corner
;
426 if (drv
->tcsr
&& dir
== UP
)
427 cpr_set_acc(drv
->tcsr
, prev_fuse_corner
, fuse_corner
);
432 static int cpr_scale_voltage(struct cpr_drv
*drv
, struct corner
*corner
,
433 int new_uV
, enum voltage_change_dir dir
)
436 struct fuse_corner
*fuse_corner
= corner
->fuse_corner
;
438 ret
= cpr_pre_voltage(drv
, fuse_corner
, dir
);
442 ret
= regulator_set_voltage(drv
->vdd_apc
, new_uV
, new_uV
);
444 dev_err_ratelimited(drv
->dev
, "failed to set apc voltage %d\n",
449 ret
= cpr_post_voltage(drv
, fuse_corner
, dir
);
456 static unsigned int cpr_get_cur_perf_state(struct cpr_drv
*drv
)
458 return drv
->corner
? drv
->corner
- drv
->corners
+ 1 : 0;
461 static int cpr_scale(struct cpr_drv
*drv
, enum voltage_change_dir dir
)
463 u32 val
, error_steps
, reg_mask
;
464 int last_uV
, new_uV
, step_uV
, ret
;
465 struct corner
*corner
;
466 const struct cpr_desc
*desc
= drv
->desc
;
468 if (dir
!= UP
&& dir
!= DOWN
)
471 step_uV
= regulator_get_linear_step(drv
->vdd_apc
);
475 corner
= drv
->corner
;
477 val
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
479 error_steps
= val
>> RBCPR_RESULT0_ERROR_STEPS_SHIFT
;
480 error_steps
&= RBCPR_RESULT0_ERROR_STEPS_MASK
;
481 last_uV
= corner
->last_uV
;
484 if (desc
->clamp_timer_interval
&&
485 error_steps
< desc
->up_threshold
) {
487 * Handle the case where another measurement started
488 * after the interrupt was triggered due to a core
489 * exiting from power collapse.
491 error_steps
= max(desc
->up_threshold
,
492 desc
->vdd_apc_step_up_limit
);
495 if (last_uV
>= corner
->max_uV
) {
496 cpr_irq_clr_nack(drv
);
498 /* Maximize the UP threshold */
499 reg_mask
= RBCPR_CTL_UP_THRESHOLD_MASK
;
500 reg_mask
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
502 cpr_ctl_modify(drv
, reg_mask
, val
);
504 /* Disable UP interrupt */
505 cpr_irq_set(drv
, CPR_INT_DEFAULT
& ~CPR_INT_UP
);
510 if (error_steps
> desc
->vdd_apc_step_up_limit
)
511 error_steps
= desc
->vdd_apc_step_up_limit
;
513 /* Calculate new voltage */
514 new_uV
= last_uV
+ error_steps
* step_uV
;
515 new_uV
= min(new_uV
, corner
->max_uV
);
518 "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
519 new_uV
, last_uV
, cpr_get_cur_perf_state(drv
));
521 if (desc
->clamp_timer_interval
&&
522 error_steps
< desc
->down_threshold
) {
524 * Handle the case where another measurement started
525 * after the interrupt was triggered due to a core
526 * exiting from power collapse.
528 error_steps
= max(desc
->down_threshold
,
529 desc
->vdd_apc_step_down_limit
);
532 if (last_uV
<= corner
->min_uV
) {
533 cpr_irq_clr_nack(drv
);
535 /* Enable auto nack down */
536 reg_mask
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
537 val
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
539 cpr_ctl_modify(drv
, reg_mask
, val
);
541 /* Disable DOWN interrupt */
542 cpr_irq_set(drv
, CPR_INT_DEFAULT
& ~CPR_INT_DOWN
);
547 if (error_steps
> desc
->vdd_apc_step_down_limit
)
548 error_steps
= desc
->vdd_apc_step_down_limit
;
550 /* Calculate new voltage */
551 new_uV
= last_uV
- error_steps
* step_uV
;
552 new_uV
= max(new_uV
, corner
->min_uV
);
555 "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
556 new_uV
, last_uV
, cpr_get_cur_perf_state(drv
));
559 ret
= cpr_scale_voltage(drv
, corner
, new_uV
, dir
);
561 cpr_irq_clr_nack(drv
);
564 drv
->corner
->last_uV
= new_uV
;
567 /* Disable auto nack down */
568 reg_mask
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
571 /* Restore default threshold for UP */
572 reg_mask
= RBCPR_CTL_UP_THRESHOLD_MASK
;
573 reg_mask
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
574 val
= desc
->up_threshold
;
575 val
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
578 cpr_ctl_modify(drv
, reg_mask
, val
);
580 /* Re-enable default interrupts */
581 cpr_irq_set(drv
, CPR_INT_DEFAULT
);
584 cpr_irq_clr_ack(drv
);
589 static irqreturn_t
cpr_irq_handler(int irq
, void *dev
)
591 struct cpr_drv
*drv
= dev
;
592 const struct cpr_desc
*desc
= drv
->desc
;
593 irqreturn_t ret
= IRQ_HANDLED
;
596 mutex_lock(&drv
->lock
);
598 val
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
599 if (drv
->flags
& FLAGS_IGNORE_1ST_IRQ_STATUS
)
600 val
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
602 dev_dbg(drv
->dev
, "IRQ_STATUS = %#02x\n", val
);
604 if (!cpr_ctl_is_enabled(drv
)) {
605 dev_dbg(drv
->dev
, "CPR is disabled\n");
607 } else if (cpr_ctl_is_busy(drv
) && !desc
->clamp_timer_interval
) {
608 dev_dbg(drv
->dev
, "CPR measurement is not ready\n");
609 } else if (!cpr_is_allowed(drv
)) {
610 val
= cpr_read(drv
, REG_RBCPR_CTL
);
611 dev_err_ratelimited(drv
->dev
,
612 "Interrupt broken? RBCPR_CTL = %#02x\n",
617 * Following sequence of handling is as per each IRQ's
620 if (val
& CPR_INT_UP
) {
622 } else if (val
& CPR_INT_DOWN
) {
623 cpr_scale(drv
, DOWN
);
624 } else if (val
& CPR_INT_MIN
) {
625 cpr_irq_clr_nack(drv
);
626 } else if (val
& CPR_INT_MAX
) {
627 cpr_irq_clr_nack(drv
);
628 } else if (val
& CPR_INT_MID
) {
629 /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
630 dev_dbg(drv
->dev
, "IRQ occurred for Mid Flag\n");
633 "IRQ occurred for unknown flag (%#08x)\n", val
);
636 /* Save register values for the corner */
637 cpr_corner_save(drv
, drv
->corner
);
640 mutex_unlock(&drv
->lock
);
645 static int cpr_enable(struct cpr_drv
*drv
)
649 ret
= regulator_enable(drv
->vdd_apc
);
653 mutex_lock(&drv
->lock
);
655 if (cpr_is_allowed(drv
) && drv
->corner
) {
657 cpr_corner_restore(drv
, drv
->corner
);
658 cpr_ctl_enable(drv
, drv
->corner
);
661 mutex_unlock(&drv
->lock
);
666 static int cpr_disable(struct cpr_drv
*drv
)
668 mutex_lock(&drv
->lock
);
670 if (cpr_is_allowed(drv
)) {
671 cpr_ctl_disable(drv
);
675 mutex_unlock(&drv
->lock
);
677 return regulator_disable(drv
->vdd_apc
);
680 static int cpr_config(struct cpr_drv
*drv
)
684 struct corner
*corner
;
685 const struct cpr_desc
*desc
= drv
->desc
;
687 /* Disable interrupt and CPR */
688 cpr_write(drv
, REG_RBIF_IRQ_EN(0), 0);
689 cpr_write(drv
, REG_RBCPR_CTL
, 0);
691 /* Program the default HW ceiling, floor and vlevel */
692 val
= (RBIF_LIMIT_CEILING_DEFAULT
& RBIF_LIMIT_CEILING_MASK
)
693 << RBIF_LIMIT_CEILING_SHIFT
;
694 val
|= RBIF_LIMIT_FLOOR_DEFAULT
& RBIF_LIMIT_FLOOR_MASK
;
695 cpr_write(drv
, REG_RBIF_LIMIT
, val
);
696 cpr_write(drv
, REG_RBIF_SW_VLEVEL
, RBIF_SW_VLEVEL_DEFAULT
);
699 * Clear the target quotient value and gate count of all
702 for (i
= 0; i
< CPR_NUM_RING_OSC
; i
++)
703 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(i
), 0);
705 /* Init and save gcnt */
706 gcnt
= (drv
->ref_clk_khz
* desc
->gcnt_us
) / 1000;
707 gcnt
= gcnt
& RBCPR_GCNT_TARGET_GCNT_MASK
;
708 gcnt
<<= RBCPR_GCNT_TARGET_GCNT_SHIFT
;
711 /* Program the delay count for the timer */
712 val
= (drv
->ref_clk_khz
* desc
->timer_delay_us
) / 1000;
713 cpr_write(drv
, REG_RBCPR_TIMER_INTERVAL
, val
);
714 dev_dbg(drv
->dev
, "Timer count: %#0x (for %d us)\n", val
,
715 desc
->timer_delay_us
);
717 /* Program Consecutive Up & Down */
718 val
= desc
->timer_cons_down
<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT
;
719 val
|= desc
->timer_cons_up
<< RBIF_TIMER_ADJ_CONS_UP_SHIFT
;
720 val
|= desc
->clamp_timer_interval
<< RBIF_TIMER_ADJ_CLAMP_INT_SHIFT
;
721 cpr_write(drv
, REG_RBIF_TIMER_ADJUST
, val
);
723 /* Program the control register */
724 val
= desc
->up_threshold
<< RBCPR_CTL_UP_THRESHOLD_SHIFT
;
725 val
|= desc
->down_threshold
<< RBCPR_CTL_DN_THRESHOLD_SHIFT
;
726 val
|= RBCPR_CTL_TIMER_EN
| RBCPR_CTL_COUNT_MODE
;
727 val
|= RBCPR_CTL_SW_AUTO_CONT_ACK_EN
;
728 cpr_write(drv
, REG_RBCPR_CTL
, val
);
730 for (i
= 0; i
< drv
->num_corners
; i
++) {
731 corner
= &drv
->corners
[i
];
732 corner
->save_ctl
= val
;
733 corner
->save_irq
= CPR_INT_DEFAULT
;
736 cpr_irq_set(drv
, CPR_INT_DEFAULT
);
738 val
= cpr_read(drv
, REG_RBCPR_VERSION
);
739 if (val
<= RBCPR_VER_2
)
740 drv
->flags
|= FLAGS_IGNORE_1ST_IRQ_STATUS
;
745 static int cpr_set_performance_state(struct generic_pm_domain
*domain
,
748 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
749 struct corner
*corner
, *end
;
750 enum voltage_change_dir dir
;
753 mutex_lock(&drv
->lock
);
755 dev_dbg(drv
->dev
, "%s: setting perf state: %u (prev state: %u)\n",
756 __func__
, state
, cpr_get_cur_perf_state(drv
));
759 * Determine new corner we're going to.
760 * Remove one since lowest performance state is 1.
762 corner
= drv
->corners
+ state
- 1;
763 end
= &drv
->corners
[drv
->num_corners
- 1];
764 if (corner
> end
|| corner
< drv
->corners
) {
769 /* Determine direction */
770 if (drv
->corner
> corner
)
772 else if (drv
->corner
< corner
)
777 if (cpr_is_allowed(drv
))
778 new_uV
= corner
->last_uV
;
782 if (cpr_is_allowed(drv
))
783 cpr_ctl_disable(drv
);
785 ret
= cpr_scale_voltage(drv
, corner
, new_uV
, dir
);
789 if (cpr_is_allowed(drv
)) {
791 if (drv
->corner
!= corner
)
792 cpr_corner_restore(drv
, corner
);
793 cpr_ctl_enable(drv
, corner
);
796 drv
->corner
= corner
;
799 mutex_unlock(&drv
->lock
);
804 static int cpr_read_efuse(struct device
*dev
, const char *cname
, u32
*data
)
806 struct nvmem_cell
*cell
;
813 cell
= nvmem_cell_get(dev
, cname
);
815 if (PTR_ERR(cell
) != -EPROBE_DEFER
)
816 dev_err(dev
, "undefined cell %s\n", cname
);
817 return PTR_ERR(cell
);
820 ret
= nvmem_cell_read(cell
, &len
);
821 nvmem_cell_put(cell
);
823 dev_err(dev
, "can't read cell %s\n", cname
);
827 for (i
= 0; i
< len
; i
++)
828 *data
|= ret
[i
] << (8 * i
);
831 dev_dbg(dev
, "efuse read(%s) = %x, bytes %zd\n", cname
, *data
, len
);
837 cpr_populate_ring_osc_idx(struct cpr_drv
*drv
)
839 struct fuse_corner
*fuse
= drv
->fuse_corners
;
840 struct fuse_corner
*end
= fuse
+ drv
->desc
->num_fuse_corners
;
841 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
845 for (; fuse
< end
; fuse
++, fuses
++) {
846 ret
= cpr_read_efuse(drv
->dev
, fuses
->ring_osc
,
850 fuse
->ring_osc_idx
= data
;
856 static int cpr_read_fuse_uV(const struct cpr_desc
*desc
,
857 const struct fuse_corner_data
*fdata
,
858 const char *init_v_efuse
,
862 int step_size_uV
, steps
, uV
;
866 ret
= cpr_read_efuse(drv
->dev
, init_v_efuse
, &bits
);
870 steps
= bits
& ~BIT(desc
->cpr_fuses
.init_voltage_width
- 1);
871 /* Not two's complement.. instead highest bit is sign bit */
872 if (bits
& BIT(desc
->cpr_fuses
.init_voltage_width
- 1))
875 step_size_uV
= desc
->cpr_fuses
.init_voltage_step
;
877 uV
= fdata
->ref_uV
+ steps
* step_size_uV
;
878 return DIV_ROUND_UP(uV
, step_volt
) * step_volt
;
881 static int cpr_fuse_corner_init(struct cpr_drv
*drv
)
883 const struct cpr_desc
*desc
= drv
->desc
;
884 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
885 const struct acc_desc
*acc_desc
= drv
->acc_desc
;
887 unsigned int step_volt
;
888 struct fuse_corner_data
*fdata
;
889 struct fuse_corner
*fuse
, *end
;
891 const struct reg_sequence
*accs
;
894 accs
= acc_desc
->settings
;
896 step_volt
= regulator_get_linear_step(drv
->vdd_apc
);
900 /* Populate fuse_corner members */
901 fuse
= drv
->fuse_corners
;
902 end
= &fuse
[desc
->num_fuse_corners
- 1];
903 fdata
= desc
->cpr_fuses
.fuse_corner_data
;
905 for (i
= 0; fuse
<= end
; fuse
++, fuses
++, i
++, fdata
++) {
907 * Update SoC voltages: platforms might choose a different
908 * regulators than the one used to characterize the algorithms
909 * (ie, init_voltage_step).
911 fdata
->min_uV
= roundup(fdata
->min_uV
, step_volt
);
912 fdata
->max_uV
= roundup(fdata
->max_uV
, step_volt
);
915 uV
= cpr_read_fuse_uV(desc
, fdata
, fuses
->init_voltage
,
920 fuse
->min_uV
= fdata
->min_uV
;
921 fuse
->max_uV
= fdata
->max_uV
;
922 fuse
->uV
= clamp(uV
, fuse
->min_uV
, fuse
->max_uV
);
926 * Allow the highest fuse corner's PVS voltage to
927 * define the ceiling voltage for that corner in order
928 * to support SoC's in which variable ceiling values
931 end
->max_uV
= max(end
->max_uV
, end
->uV
);
934 /* Populate target quotient by scaling */
935 ret
= cpr_read_efuse(drv
->dev
, fuses
->quotient
, &fuse
->quot
);
939 fuse
->quot
*= fdata
->quot_scale
;
940 fuse
->quot
+= fdata
->quot_offset
;
941 fuse
->quot
+= fdata
->quot_adjust
;
942 fuse
->step_quot
= desc
->step_quot
[fuse
->ring_osc_idx
];
944 /* Populate acc settings */
946 fuse
->num_accs
= acc_desc
->num_regs_per_fuse
;
947 accs
+= acc_desc
->num_regs_per_fuse
;
951 * Restrict all fuse corner PVS voltages based upon per corner
952 * ceiling and floor voltages.
954 for (fuse
= drv
->fuse_corners
, i
= 0; fuse
<= end
; fuse
++, i
++) {
955 if (fuse
->uV
> fuse
->max_uV
)
956 fuse
->uV
= fuse
->max_uV
;
957 else if (fuse
->uV
< fuse
->min_uV
)
958 fuse
->uV
= fuse
->min_uV
;
960 ret
= regulator_is_supported_voltage(drv
->vdd_apc
,
965 "min uV: %d (fuse corner: %d) not supported by regulator\n",
970 ret
= regulator_is_supported_voltage(drv
->vdd_apc
,
975 "max uV: %d (fuse corner: %d) not supported by regulator\n",
981 "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
982 i
, fuse
->min_uV
, fuse
->uV
, fuse
->max_uV
,
983 fuse
->ring_osc_idx
, fuse
->quot
, fuse
->step_quot
);
989 static int cpr_calculate_scaling(const char *quot_offset
,
991 const struct fuse_corner_data
*fdata
,
992 const struct corner
*corner
)
995 unsigned long freq_diff
;
997 const struct fuse_corner
*fuse
, *prev_fuse
;
1000 fuse
= corner
->fuse_corner
;
1001 prev_fuse
= fuse
- 1;
1004 ret
= cpr_read_efuse(drv
->dev
, quot_offset
, "_diff
);
1008 quot_diff
*= fdata
->quot_offset_scale
;
1009 quot_diff
+= fdata
->quot_offset_adjust
;
1011 quot_diff
= fuse
->quot
- prev_fuse
->quot
;
1014 freq_diff
= fuse
->max_freq
- prev_fuse
->max_freq
;
1015 freq_diff
/= 1000000; /* Convert to MHz */
1016 scaling
= 1000 * quot_diff
/ freq_diff
;
1017 return min(scaling
, fdata
->max_quot_scale
);
1020 static int cpr_interpolate(const struct corner
*corner
, int step_volt
,
1021 const struct fuse_corner_data
*fdata
)
1023 unsigned long f_high
, f_low
, f_diff
;
1024 int uV_high
, uV_low
, uV
;
1025 u64 temp
, temp_limit
;
1026 const struct fuse_corner
*fuse
, *prev_fuse
;
1028 fuse
= corner
->fuse_corner
;
1029 prev_fuse
= fuse
- 1;
1031 f_high
= fuse
->max_freq
;
1032 f_low
= prev_fuse
->max_freq
;
1034 uV_low
= prev_fuse
->uV
;
1035 f_diff
= fuse
->max_freq
- corner
->freq
;
1038 * Don't interpolate in the wrong direction. This could happen
1039 * if the adjusted fuse voltage overlaps with the previous fuse's
1042 if (f_high
<= f_low
|| uV_high
<= uV_low
|| f_high
<= corner
->freq
)
1045 temp
= f_diff
* (uV_high
- uV_low
);
1046 do_div(temp
, f_high
- f_low
);
1049 * max_volt_scale has units of uV/MHz while freq values
1050 * have units of Hz. Divide by 1000000 to convert to.
1052 temp_limit
= f_diff
* fdata
->max_volt_scale
;
1053 do_div(temp_limit
, 1000000);
1055 uV
= uV_high
- min(temp
, temp_limit
);
1056 return roundup(uV
, step_volt
);
1059 static unsigned int cpr_get_fuse_corner(struct dev_pm_opp
*opp
)
1061 struct device_node
*np
;
1062 unsigned int fuse_corner
= 0;
1064 np
= dev_pm_opp_get_of_node(opp
);
1065 if (of_property_read_u32(np
, "qcom,opp-fuse-level", &fuse_corner
))
1066 pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
1074 static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp
*ref
,
1075 struct device
*cpu_dev
)
1078 struct device_node
*ref_np
;
1079 struct device_node
*desc_np
;
1080 struct device_node
*child_np
= NULL
;
1081 struct device_node
*child_req_np
= NULL
;
1083 desc_np
= dev_pm_opp_of_get_opp_desc_node(cpu_dev
);
1087 ref_np
= dev_pm_opp_get_of_node(ref
);
1092 of_node_put(child_req_np
);
1093 child_np
= of_get_next_available_child(desc_np
, child_np
);
1094 child_req_np
= of_parse_phandle(child_np
, "required-opps", 0);
1095 } while (child_np
&& child_req_np
!= ref_np
);
1097 if (child_np
&& child_req_np
== ref_np
)
1098 of_property_read_u64(child_np
, "opp-hz", &rate
);
1100 of_node_put(child_req_np
);
1101 of_node_put(child_np
);
1102 of_node_put(ref_np
);
1104 of_node_put(desc_np
);
1106 return (unsigned long) rate
;
1109 static int cpr_corner_init(struct cpr_drv
*drv
)
1111 const struct cpr_desc
*desc
= drv
->desc
;
1112 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
1113 int i
, level
, scaling
= 0;
1114 unsigned int fnum
, fc
;
1115 const char *quot_offset
;
1116 struct fuse_corner
*fuse
, *prev_fuse
;
1117 struct corner
*corner
, *end
;
1118 struct corner_data
*cdata
;
1119 const struct fuse_corner_data
*fdata
;
1121 unsigned long freq_diff
, freq_diff_mhz
;
1123 int step_volt
= regulator_get_linear_step(drv
->vdd_apc
);
1124 struct dev_pm_opp
*opp
;
1129 corner
= drv
->corners
;
1130 end
= &corner
[drv
->num_corners
- 1];
1132 cdata
= devm_kcalloc(drv
->dev
, drv
->num_corners
,
1133 sizeof(struct corner_data
),
1139 * Store maximum frequency for each fuse corner based on the frequency
1142 for (level
= 1; level
<= drv
->num_corners
; level
++) {
1143 opp
= dev_pm_opp_find_level_exact(&drv
->pd
.dev
, level
);
1146 fc
= cpr_get_fuse_corner(opp
);
1148 dev_pm_opp_put(opp
);
1152 freq
= cpr_get_opp_hz_for_req(opp
, drv
->attached_cpu_dev
);
1154 dev_pm_opp_put(opp
);
1157 cdata
[level
- 1].fuse_corner
= fnum
;
1158 cdata
[level
- 1].freq
= freq
;
1160 fuse
= &drv
->fuse_corners
[fnum
];
1161 dev_dbg(drv
->dev
, "freq: %lu level: %u fuse level: %u\n",
1162 freq
, dev_pm_opp_get_level(opp
) - 1, fnum
);
1163 if (freq
> fuse
->max_freq
)
1164 fuse
->max_freq
= freq
;
1165 dev_pm_opp_put(opp
);
1169 * Get the quotient adjustment scaling factor, according to:
1171 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
1172 * / (freq(corner_N) - freq(corner_N-1)), max_factor)
1174 * QUOT(corner_N): quotient read from fuse for fuse corner N
1175 * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
1176 * freq(corner_N): max frequency in MHz supported by fuse corner N
1177 * freq(corner_N-1): max frequency in MHz supported by fuse corner
1180 * Then walk through the corners mapped to each fuse corner
1181 * and calculate the quotient adjustment for each one using the
1182 * following formula:
1184 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
1186 * freq_max: max frequency in MHz supported by the fuse corner
1187 * freq_corner: frequency in MHz corresponding to the corner
1188 * scaling: calculated from above equation
1199 * +--------------- +----------------
1200 * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
1207 for (apply_scaling
= false, i
= 0; corner
<= end
; corner
++, i
++) {
1208 fnum
= cdata
[i
].fuse_corner
;
1209 fdata
= &desc
->cpr_fuses
.fuse_corner_data
[fnum
];
1210 quot_offset
= fuses
[fnum
].quotient_offset
;
1211 fuse
= &drv
->fuse_corners
[fnum
];
1213 prev_fuse
= &drv
->fuse_corners
[fnum
- 1];
1217 corner
->fuse_corner
= fuse
;
1218 corner
->freq
= cdata
[i
].freq
;
1219 corner
->uV
= fuse
->uV
;
1221 if (prev_fuse
&& cdata
[i
- 1].freq
== prev_fuse
->max_freq
) {
1222 scaling
= cpr_calculate_scaling(quot_offset
, drv
,
1227 apply_scaling
= true;
1228 } else if (corner
->freq
== fuse
->max_freq
) {
1229 /* This is a fuse corner; don't scale anything */
1230 apply_scaling
= false;
1233 if (apply_scaling
) {
1234 freq_diff
= fuse
->max_freq
- corner
->freq
;
1235 freq_diff_mhz
= freq_diff
/ 1000000;
1236 corner
->quot_adjust
= scaling
* freq_diff_mhz
/ 1000;
1238 corner
->uV
= cpr_interpolate(corner
, step_volt
, fdata
);
1241 corner
->max_uV
= fuse
->max_uV
;
1242 corner
->min_uV
= fuse
->min_uV
;
1243 corner
->uV
= clamp(corner
->uV
, corner
->min_uV
, corner
->max_uV
);
1244 corner
->last_uV
= corner
->uV
;
1246 /* Reduce the ceiling voltage if needed */
1247 if (desc
->reduce_to_corner_uV
&& corner
->uV
< corner
->max_uV
)
1248 corner
->max_uV
= corner
->uV
;
1249 else if (desc
->reduce_to_fuse_uV
&& fuse
->uV
< corner
->max_uV
)
1250 corner
->max_uV
= max(corner
->min_uV
, fuse
->uV
);
1252 dev_dbg(drv
->dev
, "corner %d: [%d %d %d] quot %d\n", i
,
1253 corner
->min_uV
, corner
->uV
, corner
->max_uV
,
1254 fuse
->quot
- corner
->quot_adjust
);
1260 static const struct cpr_fuse
*cpr_get_fuses(struct cpr_drv
*drv
)
1262 const struct cpr_desc
*desc
= drv
->desc
;
1263 struct cpr_fuse
*fuses
;
1266 fuses
= devm_kcalloc(drv
->dev
, desc
->num_fuse_corners
,
1267 sizeof(struct cpr_fuse
),
1270 return ERR_PTR(-ENOMEM
);
1272 for (i
= 0; i
< desc
->num_fuse_corners
; i
++) {
1275 snprintf(tbuf
, 32, "cpr_ring_osc%d", i
+ 1);
1276 fuses
[i
].ring_osc
= devm_kstrdup(drv
->dev
, tbuf
, GFP_KERNEL
);
1277 if (!fuses
[i
].ring_osc
)
1278 return ERR_PTR(-ENOMEM
);
1280 snprintf(tbuf
, 32, "cpr_init_voltage%d", i
+ 1);
1281 fuses
[i
].init_voltage
= devm_kstrdup(drv
->dev
, tbuf
,
1283 if (!fuses
[i
].init_voltage
)
1284 return ERR_PTR(-ENOMEM
);
1286 snprintf(tbuf
, 32, "cpr_quotient%d", i
+ 1);
1287 fuses
[i
].quotient
= devm_kstrdup(drv
->dev
, tbuf
, GFP_KERNEL
);
1288 if (!fuses
[i
].quotient
)
1289 return ERR_PTR(-ENOMEM
);
1291 snprintf(tbuf
, 32, "cpr_quotient_offset%d", i
+ 1);
1292 fuses
[i
].quotient_offset
= devm_kstrdup(drv
->dev
, tbuf
,
1294 if (!fuses
[i
].quotient_offset
)
1295 return ERR_PTR(-ENOMEM
);
1301 static void cpr_set_loop_allowed(struct cpr_drv
*drv
)
1303 drv
->loop_disabled
= false;
1306 static int cpr_init_parameters(struct cpr_drv
*drv
)
1308 const struct cpr_desc
*desc
= drv
->desc
;
1311 clk
= clk_get(drv
->dev
, "ref");
1313 return PTR_ERR(clk
);
1315 drv
->ref_clk_khz
= clk_get_rate(clk
) / 1000;
1318 if (desc
->timer_cons_up
> RBIF_TIMER_ADJ_CONS_UP_MASK
||
1319 desc
->timer_cons_down
> RBIF_TIMER_ADJ_CONS_DOWN_MASK
||
1320 desc
->up_threshold
> RBCPR_CTL_UP_THRESHOLD_MASK
||
1321 desc
->down_threshold
> RBCPR_CTL_DN_THRESHOLD_MASK
||
1322 desc
->idle_clocks
> RBCPR_STEP_QUOT_IDLE_CLK_MASK
||
1323 desc
->clamp_timer_interval
> RBIF_TIMER_ADJ_CLAMP_INT_MASK
)
1326 dev_dbg(drv
->dev
, "up threshold = %u, down threshold = %u\n",
1327 desc
->up_threshold
, desc
->down_threshold
);
1332 static int cpr_find_initial_corner(struct cpr_drv
*drv
)
1335 const struct corner
*end
;
1336 struct corner
*iter
;
1339 if (!drv
->cpu_clk
) {
1340 dev_err(drv
->dev
, "cannot get rate from NULL clk\n");
1344 end
= &drv
->corners
[drv
->num_corners
- 1];
1345 rate
= clk_get_rate(drv
->cpu_clk
);
1348 * Some bootloaders set a CPU clock frequency that is not defined
1349 * in the OPP table. When running at an unlisted frequency,
1350 * cpufreq_online() will change to the OPP which has the lowest
1351 * frequency, at or above the unlisted frequency.
1352 * Since cpufreq_online() always "rounds up" in the case of an
1353 * unlisted frequency, this function always "rounds down" in case
1354 * of an unlisted frequency. That way, when cpufreq_online()
1355 * triggers the first ever call to cpr_set_performance_state(),
1356 * it will correctly determine the direction as UP.
1358 for (iter
= drv
->corners
; iter
<= end
; iter
++) {
1359 if (iter
->freq
> rate
)
1362 if (iter
->freq
== rate
) {
1366 if (iter
->freq
< rate
)
1371 dev_err(drv
->dev
, "boot up corner not found\n");
1375 dev_dbg(drv
->dev
, "boot up perf state: %u\n", i
);
1380 static const struct cpr_desc qcs404_cpr_desc
= {
1381 .num_fuse_corners
= 3,
1382 .min_diff_quot
= CPR_FUSE_MIN_QUOT_DIFF
,
1383 .step_quot
= (int []){ 25, 25, 25, },
1384 .timer_delay_us
= 5000,
1386 .timer_cons_down
= 2,
1388 .down_threshold
= 3,
1391 .vdd_apc_step_up_limit
= 1,
1392 .vdd_apc_step_down_limit
= 1,
1394 .init_voltage_step
= 8000,
1395 .init_voltage_width
= 6,
1396 .fuse_corner_data
= (struct fuse_corner_data
[]){
1402 .max_volt_scale
= 0,
1403 .max_quot_scale
= 0,
1407 .quot_offset_scale
= 5,
1408 .quot_offset_adjust
= 0,
1415 .max_volt_scale
= 2000,
1416 .max_quot_scale
= 1400,
1420 .quot_offset_scale
= 5,
1421 .quot_offset_adjust
= 0,
1428 .max_volt_scale
= 2000,
1429 .max_quot_scale
= 1400,
1433 .quot_offset_scale
= 5,
1434 .quot_offset_adjust
= 0,
1440 static const struct acc_desc qcs404_acc_desc
= {
1441 .settings
= (struct reg_sequence
[]){
1442 { 0xb120, 0x1041040 },
1449 .config
= (struct reg_sequence
[]){
1453 .num_regs_per_fuse
= 2,
/* Match data for the qcom,qcs404-cpr compatible: CPR + ACC descriptions. */
static const struct cpr_acc_desc qcs404_cpr_acc_desc = {
	.cpr_desc = &qcs404_cpr_desc,
	.acc_desc = &qcs404_acc_desc,
};
/*
 * genpd ->opp_to_performance_state callback: the performance state of an
 * OPP is simply the level stored in its OPP table entry.
 */
static unsigned int cpr_get_performance_state(struct generic_pm_domain *genpd,
					      struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}
/* genpd ->power_off callback: disable the CPR controller for this domain. */
static int cpr_power_off(struct generic_pm_domain *domain)
{
	/* Recover the driver state embedding this power domain */
	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);

	return cpr_disable(drv);
}
/* genpd ->power_on callback: enable the CPR controller for this domain. */
static int cpr_power_on(struct generic_pm_domain *domain)
{
	/* Recover the driver state embedding this power domain */
	struct cpr_drv *drv = container_of(domain, struct cpr_drv, pd);

	return cpr_enable(drv);
}
1481 static int cpr_pd_attach_dev(struct generic_pm_domain
*domain
,
1484 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1485 const struct acc_desc
*acc_desc
= drv
->acc_desc
;
1488 mutex_lock(&drv
->lock
);
1490 dev_dbg(drv
->dev
, "attach callback for: %s\n", dev_name(dev
));
1493 * This driver only supports scaling voltage for a CPU cluster
1494 * where all CPUs in the cluster share a single regulator.
1495 * Therefore, save the struct device pointer only for the first
1496 * CPU device that gets attached. There is no need to do any
1497 * additional initialization when further CPUs get attached.
1499 if (drv
->attached_cpu_dev
)
1503 * cpr_scale_voltage() requires the direction (if we are changing
1504 * to a higher or lower OPP). The first time
1505 * cpr_set_performance_state() is called, there is no previous
1506 * performance state defined. Therefore, we call
1507 * cpr_find_initial_corner() that gets the CPU clock frequency
1508 * set by the bootloader, so that we can determine the direction
1509 * the first time cpr_set_performance_state() is called.
1511 drv
->cpu_clk
= devm_clk_get(dev
, NULL
);
1512 if (IS_ERR(drv
->cpu_clk
)) {
1513 ret
= PTR_ERR(drv
->cpu_clk
);
1514 if (ret
!= -EPROBE_DEFER
)
1515 dev_err(drv
->dev
, "could not get cpu clk: %d\n", ret
);
1518 drv
->attached_cpu_dev
= dev
;
1520 dev_dbg(drv
->dev
, "using cpu clk from: %s\n",
1521 dev_name(drv
->attached_cpu_dev
));
1524 * Everything related to (virtual) corners has to be initialized
1525 * here, when attaching to the power domain, since we need to know
1526 * the maximum frequency for each fuse corner, and this is only
1527 * available after the cpufreq driver has attached to us.
1528 * The reason for this is that we need to know the highest
1529 * frequency associated with each fuse corner.
1531 ret
= dev_pm_opp_get_opp_count(&drv
->pd
.dev
);
1533 dev_err(drv
->dev
, "could not get OPP count\n");
1536 drv
->num_corners
= ret
;
1538 if (drv
->num_corners
< 2) {
1539 dev_err(drv
->dev
, "need at least 2 OPPs to use CPR\n");
1544 drv
->corners
= devm_kcalloc(drv
->dev
, drv
->num_corners
,
1545 sizeof(*drv
->corners
),
1547 if (!drv
->corners
) {
1552 ret
= cpr_corner_init(drv
);
1556 cpr_set_loop_allowed(drv
);
1558 ret
= cpr_init_parameters(drv
);
1562 /* Configure CPR HW but keep it disabled */
1563 ret
= cpr_config(drv
);
1567 ret
= cpr_find_initial_corner(drv
);
1571 if (acc_desc
->config
)
1572 regmap_multi_reg_write(drv
->tcsr
, acc_desc
->config
,
1573 acc_desc
->num_regs_per_fuse
);
1575 /* Enable ACC if required */
1576 if (acc_desc
->enable_mask
)
1577 regmap_update_bits(drv
->tcsr
, acc_desc
->enable_reg
,
1578 acc_desc
->enable_mask
,
1579 acc_desc
->enable_mask
);
1581 dev_info(drv
->dev
, "driver initialized with %u OPPs\n",
1585 mutex_unlock(&drv
->lock
);
1590 static int cpr_debug_info_show(struct seq_file
*s
, void *unused
)
1592 u32 gcnt
, ro_sel
, ctl
, irq_status
, reg
, error_steps
;
1593 u32 step_dn
, step_up
, error
, error_lt0
, busy
;
1594 struct cpr_drv
*drv
= s
->private;
1595 struct fuse_corner
*fuse_corner
;
1596 struct corner
*corner
;
1598 corner
= drv
->corner
;
1599 fuse_corner
= corner
->fuse_corner
;
1601 seq_printf(s
, "corner, current_volt = %d uV\n",
1604 ro_sel
= fuse_corner
->ring_osc_idx
;
1605 gcnt
= cpr_read(drv
, REG_RBCPR_GCNT_TARGET(ro_sel
));
1606 seq_printf(s
, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel
, gcnt
);
1608 ctl
= cpr_read(drv
, REG_RBCPR_CTL
);
1609 seq_printf(s
, "rbcpr_ctl = %#02X\n", ctl
);
1611 irq_status
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
1612 seq_printf(s
, "rbcpr_irq_status = %#02X\n", irq_status
);
1614 reg
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
1615 seq_printf(s
, "rbcpr_result_0 = %#02X\n", reg
);
1617 step_dn
= reg
& 0x01;
1618 step_up
= (reg
>> RBCPR_RESULT0_STEP_UP_SHIFT
) & 0x01;
1619 seq_printf(s
, " [step_dn = %u", step_dn
);
1621 seq_printf(s
, ", step_up = %u", step_up
);
1623 error_steps
= (reg
>> RBCPR_RESULT0_ERROR_STEPS_SHIFT
)
1624 & RBCPR_RESULT0_ERROR_STEPS_MASK
;
1625 seq_printf(s
, ", error_steps = %u", error_steps
);
1627 error
= (reg
>> RBCPR_RESULT0_ERROR_SHIFT
) & RBCPR_RESULT0_ERROR_MASK
;
1628 seq_printf(s
, ", error = %u", error
);
1630 error_lt0
= (reg
>> RBCPR_RESULT0_ERROR_LT0_SHIFT
) & 0x01;
1631 seq_printf(s
, ", error_lt_0 = %u", error_lt0
);
1633 busy
= (reg
>> RBCPR_RESULT0_BUSY_SHIFT
) & 0x01;
1634 seq_printf(s
, ", busy = %u]\n", busy
);
1638 DEFINE_SHOW_ATTRIBUTE(cpr_debug_info
);
/*
 * Create the driver's debugfs directory ("qcom_cpr") and the read-only
 * "debug_info" file backed by cpr_debug_info_show() above.
 * debugfs calls need no error checking; they degrade gracefully.
 */
static void cpr_debugfs_init(struct cpr_drv *drv)
{
	drv->debugfs = debugfs_create_dir("qcom_cpr", NULL);

	debugfs_create_file("debug_info", 0444, drv->debugfs,
			    drv, &cpr_debug_info_fops);
}
1648 static int cpr_probe(struct platform_device
*pdev
)
1650 struct resource
*res
;
1651 struct device
*dev
= &pdev
->dev
;
1652 struct cpr_drv
*drv
;
1654 const struct cpr_acc_desc
*data
;
1655 struct device_node
*np
;
1656 u32 cpr_rev
= FUSE_REVISION_UNKNOWN
;
1658 data
= of_device_get_match_data(dev
);
1659 if (!data
|| !data
->cpr_desc
|| !data
->acc_desc
)
1662 drv
= devm_kzalloc(dev
, sizeof(*drv
), GFP_KERNEL
);
1666 drv
->desc
= data
->cpr_desc
;
1667 drv
->acc_desc
= data
->acc_desc
;
1669 drv
->fuse_corners
= devm_kcalloc(dev
, drv
->desc
->num_fuse_corners
,
1670 sizeof(*drv
->fuse_corners
),
1672 if (!drv
->fuse_corners
)
1675 np
= of_parse_phandle(dev
->of_node
, "acc-syscon", 0);
1679 drv
->tcsr
= syscon_node_to_regmap(np
);
1681 if (IS_ERR(drv
->tcsr
))
1682 return PTR_ERR(drv
->tcsr
);
1684 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1685 drv
->base
= devm_ioremap_resource(dev
, res
);
1686 if (IS_ERR(drv
->base
))
1687 return PTR_ERR(drv
->base
);
1689 irq
= platform_get_irq(pdev
, 0);
1693 drv
->vdd_apc
= devm_regulator_get(dev
, "vdd-apc");
1694 if (IS_ERR(drv
->vdd_apc
))
1695 return PTR_ERR(drv
->vdd_apc
);
1698 * Initialize fuse corners, since it simply depends
1699 * on data in efuses.
1700 * Everything related to (virtual) corners has to be
1701 * initialized after attaching to the power domain,
1702 * since it depends on the CPU's OPP table.
1704 ret
= cpr_read_efuse(dev
, "cpr_fuse_revision", &cpr_rev
);
1708 drv
->cpr_fuses
= cpr_get_fuses(drv
);
1709 if (IS_ERR(drv
->cpr_fuses
))
1710 return PTR_ERR(drv
->cpr_fuses
);
1712 ret
= cpr_populate_ring_osc_idx(drv
);
1716 ret
= cpr_fuse_corner_init(drv
);
1720 mutex_init(&drv
->lock
);
1722 ret
= devm_request_threaded_irq(dev
, irq
, NULL
,
1724 IRQF_ONESHOT
| IRQF_TRIGGER_RISING
,
1729 drv
->pd
.name
= devm_kstrdup_const(dev
, dev
->of_node
->full_name
,
1734 drv
->pd
.power_off
= cpr_power_off
;
1735 drv
->pd
.power_on
= cpr_power_on
;
1736 drv
->pd
.set_performance_state
= cpr_set_performance_state
;
1737 drv
->pd
.opp_to_performance_state
= cpr_get_performance_state
;
1738 drv
->pd
.attach_dev
= cpr_pd_attach_dev
;
1740 ret
= pm_genpd_init(&drv
->pd
, NULL
, true);
1744 ret
= of_genpd_add_provider_simple(dev
->of_node
, &drv
->pd
);
1748 platform_set_drvdata(pdev
, drv
);
1749 cpr_debugfs_init(drv
);
1754 static int cpr_remove(struct platform_device
*pdev
)
1756 struct cpr_drv
*drv
= platform_get_drvdata(pdev
);
1758 if (cpr_is_allowed(drv
)) {
1759 cpr_ctl_disable(drv
);
1760 cpr_irq_set(drv
, 0);
1763 of_genpd_del_provider(pdev
->dev
.of_node
);
1764 pm_genpd_remove(&drv
->pd
);
1766 debugfs_remove_recursive(drv
->debugfs
);
1771 static const struct of_device_id cpr_match_table
[] = {
1772 { .compatible
= "qcom,qcs404-cpr", .data
= &qcs404_cpr_acc_desc
},
1775 MODULE_DEVICE_TABLE(of
, cpr_match_table
);
1777 static struct platform_driver cpr_driver
= {
1779 .remove
= cpr_remove
,
1782 .of_match_table
= cpr_match_table
,
1785 module_platform_driver(cpr_driver
);
1787 MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
1788 MODULE_LICENSE("GPL v2");