1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 * Copyright (c) 2019, Linaro Limited
 */
#include <linux/cleanup.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/regulator/consumer.h>
#include <linux/clk.h>
#include <linux/nvmem-consumer.h>
29 /* Register Offsets for RB-CPR and Bit Definitions */
31 /* RBCPR Version Register */
32 #define REG_RBCPR_VERSION 0
33 #define RBCPR_VER_2 0x02
34 #define FLAGS_IGNORE_1ST_IRQ_STATUS BIT(0)
36 /* RBCPR Gate Count and Target Registers */
37 #define REG_RBCPR_GCNT_TARGET(n) (0x60 + 4 * (n))
39 #define RBCPR_GCNT_TARGET_TARGET_SHIFT 0
40 #define RBCPR_GCNT_TARGET_TARGET_MASK GENMASK(11, 0)
41 #define RBCPR_GCNT_TARGET_GCNT_SHIFT 12
42 #define RBCPR_GCNT_TARGET_GCNT_MASK GENMASK(9, 0)
44 /* RBCPR Timer Control */
45 #define REG_RBCPR_TIMER_INTERVAL 0x44
46 #define REG_RBIF_TIMER_ADJUST 0x4c
48 #define RBIF_TIMER_ADJ_CONS_UP_MASK GENMASK(3, 0)
49 #define RBIF_TIMER_ADJ_CONS_UP_SHIFT 0
50 #define RBIF_TIMER_ADJ_CONS_DOWN_MASK GENMASK(3, 0)
51 #define RBIF_TIMER_ADJ_CONS_DOWN_SHIFT 4
52 #define RBIF_TIMER_ADJ_CLAMP_INT_MASK GENMASK(7, 0)
53 #define RBIF_TIMER_ADJ_CLAMP_INT_SHIFT 8
55 /* RBCPR Config Register */
56 #define REG_RBIF_LIMIT 0x48
57 #define RBIF_LIMIT_CEILING_MASK GENMASK(5, 0)
58 #define RBIF_LIMIT_CEILING_SHIFT 6
59 #define RBIF_LIMIT_FLOOR_BITS 6
60 #define RBIF_LIMIT_FLOOR_MASK GENMASK(5, 0)
62 #define RBIF_LIMIT_CEILING_DEFAULT RBIF_LIMIT_CEILING_MASK
63 #define RBIF_LIMIT_FLOOR_DEFAULT 0
65 #define REG_RBIF_SW_VLEVEL 0x94
66 #define RBIF_SW_VLEVEL_DEFAULT 0x20
68 #define REG_RBCPR_STEP_QUOT 0x80
69 #define RBCPR_STEP_QUOT_STEPQUOT_MASK GENMASK(7, 0)
70 #define RBCPR_STEP_QUOT_IDLE_CLK_MASK GENMASK(3, 0)
71 #define RBCPR_STEP_QUOT_IDLE_CLK_SHIFT 8
73 /* RBCPR Control Register */
74 #define REG_RBCPR_CTL 0x90
76 #define RBCPR_CTL_LOOP_EN BIT(0)
77 #define RBCPR_CTL_TIMER_EN BIT(3)
78 #define RBCPR_CTL_SW_AUTO_CONT_ACK_EN BIT(5)
79 #define RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN BIT(6)
80 #define RBCPR_CTL_COUNT_MODE BIT(10)
81 #define RBCPR_CTL_UP_THRESHOLD_MASK GENMASK(3, 0)
82 #define RBCPR_CTL_UP_THRESHOLD_SHIFT 24
83 #define RBCPR_CTL_DN_THRESHOLD_MASK GENMASK(3, 0)
84 #define RBCPR_CTL_DN_THRESHOLD_SHIFT 28
86 /* RBCPR Ack/Nack Response */
87 #define REG_RBIF_CONT_ACK_CMD 0x98
88 #define REG_RBIF_CONT_NACK_CMD 0x9c
90 /* RBCPR Result status Register */
91 #define REG_RBCPR_RESULT_0 0xa0
93 #define RBCPR_RESULT0_BUSY_SHIFT 19
94 #define RBCPR_RESULT0_BUSY_MASK BIT(RBCPR_RESULT0_BUSY_SHIFT)
95 #define RBCPR_RESULT0_ERROR_LT0_SHIFT 18
96 #define RBCPR_RESULT0_ERROR_SHIFT 6
97 #define RBCPR_RESULT0_ERROR_MASK GENMASK(11, 0)
98 #define RBCPR_RESULT0_ERROR_STEPS_SHIFT 2
99 #define RBCPR_RESULT0_ERROR_STEPS_MASK GENMASK(3, 0)
100 #define RBCPR_RESULT0_STEP_UP_SHIFT 1
102 /* RBCPR Interrupt Control Register */
103 #define REG_RBIF_IRQ_EN(n) (0x100 + 4 * (n))
104 #define REG_RBIF_IRQ_CLEAR 0x110
105 #define REG_RBIF_IRQ_STATUS 0x114
107 #define CPR_INT_DONE BIT(0)
108 #define CPR_INT_MIN BIT(1)
109 #define CPR_INT_DOWN BIT(2)
110 #define CPR_INT_MID BIT(3)
111 #define CPR_INT_UP BIT(4)
112 #define CPR_INT_MAX BIT(5)
113 #define CPR_INT_CLAMP BIT(6)
114 #define CPR_INT_ALL (CPR_INT_DONE | CPR_INT_MIN | CPR_INT_DOWN | \
115 CPR_INT_MID | CPR_INT_UP | CPR_INT_MAX | CPR_INT_CLAMP)
116 #define CPR_INT_DEFAULT (CPR_INT_UP | CPR_INT_DOWN)
118 #define CPR_NUM_RING_OSC 8
120 /* CPR eFuse parameters */
121 #define CPR_FUSE_TARGET_QUOT_BITS_MASK GENMASK(11, 0)
123 #define CPR_FUSE_MIN_QUOT_DIFF 50
125 #define FUSE_REVISION_UNKNOWN (-1)
127 enum voltage_change_dir
{
137 char *quotient_offset
;
140 struct fuse_corner_data
{
150 /* fuse quot_offset */
151 int quot_offset_scale
;
152 int quot_offset_adjust
;
156 int init_voltage_step
;
157 int init_voltage_width
;
158 struct fuse_corner_data
*fuse_corner_data
;
162 unsigned int fuse_corner
;
167 unsigned int num_fuse_corners
;
171 unsigned int timer_delay_us
;
172 unsigned int timer_cons_up
;
173 unsigned int timer_cons_down
;
174 unsigned int up_threshold
;
175 unsigned int down_threshold
;
176 unsigned int idle_clocks
;
177 unsigned int gcnt_us
;
178 unsigned int vdd_apc_step_up_limit
;
179 unsigned int vdd_apc_step_down_limit
;
180 unsigned int clamp_timer_interval
;
182 struct cpr_fuses cpr_fuses
;
183 bool reduce_to_fuse_uV
;
184 bool reduce_to_corner_uV
;
188 unsigned int enable_reg
;
191 struct reg_sequence
*config
;
192 struct reg_sequence
*settings
;
193 int num_regs_per_fuse
;
196 struct cpr_acc_desc
{
197 const struct cpr_desc
*cpr_desc
;
198 const struct acc_desc
*acc_desc
;
207 const struct reg_sequence
*accs
;
209 unsigned long max_freq
;
222 struct fuse_corner
*fuse_corner
;
226 unsigned int num_corners
;
227 unsigned int ref_clk_khz
;
229 struct generic_pm_domain pd
;
231 struct device
*attached_cpu_dev
;
234 struct corner
*corner
;
235 struct regulator
*vdd_apc
;
242 struct fuse_corner
*fuse_corners
;
243 struct corner
*corners
;
245 const struct cpr_desc
*desc
;
246 const struct acc_desc
*acc_desc
;
247 const struct cpr_fuse
*cpr_fuses
;
249 struct dentry
*debugfs
;
252 static bool cpr_is_allowed(struct cpr_drv
*drv
)
254 return !drv
->loop_disabled
;
257 static void cpr_write(struct cpr_drv
*drv
, u32 offset
, u32 value
)
259 writel_relaxed(value
, drv
->base
+ offset
);
262 static u32
cpr_read(struct cpr_drv
*drv
, u32 offset
)
264 return readl_relaxed(drv
->base
+ offset
);
268 cpr_masked_write(struct cpr_drv
*drv
, u32 offset
, u32 mask
, u32 value
)
272 val
= readl_relaxed(drv
->base
+ offset
);
275 writel_relaxed(val
, drv
->base
+ offset
);
278 static void cpr_irq_clr(struct cpr_drv
*drv
)
280 cpr_write(drv
, REG_RBIF_IRQ_CLEAR
, CPR_INT_ALL
);
283 static void cpr_irq_clr_nack(struct cpr_drv
*drv
)
286 cpr_write(drv
, REG_RBIF_CONT_NACK_CMD
, 1);
289 static void cpr_irq_clr_ack(struct cpr_drv
*drv
)
292 cpr_write(drv
, REG_RBIF_CONT_ACK_CMD
, 1);
295 static void cpr_irq_set(struct cpr_drv
*drv
, u32 int_bits
)
297 cpr_write(drv
, REG_RBIF_IRQ_EN(0), int_bits
);
300 static void cpr_ctl_modify(struct cpr_drv
*drv
, u32 mask
, u32 value
)
302 cpr_masked_write(drv
, REG_RBCPR_CTL
, mask
, value
);
305 static void cpr_ctl_enable(struct cpr_drv
*drv
, struct corner
*corner
)
308 const struct cpr_desc
*desc
= drv
->desc
;
310 /* Program Consecutive Up & Down */
311 val
= desc
->timer_cons_down
<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT
;
312 val
|= desc
->timer_cons_up
<< RBIF_TIMER_ADJ_CONS_UP_SHIFT
;
313 mask
= RBIF_TIMER_ADJ_CONS_UP_MASK
| RBIF_TIMER_ADJ_CONS_DOWN_MASK
;
314 cpr_masked_write(drv
, REG_RBIF_TIMER_ADJUST
, mask
, val
);
315 cpr_masked_write(drv
, REG_RBCPR_CTL
,
316 RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
|
317 RBCPR_CTL_SW_AUTO_CONT_ACK_EN
,
319 cpr_irq_set(drv
, corner
->save_irq
);
321 if (cpr_is_allowed(drv
) && corner
->max_uV
> corner
->min_uV
)
322 val
= RBCPR_CTL_LOOP_EN
;
325 cpr_ctl_modify(drv
, RBCPR_CTL_LOOP_EN
, val
);
328 static void cpr_ctl_disable(struct cpr_drv
*drv
)
331 cpr_ctl_modify(drv
, RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
|
332 RBCPR_CTL_SW_AUTO_CONT_ACK_EN
, 0);
333 cpr_masked_write(drv
, REG_RBIF_TIMER_ADJUST
,
334 RBIF_TIMER_ADJ_CONS_UP_MASK
|
335 RBIF_TIMER_ADJ_CONS_DOWN_MASK
, 0);
337 cpr_write(drv
, REG_RBIF_CONT_ACK_CMD
, 1);
338 cpr_write(drv
, REG_RBIF_CONT_NACK_CMD
, 1);
339 cpr_ctl_modify(drv
, RBCPR_CTL_LOOP_EN
, 0);
342 static bool cpr_ctl_is_enabled(struct cpr_drv
*drv
)
346 reg_val
= cpr_read(drv
, REG_RBCPR_CTL
);
347 return reg_val
& RBCPR_CTL_LOOP_EN
;
350 static bool cpr_ctl_is_busy(struct cpr_drv
*drv
)
354 reg_val
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
355 return reg_val
& RBCPR_RESULT0_BUSY_MASK
;
358 static void cpr_corner_save(struct cpr_drv
*drv
, struct corner
*corner
)
360 corner
->save_ctl
= cpr_read(drv
, REG_RBCPR_CTL
);
361 corner
->save_irq
= cpr_read(drv
, REG_RBIF_IRQ_EN(0));
364 static void cpr_corner_restore(struct cpr_drv
*drv
, struct corner
*corner
)
366 u32 gcnt
, ctl
, irq
, ro_sel
, step_quot
;
367 struct fuse_corner
*fuse
= corner
->fuse_corner
;
368 const struct cpr_desc
*desc
= drv
->desc
;
371 ro_sel
= fuse
->ring_osc_idx
;
373 gcnt
|= fuse
->quot
- corner
->quot_adjust
;
375 /* Program the step quotient and idle clocks */
376 step_quot
= desc
->idle_clocks
<< RBCPR_STEP_QUOT_IDLE_CLK_SHIFT
;
377 step_quot
|= fuse
->step_quot
& RBCPR_STEP_QUOT_STEPQUOT_MASK
;
378 cpr_write(drv
, REG_RBCPR_STEP_QUOT
, step_quot
);
380 /* Clear the target quotient value and gate count of all ROs */
381 for (i
= 0; i
< CPR_NUM_RING_OSC
; i
++)
382 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(i
), 0);
384 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(ro_sel
), gcnt
);
385 ctl
= corner
->save_ctl
;
386 cpr_write(drv
, REG_RBCPR_CTL
, ctl
);
387 irq
= corner
->save_irq
;
388 cpr_irq_set(drv
, irq
);
389 dev_dbg(drv
->dev
, "gcnt = %#08x, ctl = %#08x, irq = %#08x\n", gcnt
,
393 static void cpr_set_acc(struct regmap
*tcsr
, struct fuse_corner
*f
,
394 struct fuse_corner
*end
)
400 for (f
+= 1; f
<= end
; f
++)
401 regmap_multi_reg_write(tcsr
, f
->accs
, f
->num_accs
);
403 for (f
-= 1; f
>= end
; f
--)
404 regmap_multi_reg_write(tcsr
, f
->accs
, f
->num_accs
);
408 static int cpr_pre_voltage(struct cpr_drv
*drv
,
409 struct fuse_corner
*fuse_corner
,
410 enum voltage_change_dir dir
)
412 struct fuse_corner
*prev_fuse_corner
= drv
->corner
->fuse_corner
;
414 if (drv
->tcsr
&& dir
== DOWN
)
415 cpr_set_acc(drv
->tcsr
, prev_fuse_corner
, fuse_corner
);
420 static int cpr_post_voltage(struct cpr_drv
*drv
,
421 struct fuse_corner
*fuse_corner
,
422 enum voltage_change_dir dir
)
424 struct fuse_corner
*prev_fuse_corner
= drv
->corner
->fuse_corner
;
426 if (drv
->tcsr
&& dir
== UP
)
427 cpr_set_acc(drv
->tcsr
, prev_fuse_corner
, fuse_corner
);
432 static int cpr_scale_voltage(struct cpr_drv
*drv
, struct corner
*corner
,
433 int new_uV
, enum voltage_change_dir dir
)
436 struct fuse_corner
*fuse_corner
= corner
->fuse_corner
;
438 ret
= cpr_pre_voltage(drv
, fuse_corner
, dir
);
442 ret
= regulator_set_voltage(drv
->vdd_apc
, new_uV
, new_uV
);
444 dev_err_ratelimited(drv
->dev
, "failed to set apc voltage %d\n",
449 ret
= cpr_post_voltage(drv
, fuse_corner
, dir
);
456 static unsigned int cpr_get_cur_perf_state(struct cpr_drv
*drv
)
458 return drv
->corner
? drv
->corner
- drv
->corners
+ 1 : 0;
461 static int cpr_scale(struct cpr_drv
*drv
, enum voltage_change_dir dir
)
463 u32 val
, error_steps
, reg_mask
;
464 int last_uV
, new_uV
, step_uV
, ret
;
465 struct corner
*corner
;
466 const struct cpr_desc
*desc
= drv
->desc
;
468 if (dir
!= UP
&& dir
!= DOWN
)
471 step_uV
= regulator_get_linear_step(drv
->vdd_apc
);
475 corner
= drv
->corner
;
477 val
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
479 error_steps
= val
>> RBCPR_RESULT0_ERROR_STEPS_SHIFT
;
480 error_steps
&= RBCPR_RESULT0_ERROR_STEPS_MASK
;
481 last_uV
= corner
->last_uV
;
484 if (desc
->clamp_timer_interval
&&
485 error_steps
< desc
->up_threshold
) {
487 * Handle the case where another measurement started
488 * after the interrupt was triggered due to a core
489 * exiting from power collapse.
491 error_steps
= max(desc
->up_threshold
,
492 desc
->vdd_apc_step_up_limit
);
495 if (last_uV
>= corner
->max_uV
) {
496 cpr_irq_clr_nack(drv
);
498 /* Maximize the UP threshold */
499 reg_mask
= RBCPR_CTL_UP_THRESHOLD_MASK
;
500 reg_mask
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
502 cpr_ctl_modify(drv
, reg_mask
, val
);
504 /* Disable UP interrupt */
505 cpr_irq_set(drv
, CPR_INT_DEFAULT
& ~CPR_INT_UP
);
510 if (error_steps
> desc
->vdd_apc_step_up_limit
)
511 error_steps
= desc
->vdd_apc_step_up_limit
;
513 /* Calculate new voltage */
514 new_uV
= last_uV
+ error_steps
* step_uV
;
515 new_uV
= min(new_uV
, corner
->max_uV
);
518 "UP: -> new_uV: %d last_uV: %d perf state: %u\n",
519 new_uV
, last_uV
, cpr_get_cur_perf_state(drv
));
521 if (desc
->clamp_timer_interval
&&
522 error_steps
< desc
->down_threshold
) {
524 * Handle the case where another measurement started
525 * after the interrupt was triggered due to a core
526 * exiting from power collapse.
528 error_steps
= max(desc
->down_threshold
,
529 desc
->vdd_apc_step_down_limit
);
532 if (last_uV
<= corner
->min_uV
) {
533 cpr_irq_clr_nack(drv
);
535 /* Enable auto nack down */
536 reg_mask
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
537 val
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
539 cpr_ctl_modify(drv
, reg_mask
, val
);
541 /* Disable DOWN interrupt */
542 cpr_irq_set(drv
, CPR_INT_DEFAULT
& ~CPR_INT_DOWN
);
547 if (error_steps
> desc
->vdd_apc_step_down_limit
)
548 error_steps
= desc
->vdd_apc_step_down_limit
;
550 /* Calculate new voltage */
551 new_uV
= last_uV
- error_steps
* step_uV
;
552 new_uV
= max(new_uV
, corner
->min_uV
);
555 "DOWN: -> new_uV: %d last_uV: %d perf state: %u\n",
556 new_uV
, last_uV
, cpr_get_cur_perf_state(drv
));
559 ret
= cpr_scale_voltage(drv
, corner
, new_uV
, dir
);
561 cpr_irq_clr_nack(drv
);
564 drv
->corner
->last_uV
= new_uV
;
567 /* Disable auto nack down */
568 reg_mask
= RBCPR_CTL_SW_AUTO_CONT_NACK_DN_EN
;
571 /* Restore default threshold for UP */
572 reg_mask
= RBCPR_CTL_UP_THRESHOLD_MASK
;
573 reg_mask
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
574 val
= desc
->up_threshold
;
575 val
<<= RBCPR_CTL_UP_THRESHOLD_SHIFT
;
578 cpr_ctl_modify(drv
, reg_mask
, val
);
580 /* Re-enable default interrupts */
581 cpr_irq_set(drv
, CPR_INT_DEFAULT
);
584 cpr_irq_clr_ack(drv
);
589 static irqreturn_t
cpr_irq_handler(int irq
, void *dev
)
591 struct cpr_drv
*drv
= dev
;
592 const struct cpr_desc
*desc
= drv
->desc
;
593 irqreturn_t ret
= IRQ_HANDLED
;
596 mutex_lock(&drv
->lock
);
598 val
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
599 if (drv
->flags
& FLAGS_IGNORE_1ST_IRQ_STATUS
)
600 val
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
602 dev_dbg(drv
->dev
, "IRQ_STATUS = %#02x\n", val
);
604 if (!cpr_ctl_is_enabled(drv
)) {
605 dev_dbg(drv
->dev
, "CPR is disabled\n");
607 } else if (cpr_ctl_is_busy(drv
) && !desc
->clamp_timer_interval
) {
608 dev_dbg(drv
->dev
, "CPR measurement is not ready\n");
609 } else if (!cpr_is_allowed(drv
)) {
610 val
= cpr_read(drv
, REG_RBCPR_CTL
);
611 dev_err_ratelimited(drv
->dev
,
612 "Interrupt broken? RBCPR_CTL = %#02x\n",
617 * Following sequence of handling is as per each IRQ's
620 if (val
& CPR_INT_UP
) {
622 } else if (val
& CPR_INT_DOWN
) {
623 cpr_scale(drv
, DOWN
);
624 } else if (val
& CPR_INT_MIN
) {
625 cpr_irq_clr_nack(drv
);
626 } else if (val
& CPR_INT_MAX
) {
627 cpr_irq_clr_nack(drv
);
628 } else if (val
& CPR_INT_MID
) {
629 /* RBCPR_CTL_SW_AUTO_CONT_ACK_EN is enabled */
630 dev_dbg(drv
->dev
, "IRQ occurred for Mid Flag\n");
633 "IRQ occurred for unknown flag (%#08x)\n", val
);
636 /* Save register values for the corner */
637 cpr_corner_save(drv
, drv
->corner
);
640 mutex_unlock(&drv
->lock
);
645 static int cpr_enable(struct cpr_drv
*drv
)
649 ret
= regulator_enable(drv
->vdd_apc
);
653 mutex_lock(&drv
->lock
);
655 if (cpr_is_allowed(drv
) && drv
->corner
) {
657 cpr_corner_restore(drv
, drv
->corner
);
658 cpr_ctl_enable(drv
, drv
->corner
);
661 mutex_unlock(&drv
->lock
);
666 static int cpr_disable(struct cpr_drv
*drv
)
668 mutex_lock(&drv
->lock
);
670 if (cpr_is_allowed(drv
)) {
671 cpr_ctl_disable(drv
);
675 mutex_unlock(&drv
->lock
);
677 return regulator_disable(drv
->vdd_apc
);
680 static int cpr_config(struct cpr_drv
*drv
)
684 struct corner
*corner
;
685 const struct cpr_desc
*desc
= drv
->desc
;
687 /* Disable interrupt and CPR */
688 cpr_write(drv
, REG_RBIF_IRQ_EN(0), 0);
689 cpr_write(drv
, REG_RBCPR_CTL
, 0);
691 /* Program the default HW ceiling, floor and vlevel */
692 val
= (RBIF_LIMIT_CEILING_DEFAULT
& RBIF_LIMIT_CEILING_MASK
)
693 << RBIF_LIMIT_CEILING_SHIFT
;
694 val
|= RBIF_LIMIT_FLOOR_DEFAULT
& RBIF_LIMIT_FLOOR_MASK
;
695 cpr_write(drv
, REG_RBIF_LIMIT
, val
);
696 cpr_write(drv
, REG_RBIF_SW_VLEVEL
, RBIF_SW_VLEVEL_DEFAULT
);
699 * Clear the target quotient value and gate count of all
702 for (i
= 0; i
< CPR_NUM_RING_OSC
; i
++)
703 cpr_write(drv
, REG_RBCPR_GCNT_TARGET(i
), 0);
705 /* Init and save gcnt */
706 gcnt
= (drv
->ref_clk_khz
* desc
->gcnt_us
) / 1000;
707 gcnt
= gcnt
& RBCPR_GCNT_TARGET_GCNT_MASK
;
708 gcnt
<<= RBCPR_GCNT_TARGET_GCNT_SHIFT
;
711 /* Program the delay count for the timer */
712 val
= (drv
->ref_clk_khz
* desc
->timer_delay_us
) / 1000;
713 cpr_write(drv
, REG_RBCPR_TIMER_INTERVAL
, val
);
714 dev_dbg(drv
->dev
, "Timer count: %#0x (for %d us)\n", val
,
715 desc
->timer_delay_us
);
717 /* Program Consecutive Up & Down */
718 val
= desc
->timer_cons_down
<< RBIF_TIMER_ADJ_CONS_DOWN_SHIFT
;
719 val
|= desc
->timer_cons_up
<< RBIF_TIMER_ADJ_CONS_UP_SHIFT
;
720 val
|= desc
->clamp_timer_interval
<< RBIF_TIMER_ADJ_CLAMP_INT_SHIFT
;
721 cpr_write(drv
, REG_RBIF_TIMER_ADJUST
, val
);
723 /* Program the control register */
724 val
= desc
->up_threshold
<< RBCPR_CTL_UP_THRESHOLD_SHIFT
;
725 val
|= desc
->down_threshold
<< RBCPR_CTL_DN_THRESHOLD_SHIFT
;
726 val
|= RBCPR_CTL_TIMER_EN
| RBCPR_CTL_COUNT_MODE
;
727 val
|= RBCPR_CTL_SW_AUTO_CONT_ACK_EN
;
728 cpr_write(drv
, REG_RBCPR_CTL
, val
);
730 for (i
= 0; i
< drv
->num_corners
; i
++) {
731 corner
= &drv
->corners
[i
];
732 corner
->save_ctl
= val
;
733 corner
->save_irq
= CPR_INT_DEFAULT
;
736 cpr_irq_set(drv
, CPR_INT_DEFAULT
);
738 val
= cpr_read(drv
, REG_RBCPR_VERSION
);
739 if (val
<= RBCPR_VER_2
)
740 drv
->flags
|= FLAGS_IGNORE_1ST_IRQ_STATUS
;
745 static int cpr_set_performance_state(struct generic_pm_domain
*domain
,
748 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
749 struct corner
*corner
, *end
;
750 enum voltage_change_dir dir
;
753 guard(mutex
)(&drv
->lock
);
755 dev_dbg(drv
->dev
, "%s: setting perf state: %u (prev state: %u)\n",
756 __func__
, state
, cpr_get_cur_perf_state(drv
));
759 * Determine new corner we're going to.
760 * Remove one since lowest performance state is 1.
762 corner
= drv
->corners
+ state
- 1;
763 end
= &drv
->corners
[drv
->num_corners
- 1];
764 if (corner
> end
|| corner
< drv
->corners
)
767 /* Determine direction */
768 if (drv
->corner
> corner
)
770 else if (drv
->corner
< corner
)
775 if (cpr_is_allowed(drv
))
776 new_uV
= corner
->last_uV
;
780 if (cpr_is_allowed(drv
))
781 cpr_ctl_disable(drv
);
783 ret
= cpr_scale_voltage(drv
, corner
, new_uV
, dir
);
787 if (cpr_is_allowed(drv
)) {
789 if (drv
->corner
!= corner
)
790 cpr_corner_restore(drv
, corner
);
791 cpr_ctl_enable(drv
, corner
);
794 drv
->corner
= corner
;
800 cpr_populate_ring_osc_idx(struct cpr_drv
*drv
)
802 struct fuse_corner
*fuse
= drv
->fuse_corners
;
803 struct fuse_corner
*end
= fuse
+ drv
->desc
->num_fuse_corners
;
804 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
808 for (; fuse
< end
; fuse
++, fuses
++) {
809 ret
= nvmem_cell_read_variable_le_u32(drv
->dev
, fuses
->ring_osc
, &data
);
812 fuse
->ring_osc_idx
= data
;
818 static int cpr_read_fuse_uV(const struct cpr_desc
*desc
,
819 const struct fuse_corner_data
*fdata
,
820 const char *init_v_efuse
,
824 int step_size_uV
, steps
, uV
;
828 ret
= nvmem_cell_read_variable_le_u32(drv
->dev
, init_v_efuse
, &bits
);
832 steps
= bits
& ~BIT(desc
->cpr_fuses
.init_voltage_width
- 1);
833 /* Not two's complement.. instead highest bit is sign bit */
834 if (bits
& BIT(desc
->cpr_fuses
.init_voltage_width
- 1))
837 step_size_uV
= desc
->cpr_fuses
.init_voltage_step
;
839 uV
= fdata
->ref_uV
+ steps
* step_size_uV
;
840 return DIV_ROUND_UP(uV
, step_volt
) * step_volt
;
843 static int cpr_fuse_corner_init(struct cpr_drv
*drv
)
845 const struct cpr_desc
*desc
= drv
->desc
;
846 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
847 const struct acc_desc
*acc_desc
= drv
->acc_desc
;
849 unsigned int step_volt
;
850 struct fuse_corner_data
*fdata
;
851 struct fuse_corner
*fuse
, *end
;
853 const struct reg_sequence
*accs
;
856 accs
= acc_desc
->settings
;
858 step_volt
= regulator_get_linear_step(drv
->vdd_apc
);
862 /* Populate fuse_corner members */
863 fuse
= drv
->fuse_corners
;
864 end
= &fuse
[desc
->num_fuse_corners
- 1];
865 fdata
= desc
->cpr_fuses
.fuse_corner_data
;
867 for (i
= 0; fuse
<= end
; fuse
++, fuses
++, i
++, fdata
++) {
869 * Update SoC voltages: platforms might choose a different
870 * regulators than the one used to characterize the algorithms
871 * (ie, init_voltage_step).
873 fdata
->min_uV
= roundup(fdata
->min_uV
, step_volt
);
874 fdata
->max_uV
= roundup(fdata
->max_uV
, step_volt
);
877 uV
= cpr_read_fuse_uV(desc
, fdata
, fuses
->init_voltage
,
882 fuse
->min_uV
= fdata
->min_uV
;
883 fuse
->max_uV
= fdata
->max_uV
;
884 fuse
->uV
= clamp(uV
, fuse
->min_uV
, fuse
->max_uV
);
888 * Allow the highest fuse corner's PVS voltage to
889 * define the ceiling voltage for that corner in order
890 * to support SoC's in which variable ceiling values
893 end
->max_uV
= max(end
->max_uV
, end
->uV
);
896 /* Populate target quotient by scaling */
897 ret
= nvmem_cell_read_variable_le_u32(drv
->dev
, fuses
->quotient
, &fuse
->quot
);
901 fuse
->quot
*= fdata
->quot_scale
;
902 fuse
->quot
+= fdata
->quot_offset
;
903 fuse
->quot
+= fdata
->quot_adjust
;
904 fuse
->step_quot
= desc
->step_quot
[fuse
->ring_osc_idx
];
906 /* Populate acc settings */
908 fuse
->num_accs
= acc_desc
->num_regs_per_fuse
;
909 accs
+= acc_desc
->num_regs_per_fuse
;
913 * Restrict all fuse corner PVS voltages based upon per corner
914 * ceiling and floor voltages.
916 for (fuse
= drv
->fuse_corners
, i
= 0; fuse
<= end
; fuse
++, i
++) {
917 if (fuse
->uV
> fuse
->max_uV
)
918 fuse
->uV
= fuse
->max_uV
;
919 else if (fuse
->uV
< fuse
->min_uV
)
920 fuse
->uV
= fuse
->min_uV
;
922 ret
= regulator_is_supported_voltage(drv
->vdd_apc
,
927 "min uV: %d (fuse corner: %d) not supported by regulator\n",
932 ret
= regulator_is_supported_voltage(drv
->vdd_apc
,
937 "max uV: %d (fuse corner: %d) not supported by regulator\n",
943 "fuse corner %d: [%d %d %d] RO%hhu quot %d squot %d\n",
944 i
, fuse
->min_uV
, fuse
->uV
, fuse
->max_uV
,
945 fuse
->ring_osc_idx
, fuse
->quot
, fuse
->step_quot
);
951 static int cpr_calculate_scaling(const char *quot_offset
,
953 const struct fuse_corner_data
*fdata
,
954 const struct corner
*corner
)
957 unsigned long freq_diff
;
959 const struct fuse_corner
*fuse
, *prev_fuse
;
962 fuse
= corner
->fuse_corner
;
963 prev_fuse
= fuse
- 1;
966 ret
= nvmem_cell_read_variable_le_u32(drv
->dev
, quot_offset
, "_diff
);
970 quot_diff
*= fdata
->quot_offset_scale
;
971 quot_diff
+= fdata
->quot_offset_adjust
;
973 quot_diff
= fuse
->quot
- prev_fuse
->quot
;
976 freq_diff
= fuse
->max_freq
- prev_fuse
->max_freq
;
977 freq_diff
/= 1000000; /* Convert to MHz */
978 scaling
= 1000 * quot_diff
/ freq_diff
;
979 return min(scaling
, fdata
->max_quot_scale
);
982 static int cpr_interpolate(const struct corner
*corner
, int step_volt
,
983 const struct fuse_corner_data
*fdata
)
985 unsigned long f_high
, f_low
, f_diff
;
986 int uV_high
, uV_low
, uV
;
987 u64 temp
, temp_limit
;
988 const struct fuse_corner
*fuse
, *prev_fuse
;
990 fuse
= corner
->fuse_corner
;
991 prev_fuse
= fuse
- 1;
993 f_high
= fuse
->max_freq
;
994 f_low
= prev_fuse
->max_freq
;
996 uV_low
= prev_fuse
->uV
;
997 f_diff
= fuse
->max_freq
- corner
->freq
;
1000 * Don't interpolate in the wrong direction. This could happen
1001 * if the adjusted fuse voltage overlaps with the previous fuse's
1004 if (f_high
<= f_low
|| uV_high
<= uV_low
|| f_high
<= corner
->freq
)
1007 temp
= f_diff
* (uV_high
- uV_low
);
1008 temp
= div64_ul(temp
, f_high
- f_low
);
1011 * max_volt_scale has units of uV/MHz while freq values
1012 * have units of Hz. Divide by 1000000 to convert to.
1014 temp_limit
= f_diff
* fdata
->max_volt_scale
;
1015 do_div(temp_limit
, 1000000);
1017 uV
= uV_high
- min(temp
, temp_limit
);
1018 return roundup(uV
, step_volt
);
/*
 * Read the "qcom,opp-fuse-level" property of @opp's DT node.
 * Returns the fuse level, or 0 (with an error log) when the property
 * is missing.
 */
static unsigned int cpr_get_fuse_corner(struct dev_pm_opp *opp)
{
	struct device_node *np;
	unsigned int fuse_corner = 0;

	np = dev_pm_opp_get_of_node(opp);
	if (of_property_read_u32(np, "qcom,opp-fuse-level", &fuse_corner))
		pr_err("%s: missing 'qcom,opp-fuse-level' property\n",
		       __func__);

	of_node_put(np);

	return fuse_corner;
}
1036 static unsigned long cpr_get_opp_hz_for_req(struct dev_pm_opp
*ref
,
1037 struct device
*cpu_dev
)
1039 struct device_node
*ref_np
__free(device_node
) = NULL
;
1040 struct device_node
*desc_np
__free(device_node
) =
1041 dev_pm_opp_of_get_opp_desc_node(cpu_dev
);
1046 ref_np
= dev_pm_opp_get_of_node(ref
);
1050 for_each_available_child_of_node_scoped(desc_np
, child_np
) {
1051 struct device_node
*child_req_np
__free(device_node
) =
1052 of_parse_phandle(child_np
, "required-opps", 0);
1054 if (child_req_np
== ref_np
) {
1057 of_property_read_u64(child_np
, "opp-hz", &rate
);
1058 return (unsigned long) rate
;
1065 static int cpr_corner_init(struct cpr_drv
*drv
)
1067 const struct cpr_desc
*desc
= drv
->desc
;
1068 const struct cpr_fuse
*fuses
= drv
->cpr_fuses
;
1069 int i
, level
, scaling
= 0;
1070 unsigned int fnum
, fc
;
1071 const char *quot_offset
;
1072 struct fuse_corner
*fuse
, *prev_fuse
;
1073 struct corner
*corner
, *end
;
1074 struct corner_data
*cdata
;
1075 const struct fuse_corner_data
*fdata
;
1077 unsigned long freq_diff
, freq_diff_mhz
;
1079 int step_volt
= regulator_get_linear_step(drv
->vdd_apc
);
1080 struct dev_pm_opp
*opp
;
1085 corner
= drv
->corners
;
1086 end
= &corner
[drv
->num_corners
- 1];
1088 cdata
= devm_kcalloc(drv
->dev
, drv
->num_corners
,
1089 sizeof(struct corner_data
),
1095 * Store maximum frequency for each fuse corner based on the frequency
1098 for (level
= 1; level
<= drv
->num_corners
; level
++) {
1099 opp
= dev_pm_opp_find_level_exact(&drv
->pd
.dev
, level
);
1102 fc
= cpr_get_fuse_corner(opp
);
1104 dev_pm_opp_put(opp
);
1108 freq
= cpr_get_opp_hz_for_req(opp
, drv
->attached_cpu_dev
);
1110 dev_pm_opp_put(opp
);
1113 cdata
[level
- 1].fuse_corner
= fnum
;
1114 cdata
[level
- 1].freq
= freq
;
1116 fuse
= &drv
->fuse_corners
[fnum
];
1117 dev_dbg(drv
->dev
, "freq: %lu level: %u fuse level: %u\n",
1118 freq
, dev_pm_opp_get_level(opp
) - 1, fnum
);
1119 if (freq
> fuse
->max_freq
)
1120 fuse
->max_freq
= freq
;
1121 dev_pm_opp_put(opp
);
1125 * Get the quotient adjustment scaling factor, according to:
1127 * scaling = min(1000 * (QUOT(corner_N) - QUOT(corner_N-1))
1128 * / (freq(corner_N) - freq(corner_N-1)), max_factor)
1130 * QUOT(corner_N): quotient read from fuse for fuse corner N
1131 * QUOT(corner_N-1): quotient read from fuse for fuse corner (N - 1)
1132 * freq(corner_N): max frequency in MHz supported by fuse corner N
1133 * freq(corner_N-1): max frequency in MHz supported by fuse corner
1136 * Then walk through the corners mapped to each fuse corner
1137 * and calculate the quotient adjustment for each one using the
1138 * following formula:
1140 * quot_adjust = (freq_max - freq_corner) * scaling / 1000
1142 * freq_max: max frequency in MHz supported by the fuse corner
1143 * freq_corner: frequency in MHz corresponding to the corner
1144 * scaling: calculated from above equation
1155 * +--------------- +----------------
1156 * 0 1 2 3 4 5 6 0 1 2 3 4 5 6
1163 for (apply_scaling
= false, i
= 0; corner
<= end
; corner
++, i
++) {
1164 fnum
= cdata
[i
].fuse_corner
;
1165 fdata
= &desc
->cpr_fuses
.fuse_corner_data
[fnum
];
1166 quot_offset
= fuses
[fnum
].quotient_offset
;
1167 fuse
= &drv
->fuse_corners
[fnum
];
1169 prev_fuse
= &drv
->fuse_corners
[fnum
- 1];
1173 corner
->fuse_corner
= fuse
;
1174 corner
->freq
= cdata
[i
].freq
;
1175 corner
->uV
= fuse
->uV
;
1177 if (prev_fuse
&& cdata
[i
- 1].freq
== prev_fuse
->max_freq
) {
1178 scaling
= cpr_calculate_scaling(quot_offset
, drv
,
1183 apply_scaling
= true;
1184 } else if (corner
->freq
== fuse
->max_freq
) {
1185 /* This is a fuse corner; don't scale anything */
1186 apply_scaling
= false;
1189 if (apply_scaling
) {
1190 freq_diff
= fuse
->max_freq
- corner
->freq
;
1191 freq_diff_mhz
= freq_diff
/ 1000000;
1192 corner
->quot_adjust
= scaling
* freq_diff_mhz
/ 1000;
1194 corner
->uV
= cpr_interpolate(corner
, step_volt
, fdata
);
1197 corner
->max_uV
= fuse
->max_uV
;
1198 corner
->min_uV
= fuse
->min_uV
;
1199 corner
->uV
= clamp(corner
->uV
, corner
->min_uV
, corner
->max_uV
);
1200 corner
->last_uV
= corner
->uV
;
1202 /* Reduce the ceiling voltage if needed */
1203 if (desc
->reduce_to_corner_uV
&& corner
->uV
< corner
->max_uV
)
1204 corner
->max_uV
= corner
->uV
;
1205 else if (desc
->reduce_to_fuse_uV
&& fuse
->uV
< corner
->max_uV
)
1206 corner
->max_uV
= max(corner
->min_uV
, fuse
->uV
);
1208 dev_dbg(drv
->dev
, "corner %d: [%d %d %d] quot %d\n", i
,
1209 corner
->min_uV
, corner
->uV
, corner
->max_uV
,
1210 fuse
->quot
- corner
->quot_adjust
);
1216 static const struct cpr_fuse
*cpr_get_fuses(struct cpr_drv
*drv
)
1218 const struct cpr_desc
*desc
= drv
->desc
;
1219 struct cpr_fuse
*fuses
;
1222 fuses
= devm_kcalloc(drv
->dev
, desc
->num_fuse_corners
,
1223 sizeof(struct cpr_fuse
),
1226 return ERR_PTR(-ENOMEM
);
1228 for (i
= 0; i
< desc
->num_fuse_corners
; i
++) {
1231 snprintf(tbuf
, 32, "cpr_ring_osc%d", i
+ 1);
1232 fuses
[i
].ring_osc
= devm_kstrdup(drv
->dev
, tbuf
, GFP_KERNEL
);
1233 if (!fuses
[i
].ring_osc
)
1234 return ERR_PTR(-ENOMEM
);
1236 snprintf(tbuf
, 32, "cpr_init_voltage%d", i
+ 1);
1237 fuses
[i
].init_voltage
= devm_kstrdup(drv
->dev
, tbuf
,
1239 if (!fuses
[i
].init_voltage
)
1240 return ERR_PTR(-ENOMEM
);
1242 snprintf(tbuf
, 32, "cpr_quotient%d", i
+ 1);
1243 fuses
[i
].quotient
= devm_kstrdup(drv
->dev
, tbuf
, GFP_KERNEL
);
1244 if (!fuses
[i
].quotient
)
1245 return ERR_PTR(-ENOMEM
);
1247 snprintf(tbuf
, 32, "cpr_quotient_offset%d", i
+ 1);
1248 fuses
[i
].quotient_offset
= devm_kstrdup(drv
->dev
, tbuf
,
1250 if (!fuses
[i
].quotient_offset
)
1251 return ERR_PTR(-ENOMEM
);
1257 static void cpr_set_loop_allowed(struct cpr_drv
*drv
)
1259 drv
->loop_disabled
= false;
1262 static int cpr_init_parameters(struct cpr_drv
*drv
)
1264 const struct cpr_desc
*desc
= drv
->desc
;
1267 clk
= clk_get(drv
->dev
, "ref");
1269 return PTR_ERR(clk
);
1271 drv
->ref_clk_khz
= clk_get_rate(clk
) / 1000;
1274 if (desc
->timer_cons_up
> RBIF_TIMER_ADJ_CONS_UP_MASK
||
1275 desc
->timer_cons_down
> RBIF_TIMER_ADJ_CONS_DOWN_MASK
||
1276 desc
->up_threshold
> RBCPR_CTL_UP_THRESHOLD_MASK
||
1277 desc
->down_threshold
> RBCPR_CTL_DN_THRESHOLD_MASK
||
1278 desc
->idle_clocks
> RBCPR_STEP_QUOT_IDLE_CLK_MASK
||
1279 desc
->clamp_timer_interval
> RBIF_TIMER_ADJ_CLAMP_INT_MASK
)
1282 dev_dbg(drv
->dev
, "up threshold = %u, down threshold = %u\n",
1283 desc
->up_threshold
, desc
->down_threshold
);
1288 static int cpr_find_initial_corner(struct cpr_drv
*drv
)
1291 const struct corner
*end
;
1292 struct corner
*iter
;
1295 if (!drv
->cpu_clk
) {
1296 dev_err(drv
->dev
, "cannot get rate from NULL clk\n");
1300 end
= &drv
->corners
[drv
->num_corners
- 1];
1301 rate
= clk_get_rate(drv
->cpu_clk
);
1304 * Some bootloaders set a CPU clock frequency that is not defined
1305 * in the OPP table. When running at an unlisted frequency,
1306 * cpufreq_online() will change to the OPP which has the lowest
1307 * frequency, at or above the unlisted frequency.
1308 * Since cpufreq_online() always "rounds up" in the case of an
1309 * unlisted frequency, this function always "rounds down" in case
1310 * of an unlisted frequency. That way, when cpufreq_online()
1311 * triggers the first ever call to cpr_set_performance_state(),
1312 * it will correctly determine the direction as UP.
1314 for (iter
= drv
->corners
; iter
<= end
; iter
++) {
1315 if (iter
->freq
> rate
)
1318 if (iter
->freq
== rate
) {
1322 if (iter
->freq
< rate
)
1327 dev_err(drv
->dev
, "boot up corner not found\n");
1331 dev_dbg(drv
->dev
, "boot up perf state: %u\n", i
);
1336 static const struct cpr_desc qcs404_cpr_desc
= {
1337 .num_fuse_corners
= 3,
1338 .min_diff_quot
= CPR_FUSE_MIN_QUOT_DIFF
,
1339 .step_quot
= (int []){ 25, 25, 25, },
1340 .timer_delay_us
= 5000,
1342 .timer_cons_down
= 2,
1344 .down_threshold
= 3,
1347 .vdd_apc_step_up_limit
= 1,
1348 .vdd_apc_step_down_limit
= 1,
1350 .init_voltage_step
= 8000,
1351 .init_voltage_width
= 6,
1352 .fuse_corner_data
= (struct fuse_corner_data
[]){
1358 .max_volt_scale
= 0,
1359 .max_quot_scale
= 0,
1363 .quot_offset_scale
= 5,
1364 .quot_offset_adjust
= 0,
1371 .max_volt_scale
= 2000,
1372 .max_quot_scale
= 1400,
1376 .quot_offset_scale
= 5,
1377 .quot_offset_adjust
= 0,
1384 .max_volt_scale
= 2000,
1385 .max_quot_scale
= 1400,
1389 .quot_offset_scale
= 5,
1390 .quot_offset_adjust
= 0,
1396 static const struct acc_desc qcs404_acc_desc
= {
1397 .settings
= (struct reg_sequence
[]){
1398 { 0xb120, 0x1041040 },
1405 .config
= (struct reg_sequence
[]){
1409 .num_regs_per_fuse
= 2,
1412 static const struct cpr_acc_desc qcs404_cpr_acc_desc
= {
1413 .cpr_desc
= &qcs404_cpr_desc
,
1414 .acc_desc
= &qcs404_acc_desc
,
1417 static int cpr_power_off(struct generic_pm_domain
*domain
)
1419 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1421 return cpr_disable(drv
);
1424 static int cpr_power_on(struct generic_pm_domain
*domain
)
1426 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1428 return cpr_enable(drv
);
1431 static int cpr_pd_attach_dev(struct generic_pm_domain
*domain
,
1434 struct cpr_drv
*drv
= container_of(domain
, struct cpr_drv
, pd
);
1435 const struct acc_desc
*acc_desc
= drv
->acc_desc
;
1438 guard(mutex
)(&drv
->lock
);
1440 dev_dbg(drv
->dev
, "attach callback for: %s\n", dev_name(dev
));
1443 * This driver only supports scaling voltage for a CPU cluster
1444 * where all CPUs in the cluster share a single regulator.
1445 * Therefore, save the struct device pointer only for the first
1446 * CPU device that gets attached. There is no need to do any
1447 * additional initialization when further CPUs get attached.
1449 if (drv
->attached_cpu_dev
)
1453 * cpr_scale_voltage() requires the direction (if we are changing
1454 * to a higher or lower OPP). The first time
1455 * cpr_set_performance_state() is called, there is no previous
1456 * performance state defined. Therefore, we call
1457 * cpr_find_initial_corner() that gets the CPU clock frequency
1458 * set by the bootloader, so that we can determine the direction
1459 * the first time cpr_set_performance_state() is called.
1461 drv
->cpu_clk
= devm_clk_get(dev
, NULL
);
1462 if (IS_ERR(drv
->cpu_clk
))
1463 return dev_err_probe(drv
->dev
, PTR_ERR(drv
->cpu_clk
),
1464 "could not get cpu clk\n");
1466 drv
->attached_cpu_dev
= dev
;
1468 dev_dbg(drv
->dev
, "using cpu clk from: %s\n",
1469 dev_name(drv
->attached_cpu_dev
));
1472 * Everything related to (virtual) corners has to be initialized
1473 * here, when attaching to the power domain, since we need to know
1474 * the maximum frequency for each fuse corner, and this is only
1475 * available after the cpufreq driver has attached to us.
1476 * The reason for this is that we need to know the highest
1477 * frequency associated with each fuse corner.
1479 ret
= dev_pm_opp_get_opp_count(&drv
->pd
.dev
);
1481 dev_err(drv
->dev
, "could not get OPP count\n");
1484 drv
->num_corners
= ret
;
1486 if (drv
->num_corners
< 2) {
1487 dev_err(drv
->dev
, "need at least 2 OPPs to use CPR\n");
1491 drv
->corners
= devm_kcalloc(drv
->dev
, drv
->num_corners
,
1492 sizeof(*drv
->corners
),
1497 ret
= cpr_corner_init(drv
);
1501 cpr_set_loop_allowed(drv
);
1503 ret
= cpr_init_parameters(drv
);
1507 /* Configure CPR HW but keep it disabled */
1508 ret
= cpr_config(drv
);
1512 ret
= cpr_find_initial_corner(drv
);
1516 if (acc_desc
->config
)
1517 regmap_multi_reg_write(drv
->tcsr
, acc_desc
->config
,
1518 acc_desc
->num_regs_per_fuse
);
1520 /* Enable ACC if required */
1521 if (acc_desc
->enable_mask
)
1522 regmap_update_bits(drv
->tcsr
, acc_desc
->enable_reg
,
1523 acc_desc
->enable_mask
,
1524 acc_desc
->enable_mask
);
1526 dev_info(drv
->dev
, "driver initialized with %u OPPs\n",
1532 static int cpr_debug_info_show(struct seq_file
*s
, void *unused
)
1534 u32 gcnt
, ro_sel
, ctl
, irq_status
, reg
, error_steps
;
1535 u32 step_dn
, step_up
, error
, error_lt0
, busy
;
1536 struct cpr_drv
*drv
= s
->private;
1537 struct fuse_corner
*fuse_corner
;
1538 struct corner
*corner
;
1540 corner
= drv
->corner
;
1541 fuse_corner
= corner
->fuse_corner
;
1543 seq_printf(s
, "corner, current_volt = %d uV\n",
1546 ro_sel
= fuse_corner
->ring_osc_idx
;
1547 gcnt
= cpr_read(drv
, REG_RBCPR_GCNT_TARGET(ro_sel
));
1548 seq_printf(s
, "rbcpr_gcnt_target (%u) = %#02X\n", ro_sel
, gcnt
);
1550 ctl
= cpr_read(drv
, REG_RBCPR_CTL
);
1551 seq_printf(s
, "rbcpr_ctl = %#02X\n", ctl
);
1553 irq_status
= cpr_read(drv
, REG_RBIF_IRQ_STATUS
);
1554 seq_printf(s
, "rbcpr_irq_status = %#02X\n", irq_status
);
1556 reg
= cpr_read(drv
, REG_RBCPR_RESULT_0
);
1557 seq_printf(s
, "rbcpr_result_0 = %#02X\n", reg
);
1559 step_dn
= reg
& 0x01;
1560 step_up
= (reg
>> RBCPR_RESULT0_STEP_UP_SHIFT
) & 0x01;
1561 seq_printf(s
, " [step_dn = %u", step_dn
);
1563 seq_printf(s
, ", step_up = %u", step_up
);
1565 error_steps
= (reg
>> RBCPR_RESULT0_ERROR_STEPS_SHIFT
)
1566 & RBCPR_RESULT0_ERROR_STEPS_MASK
;
1567 seq_printf(s
, ", error_steps = %u", error_steps
);
1569 error
= (reg
>> RBCPR_RESULT0_ERROR_SHIFT
) & RBCPR_RESULT0_ERROR_MASK
;
1570 seq_printf(s
, ", error = %u", error
);
1572 error_lt0
= (reg
>> RBCPR_RESULT0_ERROR_LT0_SHIFT
) & 0x01;
1573 seq_printf(s
, ", error_lt_0 = %u", error_lt0
);
1575 busy
= (reg
>> RBCPR_RESULT0_BUSY_SHIFT
) & 0x01;
1576 seq_printf(s
, ", busy = %u]\n", busy
);
1580 DEFINE_SHOW_ATTRIBUTE(cpr_debug_info
);
1582 static void cpr_debugfs_init(struct cpr_drv
*drv
)
1584 drv
->debugfs
= debugfs_create_dir("qcom_cpr", NULL
);
1586 debugfs_create_file("debug_info", 0444, drv
->debugfs
,
1587 drv
, &cpr_debug_info_fops
);
1590 static int cpr_probe(struct platform_device
*pdev
)
1592 struct device
*dev
= &pdev
->dev
;
1593 struct cpr_drv
*drv
;
1595 const struct cpr_acc_desc
*data
;
1596 struct device_node
*np
;
1597 u32 cpr_rev
= FUSE_REVISION_UNKNOWN
;
1599 data
= of_device_get_match_data(dev
);
1600 if (!data
|| !data
->cpr_desc
|| !data
->acc_desc
)
1603 drv
= devm_kzalloc(dev
, sizeof(*drv
), GFP_KERNEL
);
1607 drv
->desc
= data
->cpr_desc
;
1608 drv
->acc_desc
= data
->acc_desc
;
1610 drv
->fuse_corners
= devm_kcalloc(dev
, drv
->desc
->num_fuse_corners
,
1611 sizeof(*drv
->fuse_corners
),
1613 if (!drv
->fuse_corners
)
1616 np
= of_parse_phandle(dev
->of_node
, "acc-syscon", 0);
1620 drv
->tcsr
= syscon_node_to_regmap(np
);
1622 if (IS_ERR(drv
->tcsr
))
1623 return PTR_ERR(drv
->tcsr
);
1625 drv
->base
= devm_platform_ioremap_resource(pdev
, 0);
1626 if (IS_ERR(drv
->base
))
1627 return PTR_ERR(drv
->base
);
1629 irq
= platform_get_irq(pdev
, 0);
1633 drv
->vdd_apc
= devm_regulator_get(dev
, "vdd-apc");
1634 if (IS_ERR(drv
->vdd_apc
))
1635 return PTR_ERR(drv
->vdd_apc
);
1638 * Initialize fuse corners, since it simply depends
1639 * on data in efuses.
1640 * Everything related to (virtual) corners has to be
1641 * initialized after attaching to the power domain,
1642 * since it depends on the CPU's OPP table.
1644 ret
= nvmem_cell_read_variable_le_u32(dev
, "cpr_fuse_revision", &cpr_rev
);
1648 drv
->cpr_fuses
= cpr_get_fuses(drv
);
1649 if (IS_ERR(drv
->cpr_fuses
))
1650 return PTR_ERR(drv
->cpr_fuses
);
1652 ret
= cpr_populate_ring_osc_idx(drv
);
1656 ret
= cpr_fuse_corner_init(drv
);
1660 mutex_init(&drv
->lock
);
1662 ret
= devm_request_threaded_irq(dev
, irq
, NULL
,
1664 IRQF_ONESHOT
| IRQF_TRIGGER_RISING
,
1669 drv
->pd
.name
= devm_kstrdup_const(dev
, dev
->of_node
->full_name
,
1674 drv
->pd
.power_off
= cpr_power_off
;
1675 drv
->pd
.power_on
= cpr_power_on
;
1676 drv
->pd
.set_performance_state
= cpr_set_performance_state
;
1677 drv
->pd
.attach_dev
= cpr_pd_attach_dev
;
1679 ret
= pm_genpd_init(&drv
->pd
, NULL
, true);
1683 ret
= of_genpd_add_provider_simple(dev
->of_node
, &drv
->pd
);
1685 goto err_remove_genpd
;
1687 platform_set_drvdata(pdev
, drv
);
1688 cpr_debugfs_init(drv
);
1693 pm_genpd_remove(&drv
->pd
);
1697 static void cpr_remove(struct platform_device
*pdev
)
1699 struct cpr_drv
*drv
= platform_get_drvdata(pdev
);
1701 if (cpr_is_allowed(drv
)) {
1702 cpr_ctl_disable(drv
);
1703 cpr_irq_set(drv
, 0);
1706 of_genpd_del_provider(pdev
->dev
.of_node
);
1707 pm_genpd_remove(&drv
->pd
);
1709 debugfs_remove_recursive(drv
->debugfs
);
1712 static const struct of_device_id cpr_match_table
[] = {
1713 { .compatible
= "qcom,qcs404-cpr", .data
= &qcs404_cpr_acc_desc
},
1716 MODULE_DEVICE_TABLE(of
, cpr_match_table
);
1718 static struct platform_driver cpr_driver
= {
1720 .remove_new
= cpr_remove
,
1723 .of_match_table
= cpr_match_table
,
1726 module_platform_driver(cpr_driver
);
1728 MODULE_DESCRIPTION("Core Power Reduction (CPR) driver");
1729 MODULE_LICENSE("GPL v2");