// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
 */
6 #include <linux/bitops.h>
7 #include <linux/delay.h>
9 #include <linux/export.h>
10 #include <linux/jiffies.h>
11 #include <linux/kernel.h>
12 #include <linux/ktime.h>
13 #include <linux/pm_domain.h>
14 #include <linux/regmap.h>
15 #include <linux/regulator/consumer.h>
16 #include <linux/reset-controller.h>
17 #include <linux/slab.h>
/* GDSCR: bit layout of the main GDSC control/status register */
#define PWR_ON_MASK		BIT(31)
#define EN_REST_WAIT_MASK	GENMASK_ULL(23, 20)
#define EN_FEW_WAIT_MASK	GENMASK_ULL(19, 16)
#define CLK_DIS_WAIT_MASK	GENMASK_ULL(15, 12)
#define SW_OVERRIDE_MASK	BIT(2)
#define HW_CONTROL_MASK		BIT(1)
#define SW_COLLAPSE_MASK	BIT(0)
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* CFG_GDSCR: status bits in the configuration register (gdscr + 0x4) */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
#define GDSC_RETAIN_FF_ENABLE		BIT(11)
#define CFG_GDSCR_OFFSET		0x4

/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL	0x2
#define EN_FEW_WAIT_VAL		0x8
#define CLK_DIS_WAIT_VAL	0x2

/* Transition delay shifts */
#define EN_REST_WAIT_SHIFT	20
#define EN_FEW_WAIT_SHIFT	16
#define CLK_DIS_WAIT_SHIFT	12

/* Memory/peripheral retention control bits in the CXC registers */
#define RETAIN_MEM		BIT(14)
#define RETAIN_PERIPH		BIT(13)

#define STATUS_POLL_TIMEOUT_US	1500
#define TIMEOUT_US		500

#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
59 /* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
60 static int gdsc_check_status(struct gdsc
*sc
, enum gdsc_status status
)
66 if (sc
->flags
& POLL_CFG_GDSCR
)
67 reg
= sc
->gdscr
+ CFG_GDSCR_OFFSET
;
68 else if (sc
->gds_hw_ctrl
)
69 reg
= sc
->gds_hw_ctrl
;
73 ret
= regmap_read(sc
->regmap
, reg
, &val
);
77 if (sc
->flags
& POLL_CFG_GDSCR
) {
80 return !!(val
& GDSC_POWER_UP_COMPLETE
);
82 return !!(val
& GDSC_POWER_DOWN_COMPLETE
);
88 return !!(val
& PWR_ON_MASK
);
90 return !(val
& PWR_ON_MASK
);
96 static int gdsc_hwctrl(struct gdsc
*sc
, bool en
)
98 u32 val
= en
? HW_CONTROL_MASK
: 0;
100 return regmap_update_bits(sc
->regmap
, sc
->gdscr
, HW_CONTROL_MASK
, val
);
103 static int gdsc_poll_status(struct gdsc
*sc
, enum gdsc_status status
)
109 if (gdsc_check_status(sc
, status
))
111 } while (ktime_us_delta(ktime_get(), start
) < STATUS_POLL_TIMEOUT_US
);
113 if (gdsc_check_status(sc
, status
))
119 static int gdsc_update_collapse_bit(struct gdsc
*sc
, bool val
)
124 if (sc
->collapse_mask
) {
125 reg
= sc
->collapse_ctrl
;
126 mask
= sc
->collapse_mask
;
129 mask
= SW_COLLAPSE_MASK
;
132 ret
= regmap_update_bits(sc
->regmap
, reg
, mask
, val
? mask
: 0);
139 static int gdsc_toggle_logic(struct gdsc
*sc
, enum gdsc_status status
,
144 if (status
== GDSC_ON
&& sc
->rsupply
) {
145 ret
= regulator_enable(sc
->rsupply
);
150 ret
= gdsc_update_collapse_bit(sc
, status
== GDSC_OFF
);
152 /* If disabling votable gdscs, don't poll on status */
153 if ((sc
->flags
& VOTABLE
) && status
== GDSC_OFF
&& !wait
) {
155 * Add a short delay here to ensure that an enable
156 * right after it was disabled does not put it in an
163 if (sc
->gds_hw_ctrl
) {
165 * The gds hw controller asserts/de-asserts the status bit soon
166 * after it receives a power on/off request from a master.
167 * The controller then takes around 8 xo cycles to start its
168 * internal state machine and update the status bit. During
169 * this time, the status bit does not reflect the true status
171 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
172 * and polling the status bit.
177 ret
= gdsc_poll_status(sc
, status
);
178 WARN(ret
, "%s status stuck at 'o%s'", sc
->pd
.name
, status
? "ff" : "n");
180 if (!ret
&& status
== GDSC_OFF
&& sc
->rsupply
) {
181 ret
= regulator_disable(sc
->rsupply
);
189 static inline int gdsc_deassert_reset(struct gdsc
*sc
)
193 for (i
= 0; i
< sc
->reset_count
; i
++)
194 sc
->rcdev
->ops
->deassert(sc
->rcdev
, sc
->resets
[i
]);
198 static inline int gdsc_assert_reset(struct gdsc
*sc
)
202 for (i
= 0; i
< sc
->reset_count
; i
++)
203 sc
->rcdev
->ops
->assert(sc
->rcdev
, sc
->resets
[i
]);
207 static inline void gdsc_force_mem_on(struct gdsc
*sc
)
210 u32 mask
= RETAIN_MEM
;
212 if (!(sc
->flags
& NO_RET_PERIPH
))
213 mask
|= RETAIN_PERIPH
;
215 for (i
= 0; i
< sc
->cxc_count
; i
++)
216 regmap_update_bits(sc
->regmap
, sc
->cxcs
[i
], mask
, mask
);
219 static inline void gdsc_clear_mem_on(struct gdsc
*sc
)
222 u32 mask
= RETAIN_MEM
;
224 if (!(sc
->flags
& NO_RET_PERIPH
))
225 mask
|= RETAIN_PERIPH
;
227 for (i
= 0; i
< sc
->cxc_count
; i
++)
228 regmap_update_bits(sc
->regmap
, sc
->cxcs
[i
], mask
, 0);
231 static inline void gdsc_deassert_clamp_io(struct gdsc
*sc
)
233 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
234 GMEM_CLAMP_IO_MASK
, 0);
237 static inline void gdsc_assert_clamp_io(struct gdsc
*sc
)
239 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
240 GMEM_CLAMP_IO_MASK
, 1);
243 static inline void gdsc_assert_reset_aon(struct gdsc
*sc
)
245 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
248 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
252 static void gdsc_retain_ff_on(struct gdsc
*sc
)
254 u32 mask
= GDSC_RETAIN_FF_ENABLE
;
256 regmap_update_bits(sc
->regmap
, sc
->gdscr
, mask
, mask
);
259 static int gdsc_enable(struct generic_pm_domain
*domain
)
261 struct gdsc
*sc
= domain_to_gdsc(domain
);
264 if (sc
->pwrsts
== PWRSTS_ON
)
265 return gdsc_deassert_reset(sc
);
267 if (sc
->flags
& SW_RESET
) {
268 gdsc_assert_reset(sc
);
270 gdsc_deassert_reset(sc
);
273 if (sc
->flags
& CLAMP_IO
) {
274 if (sc
->flags
& AON_RESET
)
275 gdsc_assert_reset_aon(sc
);
276 gdsc_deassert_clamp_io(sc
);
279 ret
= gdsc_toggle_logic(sc
, GDSC_ON
, false);
283 if (sc
->pwrsts
& PWRSTS_OFF
)
284 gdsc_force_mem_on(sc
);
287 * If clocks to this power domain were already on, they will take an
288 * additional 4 clock cycles to re-enable after the power domain is
289 * enabled. Delay to account for this. A delay is also needed to ensure
290 * clocks are not enabled within 400ns of enabling power to the
295 /* Turn on HW trigger mode if supported */
296 if (sc
->flags
& HW_CTRL
) {
297 ret
= gdsc_hwctrl(sc
, true);
301 * Wait for the GDSC to go through a power down and
302 * up cycle. In case a firmware ends up polling status
303 * bits for the gdsc, it might read an 'on' status before
304 * the GDSC can finish the power cycle.
305 * We wait 1us before returning to ensure the firmware
306 * can't immediately poll the status bits.
311 if (sc
->flags
& RETAIN_FF_ENABLE
)
312 gdsc_retain_ff_on(sc
);
317 static int gdsc_disable(struct generic_pm_domain
*domain
)
319 struct gdsc
*sc
= domain_to_gdsc(domain
);
322 if (sc
->pwrsts
== PWRSTS_ON
)
323 return gdsc_assert_reset(sc
);
325 /* Turn off HW trigger mode if supported */
326 if (sc
->flags
& HW_CTRL
) {
327 ret
= gdsc_hwctrl(sc
, false);
331 * Wait for the GDSC to go through a power down and
332 * up cycle. In case we end up polling status
333 * bits for the gdsc before the power cycle is completed
334 * it might read an 'on' status wrongly.
338 ret
= gdsc_poll_status(sc
, GDSC_ON
);
343 if (sc
->pwrsts
& PWRSTS_OFF
)
344 gdsc_clear_mem_on(sc
);
347 * If the GDSC supports only a Retention state, apart from ON,
348 * leave it in ON state.
349 * There is no SW control to transition the GDSC into
350 * Retention state. This happens in HW when the parent
351 * domain goes down to a Low power state
353 if (sc
->pwrsts
== PWRSTS_RET_ON
)
356 ret
= gdsc_toggle_logic(sc
, GDSC_OFF
, domain
->synced_poweroff
);
360 if (sc
->flags
& CLAMP_IO
)
361 gdsc_assert_clamp_io(sc
);
366 static int gdsc_set_hwmode(struct generic_pm_domain
*domain
, struct device
*dev
, bool mode
)
368 struct gdsc
*sc
= domain_to_gdsc(domain
);
371 ret
= gdsc_hwctrl(sc
, mode
);
376 * Wait for the GDSC to go through a power down and
377 * up cycle. If we poll the status register before the
378 * power cycle is finished we might read incorrect values.
383 * When the GDSC is switched to HW mode, HW can disable the GDSC.
384 * When the GDSC is switched back to SW mode, the GDSC will be enabled
385 * again, hence we need to poll for GDSC to complete the power up.
388 return gdsc_poll_status(sc
, GDSC_ON
);
393 static bool gdsc_get_hwmode(struct generic_pm_domain
*domain
, struct device
*dev
)
395 struct gdsc
*sc
= domain_to_gdsc(domain
);
398 regmap_read(sc
->regmap
, sc
->gdscr
, &val
);
400 return !!(val
& HW_CONTROL_MASK
);
403 static int gdsc_init(struct gdsc
*sc
)
409 * Disable HW trigger: collapse/restore occur based on registers writes.
410 * Disable SW override: Use hardware state-machine for sequencing.
411 * Configure wait time between states.
413 mask
= HW_CONTROL_MASK
| SW_OVERRIDE_MASK
|
414 EN_REST_WAIT_MASK
| EN_FEW_WAIT_MASK
| CLK_DIS_WAIT_MASK
;
416 if (!sc
->en_rest_wait_val
)
417 sc
->en_rest_wait_val
= EN_REST_WAIT_VAL
;
418 if (!sc
->en_few_wait_val
)
419 sc
->en_few_wait_val
= EN_FEW_WAIT_VAL
;
420 if (!sc
->clk_dis_wait_val
)
421 sc
->clk_dis_wait_val
= CLK_DIS_WAIT_VAL
;
423 val
= sc
->en_rest_wait_val
<< EN_REST_WAIT_SHIFT
|
424 sc
->en_few_wait_val
<< EN_FEW_WAIT_SHIFT
|
425 sc
->clk_dis_wait_val
<< CLK_DIS_WAIT_SHIFT
;
427 ret
= regmap_update_bits(sc
->regmap
, sc
->gdscr
, mask
, val
);
431 /* Force gdsc ON if only ON state is supported */
432 if (sc
->pwrsts
== PWRSTS_ON
) {
433 ret
= gdsc_toggle_logic(sc
, GDSC_ON
, false);
438 on
= gdsc_check_status(sc
, GDSC_ON
);
443 /* The regulator must be on, sync the kernel state */
445 ret
= regulator_enable(sc
->rsupply
);
451 * Votable GDSCs can be ON due to Vote from other masters.
452 * If a Votable GDSC is ON, make sure we have a Vote.
454 if (sc
->flags
& VOTABLE
) {
455 ret
= gdsc_update_collapse_bit(sc
, false);
457 goto err_disable_supply
;
460 /* Turn on HW trigger mode if supported */
461 if (sc
->flags
& HW_CTRL
) {
462 ret
= gdsc_hwctrl(sc
, true);
464 goto err_disable_supply
;
468 * Make sure the retain bit is set if the GDSC is already on,
469 * otherwise we end up turning off the GDSC and destroying all
470 * the register contents that we thought we were saving.
472 if (sc
->flags
& RETAIN_FF_ENABLE
)
473 gdsc_retain_ff_on(sc
);
474 } else if (sc
->flags
& ALWAYS_ON
) {
475 /* If ALWAYS_ON GDSCs are not ON, turn them ON */
476 gdsc_enable(&sc
->pd
);
480 if (on
|| (sc
->pwrsts
& PWRSTS_RET
))
481 gdsc_force_mem_on(sc
);
483 gdsc_clear_mem_on(sc
);
485 if (sc
->flags
& ALWAYS_ON
)
486 sc
->pd
.flags
|= GENPD_FLAG_ALWAYS_ON
;
487 if (!sc
->pd
.power_off
)
488 sc
->pd
.power_off
= gdsc_disable
;
489 if (!sc
->pd
.power_on
)
490 sc
->pd
.power_on
= gdsc_enable
;
491 if (sc
->flags
& HW_CTRL_TRIGGER
) {
492 sc
->pd
.set_hwmode_dev
= gdsc_set_hwmode
;
493 sc
->pd
.get_hwmode_dev
= gdsc_get_hwmode
;
496 ret
= pm_genpd_init(&sc
->pd
, NULL
, !on
);
498 goto err_disable_supply
;
503 if (on
&& sc
->rsupply
)
504 regulator_disable(sc
->rsupply
);
509 int gdsc_register(struct gdsc_desc
*desc
,
510 struct reset_controller_dev
*rcdev
, struct regmap
*regmap
)
513 struct genpd_onecell_data
*data
;
514 struct device
*dev
= desc
->dev
;
515 struct gdsc
**scs
= desc
->scs
;
516 size_t num
= desc
->num
;
518 data
= devm_kzalloc(dev
, sizeof(*data
), GFP_KERNEL
);
522 data
->domains
= devm_kcalloc(dev
, num
, sizeof(*data
->domains
),
527 for (i
= 0; i
< num
; i
++) {
528 if (!scs
[i
] || !scs
[i
]->supply
)
531 scs
[i
]->rsupply
= devm_regulator_get_optional(dev
, scs
[i
]->supply
);
532 if (IS_ERR(scs
[i
]->rsupply
)) {
533 ret
= PTR_ERR(scs
[i
]->rsupply
);
537 scs
[i
]->rsupply
= NULL
;
541 data
->num_domains
= num
;
542 for (i
= 0; i
< num
; i
++) {
545 scs
[i
]->regmap
= regmap
;
546 scs
[i
]->rcdev
= rcdev
;
547 ret
= gdsc_init(scs
[i
]);
550 data
->domains
[i
] = &scs
[i
]->pd
;
554 for (i
= 0; i
< num
; i
++) {
558 pm_genpd_add_subdomain(scs
[i
]->parent
, &scs
[i
]->pd
);
559 else if (!IS_ERR_OR_NULL(dev
->pm_domain
))
560 pm_genpd_add_subdomain(pd_to_genpd(dev
->pm_domain
), &scs
[i
]->pd
);
563 return of_genpd_add_provider_onecell(dev
->of_node
, data
);
566 void gdsc_unregister(struct gdsc_desc
*desc
)
569 struct device
*dev
= desc
->dev
;
570 struct gdsc
**scs
= desc
->scs
;
571 size_t num
= desc
->num
;
573 /* Remove subdomains */
574 for (i
= 0; i
< num
; i
++) {
578 pm_genpd_remove_subdomain(scs
[i
]->parent
, &scs
[i
]->pd
);
579 else if (!IS_ERR_OR_NULL(dev
->pm_domain
))
580 pm_genpd_remove_subdomain(pd_to_genpd(dev
->pm_domain
), &scs
[i
]->pd
);
582 of_genpd_del_provider(dev
->of_node
);
586 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
587 * running in the CX domain so the CPU doesn't need to know anything about the
588 * GX domain EXCEPT....
590 * Hardware constraints dictate that the GX be powered down before the CX. If
591 * the GMU crashes it could leave the GX on. In order to successfully bring back
592 * the device the CPU needs to disable the GX headswitch. There being no sane
593 * way to reach in and touch that register from deep inside the GPU driver we
594 * need to set up the infrastructure to be able to ensure that the GPU can
595 * ensure that the GX is off during this super special case. We do this by
596 * defining a GX gdsc with a dummy enable function and a "default" disable
599 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
600 * driver. During power up, nothing will happen from the CPU (and the GMU will
601 * power up normally but during power down this will ensure that the GX domain
602 * is *really* off - this gives us a semi standard way of doing what we need.
604 int gdsc_gx_do_nothing_enable(struct generic_pm_domain
*domain
)
606 struct gdsc
*sc
= domain_to_gdsc(domain
);
609 /* Enable the parent supply, when controlled through the regulator framework. */
611 ret
= regulator_enable(sc
->rsupply
);
613 /* Do nothing with the GDSC itself */
617 EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable
);