// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, 2017-2018, The Linux Foundation. All rights reserved.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>

#include "gdsc.h"
20 #define PWR_ON_MASK BIT(31)
21 #define EN_REST_WAIT_MASK GENMASK_ULL(23, 20)
22 #define EN_FEW_WAIT_MASK GENMASK_ULL(19, 16)
23 #define CLK_DIS_WAIT_MASK GENMASK_ULL(15, 12)
24 #define SW_OVERRIDE_MASK BIT(2)
25 #define HW_CONTROL_MASK BIT(1)
26 #define SW_COLLAPSE_MASK BIT(0)
27 #define GMEM_CLAMP_IO_MASK BIT(0)
28 #define GMEM_RESET_MASK BIT(4)
31 #define GDSC_POWER_UP_COMPLETE BIT(16)
32 #define GDSC_POWER_DOWN_COMPLETE BIT(15)
33 #define GDSC_RETAIN_FF_ENABLE BIT(11)
34 #define CFG_GDSCR_OFFSET 0x4
36 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
37 #define EN_REST_WAIT_VAL (0x2 << 20)
38 #define EN_FEW_WAIT_VAL (0x8 << 16)
39 #define CLK_DIS_WAIT_VAL (0x2 << 12)
41 #define RETAIN_MEM BIT(14)
42 #define RETAIN_PERIPH BIT(13)
44 #define TIMEOUT_US 500
46 #define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
53 /* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
54 static int gdsc_check_status(struct gdsc
*sc
, enum gdsc_status status
)
60 if (sc
->flags
& POLL_CFG_GDSCR
)
61 reg
= sc
->gdscr
+ CFG_GDSCR_OFFSET
;
62 else if (sc
->gds_hw_ctrl
)
63 reg
= sc
->gds_hw_ctrl
;
67 ret
= regmap_read(sc
->regmap
, reg
, &val
);
71 if (sc
->flags
& POLL_CFG_GDSCR
) {
74 return !!(val
& GDSC_POWER_UP_COMPLETE
);
76 return !!(val
& GDSC_POWER_DOWN_COMPLETE
);
82 return !!(val
& PWR_ON_MASK
);
84 return !(val
& PWR_ON_MASK
);
90 static int gdsc_hwctrl(struct gdsc
*sc
, bool en
)
92 u32 val
= en
? HW_CONTROL_MASK
: 0;
94 return regmap_update_bits(sc
->regmap
, sc
->gdscr
, HW_CONTROL_MASK
, val
);
97 static int gdsc_poll_status(struct gdsc
*sc
, enum gdsc_status status
)
103 if (gdsc_check_status(sc
, status
))
105 } while (ktime_us_delta(ktime_get(), start
) < TIMEOUT_US
);
107 if (gdsc_check_status(sc
, status
))
113 static int gdsc_toggle_logic(struct gdsc
*sc
, enum gdsc_status status
)
116 u32 val
= (status
== GDSC_ON
) ? 0 : SW_COLLAPSE_MASK
;
118 if (status
== GDSC_ON
&& sc
->rsupply
) {
119 ret
= regulator_enable(sc
->rsupply
);
124 ret
= regmap_update_bits(sc
->regmap
, sc
->gdscr
, SW_COLLAPSE_MASK
, val
);
128 /* If disabling votable gdscs, don't poll on status */
129 if ((sc
->flags
& VOTABLE
) && status
== GDSC_OFF
) {
131 * Add a short delay here to ensure that an enable
132 * right after it was disabled does not put it in an
139 if (sc
->gds_hw_ctrl
) {
141 * The gds hw controller asserts/de-asserts the status bit soon
142 * after it receives a power on/off request from a master.
143 * The controller then takes around 8 xo cycles to start its
144 * internal state machine and update the status bit. During
145 * this time, the status bit does not reflect the true status
147 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
148 * and polling the status bit.
153 ret
= gdsc_poll_status(sc
, status
);
154 WARN(ret
, "%s status stuck at 'o%s'", sc
->pd
.name
, status
? "ff" : "n");
156 if (!ret
&& status
== GDSC_OFF
&& sc
->rsupply
) {
157 ret
= regulator_disable(sc
->rsupply
);
165 static inline int gdsc_deassert_reset(struct gdsc
*sc
)
169 for (i
= 0; i
< sc
->reset_count
; i
++)
170 sc
->rcdev
->ops
->deassert(sc
->rcdev
, sc
->resets
[i
]);
174 static inline int gdsc_assert_reset(struct gdsc
*sc
)
178 for (i
= 0; i
< sc
->reset_count
; i
++)
179 sc
->rcdev
->ops
->assert(sc
->rcdev
, sc
->resets
[i
]);
183 static inline void gdsc_force_mem_on(struct gdsc
*sc
)
186 u32 mask
= RETAIN_MEM
| RETAIN_PERIPH
;
188 for (i
= 0; i
< sc
->cxc_count
; i
++)
189 regmap_update_bits(sc
->regmap
, sc
->cxcs
[i
], mask
, mask
);
192 static inline void gdsc_clear_mem_on(struct gdsc
*sc
)
195 u32 mask
= RETAIN_MEM
| RETAIN_PERIPH
;
197 for (i
= 0; i
< sc
->cxc_count
; i
++)
198 regmap_update_bits(sc
->regmap
, sc
->cxcs
[i
], mask
, 0);
201 static inline void gdsc_deassert_clamp_io(struct gdsc
*sc
)
203 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
204 GMEM_CLAMP_IO_MASK
, 0);
207 static inline void gdsc_assert_clamp_io(struct gdsc
*sc
)
209 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
210 GMEM_CLAMP_IO_MASK
, 1);
213 static inline void gdsc_assert_reset_aon(struct gdsc
*sc
)
215 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
218 regmap_update_bits(sc
->regmap
, sc
->clamp_io_ctrl
,
222 static void gdsc_retain_ff_on(struct gdsc
*sc
)
224 u32 mask
= GDSC_RETAIN_FF_ENABLE
;
226 regmap_update_bits(sc
->regmap
, sc
->gdscr
, mask
, mask
);
229 static int gdsc_enable(struct generic_pm_domain
*domain
)
231 struct gdsc
*sc
= domain_to_gdsc(domain
);
234 if (sc
->pwrsts
== PWRSTS_ON
)
235 return gdsc_deassert_reset(sc
);
237 if (sc
->flags
& SW_RESET
) {
238 gdsc_assert_reset(sc
);
240 gdsc_deassert_reset(sc
);
243 if (sc
->flags
& CLAMP_IO
) {
244 if (sc
->flags
& AON_RESET
)
245 gdsc_assert_reset_aon(sc
);
246 gdsc_deassert_clamp_io(sc
);
249 ret
= gdsc_toggle_logic(sc
, GDSC_ON
);
253 if (sc
->pwrsts
& PWRSTS_OFF
)
254 gdsc_force_mem_on(sc
);
257 * If clocks to this power domain were already on, they will take an
258 * additional 4 clock cycles to re-enable after the power domain is
259 * enabled. Delay to account for this. A delay is also needed to ensure
260 * clocks are not enabled within 400ns of enabling power to the
265 /* Turn on HW trigger mode if supported */
266 if (sc
->flags
& HW_CTRL
) {
267 ret
= gdsc_hwctrl(sc
, true);
271 * Wait for the GDSC to go through a power down and
272 * up cycle. In case a firmware ends up polling status
273 * bits for the gdsc, it might read an 'on' status before
274 * the GDSC can finish the power cycle.
275 * We wait 1us before returning to ensure the firmware
276 * can't immediately poll the status bits.
281 if (sc
->flags
& RETAIN_FF_ENABLE
)
282 gdsc_retain_ff_on(sc
);
287 static int gdsc_disable(struct generic_pm_domain
*domain
)
289 struct gdsc
*sc
= domain_to_gdsc(domain
);
292 if (sc
->pwrsts
== PWRSTS_ON
)
293 return gdsc_assert_reset(sc
);
295 /* Turn off HW trigger mode if supported */
296 if (sc
->flags
& HW_CTRL
) {
297 ret
= gdsc_hwctrl(sc
, false);
301 * Wait for the GDSC to go through a power down and
302 * up cycle. In case we end up polling status
303 * bits for the gdsc before the power cycle is completed
304 * it might read an 'on' status wrongly.
308 ret
= gdsc_poll_status(sc
, GDSC_ON
);
313 if (sc
->pwrsts
& PWRSTS_OFF
)
314 gdsc_clear_mem_on(sc
);
316 ret
= gdsc_toggle_logic(sc
, GDSC_OFF
);
320 if (sc
->flags
& CLAMP_IO
)
321 gdsc_assert_clamp_io(sc
);
326 static int gdsc_init(struct gdsc
*sc
)
332 * Disable HW trigger: collapse/restore occur based on registers writes.
333 * Disable SW override: Use hardware state-machine for sequencing.
334 * Configure wait time between states.
336 mask
= HW_CONTROL_MASK
| SW_OVERRIDE_MASK
|
337 EN_REST_WAIT_MASK
| EN_FEW_WAIT_MASK
| CLK_DIS_WAIT_MASK
;
338 val
= EN_REST_WAIT_VAL
| EN_FEW_WAIT_VAL
| CLK_DIS_WAIT_VAL
;
339 ret
= regmap_update_bits(sc
->regmap
, sc
->gdscr
, mask
, val
);
343 /* Force gdsc ON if only ON state is supported */
344 if (sc
->pwrsts
== PWRSTS_ON
) {
345 ret
= gdsc_toggle_logic(sc
, GDSC_ON
);
350 on
= gdsc_check_status(sc
, GDSC_ON
);
355 * Votable GDSCs can be ON due to Vote from other masters.
356 * If a Votable GDSC is ON, make sure we have a Vote.
358 if ((sc
->flags
& VOTABLE
) && on
)
359 gdsc_enable(&sc
->pd
);
362 * Make sure the retain bit is set if the GDSC is already on, otherwise
363 * we end up turning off the GDSC and destroying all the register
364 * contents that we thought we were saving.
366 if ((sc
->flags
& RETAIN_FF_ENABLE
) && on
)
367 gdsc_retain_ff_on(sc
);
369 /* If ALWAYS_ON GDSCs are not ON, turn them ON */
370 if (sc
->flags
& ALWAYS_ON
) {
372 gdsc_enable(&sc
->pd
);
374 sc
->pd
.flags
|= GENPD_FLAG_ALWAYS_ON
;
377 if (on
|| (sc
->pwrsts
& PWRSTS_RET
))
378 gdsc_force_mem_on(sc
);
380 gdsc_clear_mem_on(sc
);
382 if (!sc
->pd
.power_off
)
383 sc
->pd
.power_off
= gdsc_disable
;
384 if (!sc
->pd
.power_on
)
385 sc
->pd
.power_on
= gdsc_enable
;
386 pm_genpd_init(&sc
->pd
, NULL
, !on
);
391 int gdsc_register(struct gdsc_desc
*desc
,
392 struct reset_controller_dev
*rcdev
, struct regmap
*regmap
)
395 struct genpd_onecell_data
*data
;
396 struct device
*dev
= desc
->dev
;
397 struct gdsc
**scs
= desc
->scs
;
398 size_t num
= desc
->num
;
400 data
= devm_kzalloc(dev
, sizeof(*data
), GFP_KERNEL
);
404 data
->domains
= devm_kcalloc(dev
, num
, sizeof(*data
->domains
),
409 for (i
= 0; i
< num
; i
++) {
410 if (!scs
[i
] || !scs
[i
]->supply
)
413 scs
[i
]->rsupply
= devm_regulator_get(dev
, scs
[i
]->supply
);
414 if (IS_ERR(scs
[i
]->rsupply
))
415 return PTR_ERR(scs
[i
]->rsupply
);
418 data
->num_domains
= num
;
419 for (i
= 0; i
< num
; i
++) {
422 scs
[i
]->regmap
= regmap
;
423 scs
[i
]->rcdev
= rcdev
;
424 ret
= gdsc_init(scs
[i
]);
427 data
->domains
[i
] = &scs
[i
]->pd
;
431 for (i
= 0; i
< num
; i
++) {
435 pm_genpd_add_subdomain(scs
[i
]->parent
, &scs
[i
]->pd
);
438 return of_genpd_add_provider_onecell(dev
->of_node
, data
);
441 void gdsc_unregister(struct gdsc_desc
*desc
)
444 struct device
*dev
= desc
->dev
;
445 struct gdsc
**scs
= desc
->scs
;
446 size_t num
= desc
->num
;
448 /* Remove subdomains */
449 for (i
= 0; i
< num
; i
++) {
453 pm_genpd_remove_subdomain(scs
[i
]->parent
, &scs
[i
]->pd
);
455 of_genpd_del_provider(dev
->of_node
);
459 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
460 * running in the CX domain so the CPU doesn't need to know anything about the
461 * GX domain EXCEPT....
463 * Hardware constraints dictate that the GX be powered down before the CX. If
464 * the GMU crashes it could leave the GX on. In order to successfully bring back
465 * the device the CPU needs to disable the GX headswitch. There being no sane
466 * way to reach in and touch that register from deep inside the GPU driver we
467 * need to set up the infrastructure to be able to ensure that the GPU can
468 * ensure that the GX is off during this super special case. We do this by
469 * defining a GX gdsc with a dummy enable function and a "default" disable
472 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
473 * driver. During power up, nothing will happen from the CPU (and the GMU will
474 * power up normally but during power down this will ensure that the GX domain
475 * is *really* off - this gives us a semi standard way of doing what we need.
477 int gdsc_gx_do_nothing_enable(struct generic_pm_domain
*domain
)
479 /* Do nothing but give genpd the impression that we were successful */
482 EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable
);