/*
 * ARM-specific support for Broadcom STB S2/S3/S5 power management
 *
 * S2: clock gate CPUs and as many peripherals as possible
 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in
 *     self-refresh
 * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
 *     treat this mode like a soft power-off, with wakeup allowed from AON
 *
 * Copyright © 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "brcmstb-pm: " fmt
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/printk.h>
#include <linux/proc_fs.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/suspend.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/soc/brcmstb/brcmstb.h>

#include <asm/fncpy.h>
#include <asm/setup.h>
#include <asm/suspend.h>

#include "pm.h"
#include "aon_defs.h"
#define SHIMPHY_DDR_PAD_CNTRL		0x8c

/* Method #0 */
#define SHIMPHY_PAD_PLL_SEQUENCE	BIT(8)
#define SHIMPHY_PAD_GATE_PLL_S3		BIT(9)

/* Method #1 */
#define PWRDWN_SEQ_NO_SEQUENCING	0
#define PWRDWN_SEQ_HOLD_CHANNEL		1
#define PWRDWN_SEQ_RESET_PLL		2
#define PWRDWN_SEQ_POWERDOWN_PLL	3

#define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK	0x00f00000
#define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT	20

#define DDR_FORCE_CKE_RST_N		BIT(3)
#define DDR_PHY_RST_N			BIT(2)
#define DDR_PHY_CKE			BIT(1)

#define DDR_PHY_NO_CHANNEL		0xffffffff

#define MAX_NUM_MEMC			3
struct brcmstb_memc {
	void __iomem *ddr_phy_base;
	void __iomem *ddr_shimphy_base;
	void __iomem *ddr_ctrl;
};
struct brcmstb_pm_control {
	void __iomem *aon_ctrl_base;
	void __iomem *aon_sram;
	struct brcmstb_memc memcs[MAX_NUM_MEMC];

	void __iomem *boot_sram;
	size_t boot_sram_len;

	bool support_warm_boot;
	size_t pll_status_offset;
	int num_memc;

	struct brcmstb_s3_params *s3_params;
	dma_addr_t s3_params_pa;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
	bool needs_ddr_pad;
	struct platform_device *pdev;
};
enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};
#define PM_INITIATE		0x01
#define PM_INITIATE_SUCCESS	0x00
#define PM_INITIATE_FAIL	0xfe
static struct brcmstb_pm_control ctrl;
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
		void __iomem *ddr_phy_pll_status);
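/*
 * Map the on-chip boot SRAM uncached and executable; the low-level S2
 * suspend routine is copied here so it can run while DDR is unavailable.
 */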
static int brcmstb_init_sram(struct device_node *dn)
{
	void __iomem *sram;
	struct resource res;
	int ret;

	ret = of_address_to_resource(dn, 0, &res);
	if (ret)
		return ret;

	/* Uncached, executable remapping of SRAM */
	sram = __arm_ioremap_exec(res.start, resource_size(&res), false);
	if (!sram)
		return -ENOMEM;

	ctrl.boot_sram = sram;
	ctrl.boot_sram_len = resource_size(&res);

	return 0;
}
static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ /* sentinel */ }
};
static int do_bsp_initiate_command(enum bsp_initiate_command cmd)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	int ret;
	int timeo = 1000 * 1000; /* 1 second */

	writel_relaxed(0, base + AON_CTRL_PM_INITIATE);
	(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);

	/* Go! */
	writel_relaxed((cmd << 1) | PM_INITIATE, base + AON_CTRL_PM_INITIATE);

	/*
	 * If firmware doesn't support the 'ack', then just assume it's done
	 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
	 */
	if (of_machine_is_compatible("brcm,bcm74371a0")) {
		(void)readl_relaxed(base + AON_CTRL_PM_INITIATE);
		mdelay(10);
		return 0;
	}

	for (;;) {
		ret = readl_relaxed(base + AON_CTRL_PM_INITIATE);
		if (!(ret & PM_INITIATE))
			break;
		if (timeo <= 0) {
			pr_err("error: timeout waiting for BSP (%x)\n", ret);
			break;
		}
		timeo -= 50;
		udelay(50);
	}

	return (ret & 0xff) != PM_INITIATE_SUCCESS;
}
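/* BSP power handshake (BSP_CLOCK_STOP); must precede any standby entry. */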
static int brcmstb_pm_handshake(void)
{
	void __iomem *base = ctrl.aon_ctrl_base;
	u32 tmp;
	int ret;

	/* BSP power handshake, v1 */
	tmp = readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);
	tmp &= ~1UL;
	writel_relaxed(tmp, base + AON_CTRL_HOST_MISC_CMDS);
	(void)readl_relaxed(base + AON_CTRL_HOST_MISC_CMDS);

	ret = do_bsp_initiate_command(BSP_CLOCK_STOP);
	if (ret)
		pr_err("BSP handshake failed\n");

	/*
	 * HACK: BSP may have internal race on the CLOCK_STOP command.
	 * Avoid touching the BSP for a few milliseconds.
	 */
	mdelay(3);

	return ret;
}
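/*
 * Read-modify-write the SHIMPHY DDR pad control register of every MEMC:
 * keep the bits selected by @mask and OR in @value. No-op on chips whose
 * DDR sequencer does not need pad sequencing (ctrl.needs_ddr_pad is false).
 */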
static inline void shimphy_set(u32 value, u32 mask)
{
	int i;

	if (!ctrl.needs_ddr_pad)
		return;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
		tmp = value | (tmp & mask);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_shimphy_base +
			SHIMPHY_DDR_PAD_CNTRL);
	}
	wmb(); /* Complete sequence in order. */
}
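/* Set (warm boot) or clear (cold boot) each MEMC sequencer's WARM_BOOT flag. */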
static inline void ddr_ctrl_set(bool warmboot)
{
	int i;

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		tmp = readl_relaxed(ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
		if (warmboot)
			tmp |= 1;
		else
			tmp &= ~1; /* Cold boot */
		writel_relaxed(tmp, ctrl.memcs[i].ddr_ctrl +
				ctrl.warm_boot_offset);
	}
	/* Complete sequence in order */
	wmb();
}
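/* S3 entry, method 0: let the SHIMPHY pads sequence and gate the DDR PLL. */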
static inline void s3entry_method0(void)
{
	shimphy_set(SHIMPHY_PAD_GATE_PLL_S3 | SHIMPHY_PAD_PLL_SEQUENCE,
		    0xffffffff);
}
static inline void s3entry_method1(void)
{
	/*
	 * S3 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(true);
}
static inline void s5entry_method1(void)
{
	int i;

	/*
	 * S5 Entry Sequence
	 * -----------------
	 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
	 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
	 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
	 *	   DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
	 */
	shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
		    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
		    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);

	ddr_ctrl_set(false);

	for (i = 0; i < ctrl.num_memc; i++) {
		u32 tmp;

		/* Step 3: Channel A (RST_N = CKE = 0) */
		tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
				    ctrl.phy_a_standby_ctrl_offs);
		tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
		writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
			       ctrl.phy_a_standby_ctrl_offs);

		/* Step 3: Channel B? */
		if (ctrl.phy_b_standby_ctrl_offs != DDR_PHY_NO_CHANNEL) {
			tmp = readl_relaxed(ctrl.memcs[i].ddr_phy_base +
					    ctrl.phy_b_standby_ctrl_offs);
			tmp &= ~(DDR_PHY_RST_N | DDR_PHY_CKE);
			writel_relaxed(tmp, ctrl.memcs[i].ddr_phy_base +
				       ctrl.phy_b_standby_ctrl_offs);
		}
	}
}
/*
 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
 * into a low-power mode
 */
static void brcmstb_do_pmsm_power_down(unsigned long base_cmd, bool onewrite)
{
	void __iomem *base = ctrl.aon_ctrl_base;

	if ((ctrl.s3entry_method == 1) && (base_cmd == PM_COLD_CONFIG))
		s5entry_method1();

	/* pm_start_pwrdn transition 0->1 */
	writel_relaxed(base_cmd, base + AON_CTRL_PM_CTRL);

	if (!onewrite) {
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);

		writel_relaxed(base_cmd | PM_PWR_DOWN, base + AON_CTRL_PM_CTRL);
		(void)readl_relaxed(base + AON_CTRL_PM_CTRL);
	}
	wfi();
}
/* Support S5 cold boot out of "poweroff" */
static void brcmstb_pm_poweroff(void)
{
	brcmstb_pm_handshake();

	/* Clear magic S3 warm-boot value */
	writel_relaxed(0, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	(void)readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	/* Skip wait-for-interrupt signal; just use a countdown */
	writel_relaxed(0x10, ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);
	(void)readl_relaxed(ctrl.aon_ctrl_base + AON_CTRL_PM_CPU_WAIT_COUNT);

	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL <<
			     SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			     ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
		brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG, true);
		return; /* We should never actually get here */
	}

	brcmstb_do_pmsm_power_down(PM_COLD_CONFIG, false);
}
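/*
 * Copy @fn into boot SRAM (padded to FNCPY_ALIGN, as fncpy() requires) and
 * return the address of the relocated function, or NULL if it does not fit.
 */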
static void *brcmstb_pm_copy_to_sram(void *fn, size_t len)
{
	unsigned int size = ALIGN(len, FNCPY_ALIGN);

	if (ctrl.boot_sram_len < size) {
		pr_err("standby code will not fit in SRAM\n");
		return NULL;
	}

	return fncpy(ctrl.boot_sram, fn, size);
}
/*
 * S2 suspend/resume picks up where we left off, so we must execute carefully
 * from SRAM, in order to allow DDR to come back up safely before we continue.
 */
static int brcmstb_pm_s2(void)
{
	/* A previous S3 can set a value hazardous to S2, so make sure. */
	if (ctrl.s3entry_method == 1) {
		shimphy_set((PWRDWN_SEQ_NO_SEQUENCING <<
			    SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT),
			    ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK);
		ddr_ctrl_set(false);
	}

	brcmstb_pm_do_s2_sram = brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2,
			brcmstb_pm_do_s2_sz);
	if (!brcmstb_pm_do_s2_sram)
		return -EINVAL;

	return brcmstb_pm_do_s2_sram(ctrl.aon_ctrl_base,
			ctrl.memcs[0].ddr_phy_base +
			ctrl.pll_status_offset);
}
/*
 * This function is called on a new stack, so don't allow inlining (which will
 * generate stack references on the old stack). It cannot be made static
 * because it is referenced from the assembly in brcmstb_pm_do_s3().
 */
noinline int brcmstb_pm_s3_finish(void)
{
	struct brcmstb_s3_params *params = ctrl.s3_params;
	dma_addr_t params_pa = ctrl.s3_params_pa;
	phys_addr_t reentry = virt_to_phys(&cpu_resume);
	enum bsp_initiate_command cmd;
	u32 flags;

	/*
	 * Clear parameter structure, but not DTU area, which has already been
	 * filled in. We know DTU is at the end, so we can just subtract its
	 * size.
	 */
	memset(params, 0, sizeof(*params) - sizeof(params->dtu));

	flags = readl_relaxed(ctrl.aon_sram + AON_REG_MAGIC_FLAGS);

	flags &= S3_BOOTLOADER_RESERVED;
	flags |= S3_FLAG_NO_MEM_VERIFY;
	flags |= S3_FLAG_LOAD_RANDKEY;

	/* Load random / fixed key */
	if (flags & S3_FLAG_LOAD_RANDKEY)
		cmd = BSP_GEN_RANDOM_KEY;
	else
		cmd = BSP_GEN_FIXED_KEY;
	if (do_bsp_initiate_command(cmd)) {
		pr_info("key loading failed\n");
		return -EIO;
	}

	params->magic = BRCMSTB_S3_MAGIC;
	params->reentry = reentry;

	/* No more writes to DRAM */
	flush_cache_all();

	flags |= BRCMSTB_S3_MAGIC_SHORT;

	writel_relaxed(flags, ctrl.aon_sram + AON_REG_MAGIC_FLAGS);
	writel_relaxed(lower_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_LOW);
	writel_relaxed(upper_32_bits(params_pa),
		       ctrl.aon_sram + AON_REG_CONTROL_HIGH);

	switch (ctrl.s3entry_method) {
	case 0:
		s3entry_method0();
		brcmstb_do_pmsm_power_down(PM_WARM_CONFIG, false);
		break;
	case 1:
		s3entry_method1();
		brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG, true);
		break;
	default:
		return -EINVAL;
	}

	/* Must have been interrupted from wfi()? */
	return -EINTR;
}
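/*
 * Runs under cpu_suspend(): switch the stack pointer onto boot SRAM (the
 * address passed in @sp) before calling brcmstb_pm_s3_finish(), so that no
 * stack accesses touch DRAM once it has been quiesced.
 */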
static int brcmstb_pm_do_s3(unsigned long sp)
{
	unsigned long save_sp;
	int ret;

	asm volatile (
		"mov	%[save], sp\n"
		"mov	sp, %[new]\n"
		"bl	brcmstb_pm_s3_finish\n"
		"mov	%[ret], r0\n"
		"mov	sp, %[save]\n"
		: [save] "=&r" (save_sp), [ret] "=&r" (ret)
		: [new] "r" (sp)
	);

	return ret;
}
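/* Enter S3 with a temporary stack placed at the top of boot SRAM. */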
static int brcmstb_pm_s3(void)
{
	void __iomem *sp = ctrl.boot_sram + ctrl.boot_sram_len;

	return cpu_suspend((unsigned long)sp, brcmstb_pm_do_s3);
}
static int brcmstb_pm_standby(bool deep_standby)
{
	int ret;

	if (brcmstb_pm_handshake())
		return -EIO;

	if (deep_standby)
		ret = brcmstb_pm_s3();
	else
		ret = brcmstb_pm_s2();
	if (ret)
		pr_err("%s: standby failed\n", __func__);

	return ret;
}
static int brcmstb_pm_enter(suspend_state_t state)
{
	int ret = -EINVAL;

	switch (state) {
	case PM_SUSPEND_STANDBY:
		ret = brcmstb_pm_standby(false);
		break;
	case PM_SUSPEND_MEM:
		ret = brcmstb_pm_standby(true);
		break;
	}

	return ret;
}
static int brcmstb_pm_valid(suspend_state_t state)
{
	switch (state) {
	case PM_SUSPEND_STANDBY:
		return true;
	case PM_SUSPEND_MEM:
		return ctrl.support_warm_boot;
	default:
		return false;
	}
}
static const struct platform_suspend_ops brcmstb_pm_ops = {
	.enter		= brcmstb_pm_enter,
	.valid		= brcmstb_pm_valid,
};
static const struct of_device_id aon_ctrl_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-aon-ctrl" },
	{ /* sentinel */ }
};
struct ddr_phy_ofdata {
	bool supports_warm_boot;
	size_t pll_status_offset;
	int s3entry_method;
	u32 warm_boot_offset;
	u32 phy_a_standby_ctrl_offs;
	u32 phy_b_standby_ctrl_offs;
};
static struct ddr_phy_ofdata ddr_phy_71_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x0c,
	.s3entry_method = 1,
	.warm_boot_offset = 0x2c,
	.phy_a_standby_ctrl_offs = 0x198,
	.phy_b_standby_ctrl_offs = DDR_PHY_NO_CHANNEL
};
static struct ddr_phy_ofdata ddr_phy_72_0 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x10,
	.s3entry_method = 1,
	.warm_boot_offset = 0x40,
	.phy_a_standby_ctrl_offs = 0x2a4,
	.phy_b_standby_ctrl_offs = 0x8a4
};
static struct ddr_phy_ofdata ddr_phy_225_1 = {
	.supports_warm_boot = false,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};
static struct ddr_phy_ofdata ddr_phy_240_1 = {
	.supports_warm_boot = true,
	.pll_status_offset = 0x4,
	.s3entry_method = 0
};
static const struct of_device_id ddr_phy_dt_ids[] = {
	{
		.compatible = "brcm,brcmstb-ddr-phy-v71.1",
		.data = &ddr_phy_71_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v72.0",
		.data = &ddr_phy_72_0,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v225.1",
		.data = &ddr_phy_225_1,
	},
	{
		.compatible = "brcm,brcmstb-ddr-phy-v240.1",
		.data = &ddr_phy_240_1,
	},
	{
		/* Same as v240.1, for the registers we care about */
		.compatible = "brcm,brcmstb-ddr-phy-v240.2",
		.data = &ddr_phy_240_1,
	},
	{ /* sentinel */ }
};
struct ddr_seq_ofdata {
	bool needs_ddr_pad;
	u32 warm_boot_offset;
};
static const struct ddr_seq_ofdata ddr_seq_b22 = {
	.needs_ddr_pad = false,
	.warm_boot_offset = 0x2c,
};
static const struct ddr_seq_ofdata ddr_seq = {
	.needs_ddr_pad = true,
};
static const struct of_device_id ddr_shimphy_dt_ids[] = {
	{ .compatible = "brcm,brcmstb-ddr-shimphy-v1.0" },
	{ /* sentinel */ }
};
static const struct of_device_id brcmstb_memc_of_match[] = {
	{
		.compatible = "brcm,brcmstb-memc-ddr-rev-b.2.2",
		.data = &ddr_seq_b22,
	},
	{
		.compatible = "brcm,brcmstb-memc-ddr",
		.data = &ddr_seq,
	},
	{ /* sentinel */ }
};
static void __iomem *brcmstb_ioremap_match(const struct of_device_id *matches,
					   int index, const void **ofdata)
{
	struct device_node *dn;
	const struct of_device_id *match;

	dn = of_find_matching_node_and_match(NULL, matches, &match);
	if (!dn)
		return ERR_PTR(-EINVAL);

	if (ofdata)
		*ofdata = match->data;

	return of_io_request_and_map(dn, index, dn->full_name);
}
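/*
 * Leave a panic marker in always-on SRAM (cleared again on the next probe),
 * presumably for post-reset firmware to inspect.
 */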
static int brcmstb_pm_panic_notify(struct notifier_block *nb,
		unsigned long action, void *data)
{
	writel_relaxed(BRCMSTB_PANIC_MAGIC, ctrl.aon_sram + AON_REG_PANIC);

	return NOTIFY_DONE;
}

static struct notifier_block brcmstb_pm_panic_nb = {
	.notifier_call = brcmstb_pm_panic_notify,
};
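/*
 * Map every register block named in the device tree (AON control/SRAM, DDR
 * PHY, SHIM-PHYs, memory-controller sequencers, boot SRAM), then install
 * the suspend ops and the S5 "poweroff" handler.
 */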
static int brcmstb_pm_probe(struct platform_device *pdev)
{
	const struct ddr_phy_ofdata *ddr_phy_data;
	const struct ddr_seq_ofdata *ddr_seq_data;
	const struct of_device_id *of_id = NULL;
	struct device_node *dn;
	void __iomem *base;
	int ret, i;

	/* AON ctrl registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 0, NULL);
	if (IS_ERR(base)) {
		pr_err("error mapping AON_CTRL\n");
		return PTR_ERR(base);
	}
	ctrl.aon_ctrl_base = base;

	/* AON SRAM registers */
	base = brcmstb_ioremap_match(aon_ctrl_dt_ids, 1, NULL);
	if (IS_ERR(base)) {
		/* Assume standard offset */
		ctrl.aon_sram = ctrl.aon_ctrl_base +
				     AON_CTRL_SYSTEM_DATA_RAM_OFS;
	} else {
		ctrl.aon_sram = base;
	}

	writel_relaxed(0, ctrl.aon_sram + AON_REG_PANIC);
	/* DDR PHY registers */
	base = brcmstb_ioremap_match(ddr_phy_dt_ids, 0,
				     (const void **)&ddr_phy_data);
	if (IS_ERR(base)) {
		pr_err("error mapping DDR PHY\n");
		return PTR_ERR(base);
	}
	ctrl.support_warm_boot = ddr_phy_data->supports_warm_boot;
	ctrl.pll_status_offset = ddr_phy_data->pll_status_offset;
	/* Only need DDR PHY 0 for now? */
	ctrl.memcs[0].ddr_phy_base = base;
	ctrl.s3entry_method = ddr_phy_data->s3entry_method;
	ctrl.phy_a_standby_ctrl_offs = ddr_phy_data->phy_a_standby_ctrl_offs;
	ctrl.phy_b_standby_ctrl_offs = ddr_phy_data->phy_b_standby_ctrl_offs;
	/*
	 * Slightly gross to use the PHY version to get a MEMC
	 * offset, but that is the only versioned thing so far
	 * that we can tie to.
	 */
	ctrl.warm_boot_offset = ddr_phy_data->warm_boot_offset;
	/* DDR SHIM-PHY registers */
	for_each_matching_node(dn, ddr_shimphy_dt_ids) {
		i = ctrl.num_memc;
		if (i >= MAX_NUM_MEMC) {
			of_node_put(dn); /* drop the iterator's reference */
			pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC);
			break;
		}

		base = of_io_request_and_map(dn, 0, dn->full_name);
		if (IS_ERR(base)) {
			of_node_put(dn);
			if (!ctrl.support_warm_boot)
				break;

			pr_err("error mapping DDR SHIMPHY %d\n", i);
			return PTR_ERR(base);
		}
		ctrl.memcs[i].ddr_shimphy_base = base;
		ctrl.num_memc++;
	}
	/* Sequencer DRAM Param and Control Registers */
	i = 0;
	for_each_matching_node(dn, brcmstb_memc_of_match) {
		base = of_iomap(dn, 0);
		if (!base) {
			of_node_put(dn);
			pr_err("error mapping DDR Sequencer %d\n", i);
			return -ENOMEM;
		}

		of_id = of_match_node(brcmstb_memc_of_match, dn);
		if (!of_id) {
			iounmap(base);
			of_node_put(dn);
			return -EINVAL;
		}

		ddr_seq_data = of_id->data;
		ctrl.needs_ddr_pad = ddr_seq_data->needs_ddr_pad;
		/* Adjust warm boot offset based on the DDR sequencer */
		if (ddr_seq_data->warm_boot_offset)
			ctrl.warm_boot_offset = ddr_seq_data->warm_boot_offset;

		ctrl.memcs[i].ddr_ctrl = base;
		i++;
	}
	pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
		ctrl.support_warm_boot, ctrl.s3entry_method,
		ctrl.warm_boot_offset);

	dn = of_find_matching_node(NULL, sram_dt_ids);
	if (!dn) {
		pr_err("SRAM not found\n");
		return -EINVAL;
	}

	ret = brcmstb_init_sram(dn);
	of_node_put(dn);
	if (ret) {
		pr_err("error setting up SRAM for PM\n");
		return ret;
	}
	ctrl.pdev = pdev;

	ctrl.s3_params = kmalloc(sizeof(*ctrl.s3_params), GFP_KERNEL);
	if (!ctrl.s3_params)
		return -ENOMEM;
	ctrl.s3_params_pa = dma_map_single(&pdev->dev, ctrl.s3_params,
					   sizeof(*ctrl.s3_params),
					   DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, ctrl.s3_params_pa)) {
		pr_err("error mapping DMA memory\n");
		ret = -ENOMEM;
		goto out;
	}

	atomic_notifier_chain_register(&panic_notifier_list,
				       &brcmstb_pm_panic_nb);

	pm_power_off = brcmstb_pm_poweroff;
	suspend_set_ops(&brcmstb_pm_ops);

	return 0;

out:
	kfree(ctrl.s3_params);

	pr_warn("PM: initialization failed with code %d\n", ret);

	return ret;
}
static struct platform_driver brcmstb_pm_driver = {
	.driver = {
		.name	= "brcmstb-pm",
		.of_match_table = aon_ctrl_dt_ids,
	},
};

static int __init brcmstb_pm_init(void)
{
	return platform_driver_probe(&brcmstb_pm_driver,
				     brcmstb_pm_probe);
}
module_init(brcmstb_pm_init);