1 // SPDX-License-Identifier: GPL-2.0-only
3 * ARM-specific support for Broadcom STB S2/S3/S5 power management
5 * S2: clock gate CPUs and as many peripherals as possible
6 * S3: power off all of the chip except the Always ON (AON) island; keep DDR in self-refresh
8 * S5: (a.k.a. S3 cold boot) much like S3, except DDR is powered down, so we
9 * treat this mode like a soft power-off, with wakeup allowed from AON
11 * Copyright © 2014-2017 Broadcom
14 #define pr_fmt(fmt) "brcmstb-pm: " fmt
16 #include <linux/bitops.h>
17 #include <linux/compiler.h>
18 #include <linux/delay.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/err.h>
21 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/kconfig.h>
25 #include <linux/kernel.h>
26 #include <linux/memblock.h>
27 #include <linux/module.h>
28 #include <linux/notifier.h>
30 #include <linux/of_address.h>
31 #include <linux/platform_device.h>
33 #include <linux/printk.h>
34 #include <linux/proc_fs.h>
35 #include <linux/sizes.h>
36 #include <linux/slab.h>
37 #include <linux/sort.h>
38 #include <linux/suspend.h>
39 #include <linux/types.h>
40 #include <linux/uaccess.h>
41 #include <linux/soc/brcmstb/brcmstb.h>
43 #include <asm/fncpy.h>
44 #include <asm/setup.h>
45 #include <asm/suspend.h>
50 #define SHIMPHY_DDR_PAD_CNTRL 0x8c
53 #define SHIMPHY_PAD_PLL_SEQUENCE BIT(8)
54 #define SHIMPHY_PAD_GATE_PLL_S3 BIT(9)
57 #define PWRDWN_SEQ_NO_SEQUENCING 0
58 #define PWRDWN_SEQ_HOLD_CHANNEL 1
59 #define PWRDWN_SEQ_RESET_PLL 2
60 #define PWRDWN_SEQ_POWERDOWN_PLL 3
62 #define SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK 0x00f00000
63 #define SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT 20
65 #define DDR_FORCE_CKE_RST_N BIT(3)
66 #define DDR_PHY_RST_N BIT(2)
67 #define DDR_PHY_CKE BIT(1)
69 #define DDR_PHY_NO_CHANNEL 0xffffffff
71 #define MAX_NUM_MEMC 3
74 void __iomem
*ddr_phy_base
;
75 void __iomem
*ddr_shimphy_base
;
76 void __iomem
*ddr_ctrl
;
79 struct brcmstb_pm_control
{
80 void __iomem
*aon_ctrl_base
;
81 void __iomem
*aon_sram
;
82 struct brcmstb_memc memcs
[MAX_NUM_MEMC
];
84 void __iomem
*boot_sram
;
87 bool support_warm_boot
;
88 size_t pll_status_offset
;
91 struct brcmstb_s3_params
*s3_params
;
92 dma_addr_t s3_params_pa
;
95 u32 phy_a_standby_ctrl_offs
;
96 u32 phy_b_standby_ctrl_offs
;
98 struct platform_device
*pdev
;
/* Commands written to AON_CTRL_PM_INITIATE for the boot secure processor. */
enum bsp_initiate_command {
	BSP_CLOCK_STOP		= 0x00,
	BSP_GEN_RANDOM_KEY	= 0x4A,
	BSP_RESTORE_RANDOM_KEY	= 0x55,
	BSP_GEN_FIXED_KEY	= 0x63,
};
108 #define PM_INITIATE 0x01
109 #define PM_INITIATE_SUCCESS 0x00
110 #define PM_INITIATE_FAIL 0xfe
112 static struct brcmstb_pm_control ctrl
;
114 static int (*brcmstb_pm_do_s2_sram
)(void __iomem
*aon_ctrl_base
,
115 void __iomem
*ddr_phy_pll_status
);
117 static int brcmstb_init_sram(struct device_node
*dn
)
123 ret
= of_address_to_resource(dn
, 0, &res
);
127 /* Uncached, executable remapping of SRAM */
128 sram
= __arm_ioremap_exec(res
.start
, resource_size(&res
), false);
132 ctrl
.boot_sram
= sram
;
133 ctrl
.boot_sram_len
= resource_size(&res
);
138 static const struct of_device_id sram_dt_ids
[] = {
139 { .compatible
= "mmio-sram" },
143 static int do_bsp_initiate_command(enum bsp_initiate_command cmd
)
145 void __iomem
*base
= ctrl
.aon_ctrl_base
;
147 int timeo
= 1000 * 1000; /* 1 second */
149 writel_relaxed(0, base
+ AON_CTRL_PM_INITIATE
);
150 (void)readl_relaxed(base
+ AON_CTRL_PM_INITIATE
);
153 writel_relaxed((cmd
<< 1) | PM_INITIATE
, base
+ AON_CTRL_PM_INITIATE
);
156 * If firmware doesn't support the 'ack', then just assume it's done
157 * after 10ms. Note that this only works for command 0, BSP_CLOCK_STOP
159 if (of_machine_is_compatible("brcm,bcm74371a0")) {
160 (void)readl_relaxed(base
+ AON_CTRL_PM_INITIATE
);
166 ret
= readl_relaxed(base
+ AON_CTRL_PM_INITIATE
);
167 if (!(ret
& PM_INITIATE
))
170 pr_err("error: timeout waiting for BSP (%x)\n", ret
);
177 return (ret
& 0xff) != PM_INITIATE_SUCCESS
;
180 static int brcmstb_pm_handshake(void)
182 void __iomem
*base
= ctrl
.aon_ctrl_base
;
186 /* BSP power handshake, v1 */
187 tmp
= readl_relaxed(base
+ AON_CTRL_HOST_MISC_CMDS
);
189 writel_relaxed(tmp
, base
+ AON_CTRL_HOST_MISC_CMDS
);
190 (void)readl_relaxed(base
+ AON_CTRL_HOST_MISC_CMDS
);
192 ret
= do_bsp_initiate_command(BSP_CLOCK_STOP
);
194 pr_err("BSP handshake failed\n");
197 * HACK: BSP may have internal race on the CLOCK_STOP command.
198 * Avoid touching the BSP for a few milliseconds.
205 static inline void shimphy_set(u32 value
, u32 mask
)
209 if (!ctrl
.needs_ddr_pad
)
212 for (i
= 0; i
< ctrl
.num_memc
; i
++) {
215 tmp
= readl_relaxed(ctrl
.memcs
[i
].ddr_shimphy_base
+
216 SHIMPHY_DDR_PAD_CNTRL
);
217 tmp
= value
| (tmp
& mask
);
218 writel_relaxed(tmp
, ctrl
.memcs
[i
].ddr_shimphy_base
+
219 SHIMPHY_DDR_PAD_CNTRL
);
221 wmb(); /* Complete sequence in order. */
224 static inline void ddr_ctrl_set(bool warmboot
)
228 for (i
= 0; i
< ctrl
.num_memc
; i
++) {
231 tmp
= readl_relaxed(ctrl
.memcs
[i
].ddr_ctrl
+
232 ctrl
.warm_boot_offset
);
236 tmp
&= ~1; /* Cold boot */
237 writel_relaxed(tmp
, ctrl
.memcs
[i
].ddr_ctrl
+
238 ctrl
.warm_boot_offset
);
240 /* Complete sequence in order */
244 static inline void s3entry_method0(void)
246 shimphy_set(SHIMPHY_PAD_GATE_PLL_S3
| SHIMPHY_PAD_PLL_SEQUENCE
,
250 static inline void s3entry_method1(void)
255 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
256 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 1
258 shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL
<<
259 SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT
),
260 ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK
);
265 static inline void s5entry_method1(void)
272 * Step 1: SHIMPHY_ADDR_CNTL_0_DDR_PAD_CNTRL [ S3_PWRDWN_SEQ ] = 3
273 * Step 2: MEMC_DDR_0_WARM_BOOT [ WARM_BOOT ] = 0
274 * Step 3: DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ CKE ] = 0
275 * DDR_PHY_CONTROL_REGS_[AB]_0_STANDBY_CONTROL[ RST_N ] = 0
277 shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL
<<
278 SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT
),
279 ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK
);
283 for (i
= 0; i
< ctrl
.num_memc
; i
++) {
286 /* Step 3: Channel A (RST_N = CKE = 0) */
287 tmp
= readl_relaxed(ctrl
.memcs
[i
].ddr_phy_base
+
288 ctrl
.phy_a_standby_ctrl_offs
);
289 tmp
&= ~(DDR_PHY_RST_N
| DDR_PHY_RST_N
);
290 writel_relaxed(tmp
, ctrl
.memcs
[i
].ddr_phy_base
+
291 ctrl
.phy_a_standby_ctrl_offs
);
293 /* Step 3: Channel B? */
294 if (ctrl
.phy_b_standby_ctrl_offs
!= DDR_PHY_NO_CHANNEL
) {
295 tmp
= readl_relaxed(ctrl
.memcs
[i
].ddr_phy_base
+
296 ctrl
.phy_b_standby_ctrl_offs
);
297 tmp
&= ~(DDR_PHY_RST_N
| DDR_PHY_RST_N
);
298 writel_relaxed(tmp
, ctrl
.memcs
[i
].ddr_phy_base
+
299 ctrl
.phy_b_standby_ctrl_offs
);
307 * Run a Power Management State Machine (PMSM) shutdown command and put the CPU
308 * into a low-power mode
310 static void brcmstb_do_pmsm_power_down(unsigned long base_cmd
, bool onewrite
)
312 void __iomem
*base
= ctrl
.aon_ctrl_base
;
314 if ((ctrl
.s3entry_method
== 1) && (base_cmd
== PM_COLD_CONFIG
))
317 /* pm_start_pwrdn transition 0->1 */
318 writel_relaxed(base_cmd
, base
+ AON_CTRL_PM_CTRL
);
321 (void)readl_relaxed(base
+ AON_CTRL_PM_CTRL
);
323 writel_relaxed(base_cmd
| PM_PWR_DOWN
, base
+ AON_CTRL_PM_CTRL
);
324 (void)readl_relaxed(base
+ AON_CTRL_PM_CTRL
);
329 /* Support S5 cold boot out of "poweroff" */
330 static void brcmstb_pm_poweroff(void)
332 brcmstb_pm_handshake();
334 /* Clear magic S3 warm-boot value */
335 writel_relaxed(0, ctrl
.aon_sram
+ AON_REG_MAGIC_FLAGS
);
336 (void)readl_relaxed(ctrl
.aon_sram
+ AON_REG_MAGIC_FLAGS
);
338 /* Skip wait-for-interrupt signal; just use a countdown */
339 writel_relaxed(0x10, ctrl
.aon_ctrl_base
+ AON_CTRL_PM_CPU_WAIT_COUNT
);
340 (void)readl_relaxed(ctrl
.aon_ctrl_base
+ AON_CTRL_PM_CPU_WAIT_COUNT
);
342 if (ctrl
.s3entry_method
== 1) {
343 shimphy_set((PWRDWN_SEQ_POWERDOWN_PLL
<<
344 SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT
),
345 ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK
);
347 brcmstb_do_pmsm_power_down(M1_PM_COLD_CONFIG
, true);
348 return; /* We should never actually get here */
351 brcmstb_do_pmsm_power_down(PM_COLD_CONFIG
, false);
354 static void *brcmstb_pm_copy_to_sram(void *fn
, size_t len
)
356 unsigned int size
= ALIGN(len
, FNCPY_ALIGN
);
358 if (ctrl
.boot_sram_len
< size
) {
359 pr_err("standby code will not fit in SRAM\n");
363 return fncpy(ctrl
.boot_sram
, fn
, size
);
367 * S2 suspend/resume picks up where we left off, so we must execute carefully
368 * from SRAM, in order to allow DDR to come back up safely before we continue.
370 static int brcmstb_pm_s2(void)
372 /* A previous S3 can set a value hazardous to S2, so make sure. */
373 if (ctrl
.s3entry_method
== 1) {
374 shimphy_set((PWRDWN_SEQ_NO_SEQUENCING
<<
375 SHIMPHY_PAD_S3_PWRDWN_SEQ_SHIFT
),
376 ~SHIMPHY_PAD_S3_PWRDWN_SEQ_MASK
);
380 brcmstb_pm_do_s2_sram
= brcmstb_pm_copy_to_sram(&brcmstb_pm_do_s2
,
381 brcmstb_pm_do_s2_sz
);
382 if (!brcmstb_pm_do_s2_sram
)
385 return brcmstb_pm_do_s2_sram(ctrl
.aon_ctrl_base
,
386 ctrl
.memcs
[0].ddr_phy_base
+
387 ctrl
.pll_status_offset
);
391 * This function is called on a new stack, so don't allow inlining (which will
392 * generate stack references on the old stack). It cannot be made static because
393 * it is referenced from brcmstb_pm_s3()
395 noinline
int brcmstb_pm_s3_finish(void)
397 struct brcmstb_s3_params
*params
= ctrl
.s3_params
;
398 dma_addr_t params_pa
= ctrl
.s3_params_pa
;
399 phys_addr_t reentry
= virt_to_phys(&cpu_resume_arm
);
400 enum bsp_initiate_command cmd
;
404 * Clear parameter structure, but not DTU area, which has already been
405 * filled in. We know DTU is a the end, so we can just subtract its
408 memset(params
, 0, sizeof(*params
) - sizeof(params
->dtu
));
410 flags
= readl_relaxed(ctrl
.aon_sram
+ AON_REG_MAGIC_FLAGS
);
412 flags
&= S3_BOOTLOADER_RESERVED
;
413 flags
|= S3_FLAG_NO_MEM_VERIFY
;
414 flags
|= S3_FLAG_LOAD_RANDKEY
;
416 /* Load random / fixed key */
417 if (flags
& S3_FLAG_LOAD_RANDKEY
)
418 cmd
= BSP_GEN_RANDOM_KEY
;
420 cmd
= BSP_GEN_FIXED_KEY
;
421 if (do_bsp_initiate_command(cmd
)) {
422 pr_info("key loading failed\n");
426 params
->magic
= BRCMSTB_S3_MAGIC
;
427 params
->reentry
= reentry
;
429 /* No more writes to DRAM */
432 flags
|= BRCMSTB_S3_MAGIC_SHORT
;
434 writel_relaxed(flags
, ctrl
.aon_sram
+ AON_REG_MAGIC_FLAGS
);
435 writel_relaxed(lower_32_bits(params_pa
),
436 ctrl
.aon_sram
+ AON_REG_CONTROL_LOW
);
437 writel_relaxed(upper_32_bits(params_pa
),
438 ctrl
.aon_sram
+ AON_REG_CONTROL_HIGH
);
440 switch (ctrl
.s3entry_method
) {
443 brcmstb_do_pmsm_power_down(PM_WARM_CONFIG
, false);
447 brcmstb_do_pmsm_power_down(M1_PM_WARM_CONFIG
, true);
453 /* Must have been interrupted from wfi()? */
457 static int brcmstb_pm_do_s3(unsigned long sp
)
459 unsigned long save_sp
;
465 "bl brcmstb_pm_s3_finish\n"
469 : [save
] "=&r" (save_sp
), [ret
] "=&r" (ret
)
476 static int brcmstb_pm_s3(void)
478 void __iomem
*sp
= ctrl
.boot_sram
+ ctrl
.boot_sram_len
;
480 return cpu_suspend((unsigned long)sp
, brcmstb_pm_do_s3
);
483 static int brcmstb_pm_standby(bool deep_standby
)
487 if (brcmstb_pm_handshake())
491 ret
= brcmstb_pm_s3();
493 ret
= brcmstb_pm_s2();
495 pr_err("%s: standby failed\n", __func__
);
500 static int brcmstb_pm_enter(suspend_state_t state
)
505 case PM_SUSPEND_STANDBY
:
506 ret
= brcmstb_pm_standby(false);
509 ret
= brcmstb_pm_standby(true);
516 static int brcmstb_pm_valid(suspend_state_t state
)
519 case PM_SUSPEND_STANDBY
:
522 return ctrl
.support_warm_boot
;
528 static const struct platform_suspend_ops brcmstb_pm_ops
= {
529 .enter
= brcmstb_pm_enter
,
530 .valid
= brcmstb_pm_valid
,
533 static const struct of_device_id aon_ctrl_dt_ids
[] = {
534 { .compatible
= "brcm,brcmstb-aon-ctrl" },
538 struct ddr_phy_ofdata
{
539 bool supports_warm_boot
;
540 size_t pll_status_offset
;
542 u32 warm_boot_offset
;
543 u32 phy_a_standby_ctrl_offs
;
544 u32 phy_b_standby_ctrl_offs
;
547 static struct ddr_phy_ofdata ddr_phy_71_1
= {
548 .supports_warm_boot
= true,
549 .pll_status_offset
= 0x0c,
551 .warm_boot_offset
= 0x2c,
552 .phy_a_standby_ctrl_offs
= 0x198,
553 .phy_b_standby_ctrl_offs
= DDR_PHY_NO_CHANNEL
556 static struct ddr_phy_ofdata ddr_phy_72_0
= {
557 .supports_warm_boot
= true,
558 .pll_status_offset
= 0x10,
560 .warm_boot_offset
= 0x40,
561 .phy_a_standby_ctrl_offs
= 0x2a4,
562 .phy_b_standby_ctrl_offs
= 0x8a4
565 static struct ddr_phy_ofdata ddr_phy_225_1
= {
566 .supports_warm_boot
= false,
567 .pll_status_offset
= 0x4,
571 static struct ddr_phy_ofdata ddr_phy_240_1
= {
572 .supports_warm_boot
= true,
573 .pll_status_offset
= 0x4,
577 static const struct of_device_id ddr_phy_dt_ids
[] = {
579 .compatible
= "brcm,brcmstb-ddr-phy-v71.1",
580 .data
= &ddr_phy_71_1
,
583 .compatible
= "brcm,brcmstb-ddr-phy-v72.0",
584 .data
= &ddr_phy_72_0
,
587 .compatible
= "brcm,brcmstb-ddr-phy-v225.1",
588 .data
= &ddr_phy_225_1
,
591 .compatible
= "brcm,brcmstb-ddr-phy-v240.1",
592 .data
= &ddr_phy_240_1
,
595 /* Same as v240.1, for the registers we care about */
596 .compatible
= "brcm,brcmstb-ddr-phy-v240.2",
597 .data
= &ddr_phy_240_1
,
602 struct ddr_seq_ofdata
{
604 u32 warm_boot_offset
;
607 static const struct ddr_seq_ofdata ddr_seq_b22
= {
608 .needs_ddr_pad
= false,
609 .warm_boot_offset
= 0x2c,
612 static const struct ddr_seq_ofdata ddr_seq
= {
613 .needs_ddr_pad
= true,
616 static const struct of_device_id ddr_shimphy_dt_ids
[] = {
617 { .compatible
= "brcm,brcmstb-ddr-shimphy-v1.0" },
621 static const struct of_device_id brcmstb_memc_of_match
[] = {
623 .compatible
= "brcm,brcmstb-memc-ddr-rev-b.2.1",
627 .compatible
= "brcm,brcmstb-memc-ddr-rev-b.2.2",
628 .data
= &ddr_seq_b22
,
631 .compatible
= "brcm,brcmstb-memc-ddr-rev-b.2.3",
632 .data
= &ddr_seq_b22
,
635 .compatible
= "brcm,brcmstb-memc-ddr-rev-b.3.0",
636 .data
= &ddr_seq_b22
,
639 .compatible
= "brcm,brcmstb-memc-ddr-rev-b.3.1",
640 .data
= &ddr_seq_b22
,
643 .compatible
= "brcm,brcmstb-memc-ddr",
649 static void __iomem
*brcmstb_ioremap_match(const struct of_device_id
*matches
,
650 int index
, const void **ofdata
)
652 struct device_node
*dn
;
653 const struct of_device_id
*match
;
655 dn
= of_find_matching_node_and_match(NULL
, matches
, &match
);
657 return ERR_PTR(-EINVAL
);
660 *ofdata
= match
->data
;
662 return of_io_request_and_map(dn
, index
, dn
->full_name
);
665 static int brcmstb_pm_panic_notify(struct notifier_block
*nb
,
666 unsigned long action
, void *data
)
668 writel_relaxed(BRCMSTB_PANIC_MAGIC
, ctrl
.aon_sram
+ AON_REG_PANIC
);
673 static struct notifier_block brcmstb_pm_panic_nb
= {
674 .notifier_call
= brcmstb_pm_panic_notify
,
677 static int brcmstb_pm_probe(struct platform_device
*pdev
)
679 const struct ddr_phy_ofdata
*ddr_phy_data
;
680 const struct ddr_seq_ofdata
*ddr_seq_data
;
681 const struct of_device_id
*of_id
= NULL
;
682 struct device_node
*dn
;
686 /* AON ctrl registers */
687 base
= brcmstb_ioremap_match(aon_ctrl_dt_ids
, 0, NULL
);
689 pr_err("error mapping AON_CTRL\n");
690 return PTR_ERR(base
);
692 ctrl
.aon_ctrl_base
= base
;
694 /* AON SRAM registers */
695 base
= brcmstb_ioremap_match(aon_ctrl_dt_ids
, 1, NULL
);
697 /* Assume standard offset */
698 ctrl
.aon_sram
= ctrl
.aon_ctrl_base
+
699 AON_CTRL_SYSTEM_DATA_RAM_OFS
;
701 ctrl
.aon_sram
= base
;
704 writel_relaxed(0, ctrl
.aon_sram
+ AON_REG_PANIC
);
706 /* DDR PHY registers */
707 base
= brcmstb_ioremap_match(ddr_phy_dt_ids
, 0,
708 (const void **)&ddr_phy_data
);
710 pr_err("error mapping DDR PHY\n");
711 return PTR_ERR(base
);
713 ctrl
.support_warm_boot
= ddr_phy_data
->supports_warm_boot
;
714 ctrl
.pll_status_offset
= ddr_phy_data
->pll_status_offset
;
715 /* Only need DDR PHY 0 for now? */
716 ctrl
.memcs
[0].ddr_phy_base
= base
;
717 ctrl
.s3entry_method
= ddr_phy_data
->s3entry_method
;
718 ctrl
.phy_a_standby_ctrl_offs
= ddr_phy_data
->phy_a_standby_ctrl_offs
;
719 ctrl
.phy_b_standby_ctrl_offs
= ddr_phy_data
->phy_b_standby_ctrl_offs
;
721 * Slightly grosss to use the phy ver to get a memc,
722 * offset but that is the only versioned things so far
725 ctrl
.warm_boot_offset
= ddr_phy_data
->warm_boot_offset
;
727 /* DDR SHIM-PHY registers */
728 for_each_matching_node(dn
, ddr_shimphy_dt_ids
) {
730 if (i
>= MAX_NUM_MEMC
) {
731 pr_warn("too many MEMCs (max %d)\n", MAX_NUM_MEMC
);
735 base
= of_io_request_and_map(dn
, 0, dn
->full_name
);
737 if (!ctrl
.support_warm_boot
)
740 pr_err("error mapping DDR SHIMPHY %d\n", i
);
741 return PTR_ERR(base
);
743 ctrl
.memcs
[i
].ddr_shimphy_base
= base
;
747 /* Sequencer DRAM Param and Control Registers */
749 for_each_matching_node(dn
, brcmstb_memc_of_match
) {
750 base
= of_iomap(dn
, 0);
752 pr_err("error mapping DDR Sequencer %d\n", i
);
756 of_id
= of_match_node(brcmstb_memc_of_match
, dn
);
762 ddr_seq_data
= of_id
->data
;
763 ctrl
.needs_ddr_pad
= ddr_seq_data
->needs_ddr_pad
;
764 /* Adjust warm boot offset based on the DDR sequencer */
765 if (ddr_seq_data
->warm_boot_offset
)
766 ctrl
.warm_boot_offset
= ddr_seq_data
->warm_boot_offset
;
768 ctrl
.memcs
[i
].ddr_ctrl
= base
;
772 pr_debug("PM: supports warm boot:%d, method:%d, wboffs:%x\n",
773 ctrl
.support_warm_boot
, ctrl
.s3entry_method
,
774 ctrl
.warm_boot_offset
);
776 dn
= of_find_matching_node(NULL
, sram_dt_ids
);
778 pr_err("SRAM not found\n");
782 ret
= brcmstb_init_sram(dn
);
784 pr_err("error setting up SRAM for PM\n");
790 ctrl
.s3_params
= kmalloc(sizeof(*ctrl
.s3_params
), GFP_KERNEL
);
793 ctrl
.s3_params_pa
= dma_map_single(&pdev
->dev
, ctrl
.s3_params
,
794 sizeof(*ctrl
.s3_params
),
796 if (dma_mapping_error(&pdev
->dev
, ctrl
.s3_params_pa
)) {
797 pr_err("error mapping DMA memory\n");
802 atomic_notifier_chain_register(&panic_notifier_list
,
803 &brcmstb_pm_panic_nb
);
805 pm_power_off
= brcmstb_pm_poweroff
;
806 suspend_set_ops(&brcmstb_pm_ops
);
811 kfree(ctrl
.s3_params
);
813 pr_warn("PM: initialization failed with code %d\n", ret
);
818 static struct platform_driver brcmstb_pm_driver
= {
820 .name
= "brcmstb-pm",
821 .of_match_table
= aon_ctrl_dt_ids
,
825 static int __init
brcmstb_pm_init(void)
827 return platform_driver_probe(&brcmstb_pm_driver
,
830 module_init(brcmstb_pm_init
);