/*
 * Suspend support specific for s390.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com>
 */
10 #include <linux/suspend.h>
12 #include <linux/pci.h>
13 #include <asm/ctl_reg.h>
16 #include <asm/sections.h>
/*
 * The restore of the saved pages in an hibernation image will set
 * the change and referenced bits in the storage key for each page.
 * Overindication of the referenced bits after an hibernation cycle
 * does not cause any harm but the overindication of the change bits
 * would cause trouble.
 * Use the ARCH_SAVE_PAGE_KEYS hooks to save the storage key of each
 * page to the most significant byte of the associated page frame
 * number in the hibernation image.
 */

/*
 * Key storage is allocated as a linked list of pages.
 * The size of the keys array is (PAGE_SIZE - sizeof(long))
 */
struct page_key_data {
	struct page_key_data *next;	/* next array in the key-storage list */
	unsigned char data[];		/* one storage-key byte per image page */
};

#define PAGE_KEY_DATA_SIZE	(PAGE_SIZE - sizeof(struct page_key_data *))
/* Head of the linked list of key-storage pages. */
static struct page_key_data *page_key_data;
/* Read (resume) and write (image build) cursors into the list. */
static struct page_key_data *page_key_rp, *page_key_wp;
/* Byte indices into the data[] array of the current read/write node. */
static unsigned long page_key_rx, page_key_wx;
/* Scratch pages of order LC_ORDER allocated around suspend; consumed by
 * the swsusp_arch_* code outside this file — see suspend_pm_cb(). */
unsigned long suspend_zero_pages;
47 * For each page in the hibernation image one additional byte is
48 * stored in the most significant byte of the page frame number.
49 * On suspend no additional memory is required but on resume the
50 * keys need to be memorized until the page data has been restored.
51 * Only then can the storage keys be set to their old state.
53 unsigned long page_key_additional_pages(unsigned long pages
)
55 return DIV_ROUND_UP(pages
, PAGE_KEY_DATA_SIZE
);
59 * Free page_key_data list of arrays.
61 void page_key_free(void)
63 struct page_key_data
*pkd
;
65 while (page_key_data
) {
67 page_key_data
= pkd
->next
;
68 free_page((unsigned long) pkd
);
73 * Allocate page_key_data list of arrays with enough room to store
74 * one byte for each page in the hibernation image.
76 int page_key_alloc(unsigned long pages
)
78 struct page_key_data
*pk
;
81 size
= DIV_ROUND_UP(pages
, PAGE_KEY_DATA_SIZE
);
83 pk
= (struct page_key_data
*) get_zeroed_page(GFP_KERNEL
);
88 pk
->next
= page_key_data
;
91 page_key_rp
= page_key_wp
= page_key_data
;
92 page_key_rx
= page_key_wx
= 0;
/*
 * Save the storage key into the upper 8 bits of the page frame number.
 */
void page_key_read(unsigned long *pfn)
{
	unsigned long addr;

	addr = (unsigned long) page_address(pfn_to_page(*pfn));
	*(unsigned char *) pfn = (unsigned char) page_get_storage_key(addr);
}
108 * Extract the storage key from the upper 8 bits of the page frame number
109 * and store it in the page_key_data list of arrays.
111 void page_key_memorize(unsigned long *pfn
)
113 page_key_wp
->data
[page_key_wx
] = *(unsigned char *) pfn
;
114 *(unsigned char *) pfn
= 0;
115 if (++page_key_wx
< PAGE_KEY_DATA_SIZE
)
117 page_key_wp
= page_key_wp
->next
;
122 * Get the next key from the page_key_data list of arrays and set the
123 * storage key of the page referred by @address. If @address refers to
124 * a "safe" page the swsusp_arch_resume code will transfer the storage
125 * key from the buffer page to the original page.
127 void page_key_write(void *address
)
129 page_set_storage_key((unsigned long) address
,
130 page_key_rp
->data
[page_key_rx
], 0);
131 if (++page_key_rx
>= PAGE_KEY_DATA_SIZE
)
133 page_key_rp
= page_key_rp
->next
;
137 int pfn_is_nosave(unsigned long pfn
)
139 unsigned long nosave_begin_pfn
= PFN_DOWN(__pa(&__nosave_begin
));
140 unsigned long nosave_end_pfn
= PFN_DOWN(__pa(&__nosave_end
));
141 unsigned long eshared_pfn
= PFN_DOWN(__pa(&_eshared
)) - 1;
142 unsigned long stext_pfn
= PFN_DOWN(__pa(&_stext
));
144 /* Always save lowcore pages (LC protection might be enabled). */
147 if (pfn
>= nosave_begin_pfn
&& pfn
< nosave_end_pfn
)
149 /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
150 if (pfn
>= stext_pfn
&& pfn
<= eshared_pfn
)
151 return ipl_info
.type
== IPL_TYPE_NSS
? 1 : 0;
152 if (tprot(PFN_PHYS(pfn
)))
158 * PM notifier callback for suspend
160 static int suspend_pm_cb(struct notifier_block
*nb
, unsigned long action
,
164 case PM_SUSPEND_PREPARE
:
165 case PM_HIBERNATION_PREPARE
:
166 suspend_zero_pages
= __get_free_pages(GFP_KERNEL
, LC_ORDER
);
167 if (!suspend_zero_pages
)
170 case PM_POST_SUSPEND
:
171 case PM_POST_HIBERNATION
:
172 free_pages(suspend_zero_pages
, LC_ORDER
);
180 static int __init
suspend_pm_init(void)
182 pm_notifier(suspend_pm_cb
, 0);
185 arch_initcall(suspend_pm_init
);
187 void save_processor_state(void)
189 /* swsusp_arch_suspend() actually saves all cpu register contents.
190 * Machine checks must be disabled since swsusp_arch_suspend() stores
191 * register contents to their lowcore save areas. That's the same
192 * place where register contents on machine checks would be saved.
193 * To avoid register corruption disable machine checks.
194 * We must also disable machine checks in the new psw mask for
195 * program checks, since swsusp_arch_suspend() may generate program
196 * checks. Disabling machine checks for all other new psw masks is
199 local_mcck_disable();
200 /* Disable lowcore protection */
201 __ctl_clear_bit(0,28);
202 S390_lowcore
.external_new_psw
.mask
&= ~PSW_MASK_MCHECK
;
203 S390_lowcore
.svc_new_psw
.mask
&= ~PSW_MASK_MCHECK
;
204 S390_lowcore
.io_new_psw
.mask
&= ~PSW_MASK_MCHECK
;
205 S390_lowcore
.program_new_psw
.mask
&= ~PSW_MASK_MCHECK
;
208 void restore_processor_state(void)
210 S390_lowcore
.external_new_psw
.mask
|= PSW_MASK_MCHECK
;
211 S390_lowcore
.svc_new_psw
.mask
|= PSW_MASK_MCHECK
;
212 S390_lowcore
.io_new_psw
.mask
|= PSW_MASK_MCHECK
;
213 S390_lowcore
.program_new_psw
.mask
|= PSW_MASK_MCHECK
;
214 /* Enable lowcore protection */
219 /* Called at the end of swsusp_arch_resume */
220 void s390_early_resume(void)
223 channel_subsystem_reinit();