/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *  https://lkml.org/lkml/2010/6/18/4
 *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *  https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 *
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>
/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR: the variable may sit at a different address
 * in the image kernel, so the restore can overwrite the resume kernel's
 * '0'. We set it back to 0 ourselves once the image has been restored
 * (see swsusp_arch_suspend()).
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;
/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];
/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;
/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
        char uts_version[__NEW_UTS_LEN + 1];
};

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
        struct arch_hibernate_hdr_invariants invariants;

        /* These are needed to find the relocated kernel if built with kaslr */
        phys_addr_t ttbr1_el1;
        void (*reenter_kernel)(void);

        /*
         * We need to know where the __hyp_stub_vectors are after restore to
         * re-configure el2.
         */
        phys_addr_t __hyp_stub_vectors;

        u64 sleep_cpu_mpidr;
} resume_hdr;
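/*
 * Capture the properties that must match between the kernel that created
 * the hibernate image and the kernel resuming it. uts_version carries the
 * build number and date, which should be enough to tell two builds apart.
 */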
static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
        memset(i, 0, sizeof(*i));
        memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}
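/*
 * Called by the hibernate core for each pfn: anything inside our own
 * nosave region, or (per crash_is_nosave()) belonging to the crash dump
 * kernel, is left out of the image.
 */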
int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
        unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

        return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
                crash_is_nosave(pfn);
}
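/*
 * Nothing to save or restore here: the CPU state that matters is captured
 * by __cpu_suspend_enter() in swsusp_arch_suspend(). By this point the
 * secondaries should already be offline, hence the WARN_ON().
 */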
void notrace save_processor_state(void)
{
        WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}
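/*
 * Fill the arch-specific header the hibernate core embeds in the image:
 * the image kernel's swapper_pg_dir and cpu_resume entry point (needed to
 * find a kaslr'd kernel), the hyp-stub vectors if el2 must be reset, and
 * the mpidr of the CPU we suspended on.
 */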
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct arch_hibernate_hdr *hdr = addr;

        if (max_size < sizeof(*hdr))
                return -EOVERFLOW;

        arch_hdr_invariants(&hdr->invariants);
        hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir);
        hdr->reenter_kernel = _cpu_resume;

        /* We can't use __hyp_get_vectors() because kvm may still be loaded */
        if (el2_reset_needed())
                hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
        else
                hdr->__hyp_stub_vectors = 0;

        /* Save the mpidr of the cpu we called cpu_suspend() on... */
        if (sleep_cpu < 0) {
                pr_err("Failing to hibernate on an unknown CPU.\n");
                return -ENODEV;
        }
        hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
        pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);
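/*
 * Validate the image header against the running kernel: refuse to resume
 * if the uts_version invariants differ, and make sure the CPU we hibernated
 * on is known to (and can be brought online by) this kernel.
 */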
int arch_hibernation_header_restore(void *addr)
{
        int ret;
        struct arch_hibernate_hdr_invariants invariants;
        struct arch_hibernate_hdr *hdr = addr;

        arch_hdr_invariants(&invariants);
        if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
                pr_crit("Hibernate image not generated by this kernel!\n");
                return -EINVAL;
        }

        sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
        pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
                hdr->sleep_cpu_mpidr);
        if (sleep_cpu < 0) {
                pr_crit("Hibernated on a CPU not known to this kernel!\n");
                sleep_cpu = -EINVAL;
                return -EINVAL;
        }
        if (!cpu_online(sleep_cpu)) {
                pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
                ret = cpu_up(sleep_cpu);
                if (ret) {
                        pr_err("Failed to bring hibernate-CPU up!\n");
                        sleep_cpu = -EINVAL;
                        return ret;
                }
        }

        resume_hdr = *hdr;

        return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);
/*
 * Copies length bytes, starting at src_start, into a new page, performs
 * cache maintenance, then maps it at the specified low address as
 * executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
                                 unsigned long dst_addr,
                                 phys_addr_t *phys_dst_addr,
                                 void *(*allocator)(gfp_t mask),
                                 gfp_t mask)
{
        int rc = 0;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long dst = (unsigned long)allocator(mask);

        if (!dst) {
                rc = -ENOMEM;
                goto out;
        }

        memcpy((void *)dst, src_start, length);
        flush_icache_range(dst, dst + length);
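        /*
         * Build a minimal set of page tables for dst_addr, allocating each
         * missing level on the way down. This single mapping is the only
         * entry in the tables we load into ttbr0 below.
         */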
        pgd = pgd_offset_raw(allocator(mask), dst_addr);
        if (pgd_none(*pgd)) {
                pud = allocator(mask);
                if (!pud) {
                        rc = -ENOMEM;
                        goto out;
                }
                pgd_populate(&init_mm, pgd, pud);
        }

        pud = pud_offset(pgd, dst_addr);
        if (pud_none(*pud)) {
                pmd = allocator(mask);
                if (!pmd) {
                        rc = -ENOMEM;
                        goto out;
                }
                pud_populate(&init_mm, pud, pmd);
        }

        pmd = pmd_offset(pud, dst_addr);
        if (pmd_none(*pmd)) {
                pte = allocator(mask);
                if (!pte) {
                        rc = -ENOMEM;
                        goto out;
                }
                pmd_populate_kernel(&init_mm, pmd, pte);
        }

        pte = pte_offset_kernel(pmd, dst_addr);
        set_pte(pte, __pte(virt_to_phys((void *)dst) |
                         pgprot_val(PAGE_KERNEL_EXEC)));
        /*
         * Load our new page tables. A strict BBM approach requires that we
         * ensure that TLBs are free of any entries that may overlap with the
         * global mappings we are about to install.
         *
         * For a real hibernate/resume cycle TTBR0 currently points to a zero
         * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
         * runtime services), while for a userspace-driven test_resume cycle it
         * points to userspace page tables (and we must point it at a zero page
         * ourselves). Elsewhere we only (un)install the idmap with preemption
         * disabled, so T0SZ should be as required regardless.
         */
        cpu_set_reserved_ttbr0();
        local_flush_tlb_all();
        write_sysreg(virt_to_phys(pgd), ttbr0_el1);
        isb();

        *phys_dst_addr = virt_to_phys((void *)dst);

out:
        return rc;
}
#define dcache_clean_range(start, end)  __flush_dcache_area(start, (end - start))
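/*
 * Snapshot the CPU state with __cpu_suspend_enter(), then either save the
 * image (the branch taken at suspend time) or, when control comes back via
 * cpu_resume in the restored kernel, clean the resume-critical code to PoC
 * and tell the hibernate core the restore is complete.
 */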
int swsusp_arch_suspend(void)
{
        int ret = 0;
        unsigned long flags;
        struct sleep_stack_data state;

        if (cpus_are_stuck_in_kernel()) {
                pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
                return -EBUSY;
        }

        flags = local_daif_save();

        if (__cpu_suspend_enter(&state)) {
                /* make the crash dump kernel image visible/saveable */
                crash_prepare_suspend();

                sleep_cpu = smp_processor_id();
                ret = swsusp_save();
        } else {
                /* Clean kernel core startup/idle code to PoC */
                dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
                dcache_clean_range(__idmap_text_start, __idmap_text_end);

                /* Clean kvm setup code to PoC? */
                if (el2_reset_needed())
                        dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);

                /* make the crash dump kernel image protected again */
                crash_post_resume();

                /*
                 * Tell the hibernation core that we've just restored
                 * the memory
                 */
                in_suspend = 0;

                sleep_cpu = -EINVAL;
                __cpu_suspend_exit();
        }

        local_daif_restore(flags);

        return ret;
}
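/*
 * The copy_*() helpers below duplicate the kernel's linear-map page tables
 * into pages allocated with get_safe_page(), so that the restore (which
 * overwrites the original ttbr1 tables) can run through a copy that is
 * itself safe from being overwritten.
 */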
static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
{
        pte_t pte = *src_pte;

        if (pte_valid(pte)) {
                /*
                 * Resume will overwrite areas that may be marked
                 * read only (code, rodata). Clear the RDONLY bit from
                 * the temporary mappings we use during restore.
                 */
                set_pte(dst_pte, pte_mkwrite(pte));
        } else if (debug_pagealloc_enabled() && !pte_none(pte)) {
                /*
                 * debug_pagealloc will have removed the PTE_VALID bit if
                 * the page isn't in use by the resume kernel. It may have
                 * been in use by the original kernel, in which case we need
                 * to put it back in our copy to do the restore.
                 *
                 * Before marking this entry valid, check the pfn should
                 * be mapped.
                 */
                BUG_ON(!pfn_valid(pte_pfn(pte)));

                set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
        }
}
static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
                    unsigned long end)
{
        pte_t *src_pte;
        pte_t *dst_pte;
        unsigned long addr = start;

        dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
        if (!dst_pte)
                return -ENOMEM;
        pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
        dst_pte = pte_offset_kernel(dst_pmd, start);

        src_pte = pte_offset_kernel(src_pmd, start);
        do {
                _copy_pte(dst_pte, src_pte, addr);
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);

        return 0;
}
static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
                    unsigned long end)
{
        pmd_t *src_pmd;
        pmd_t *dst_pmd;
        unsigned long next;
        unsigned long addr = start;

        if (pud_none(*dst_pud)) {
                dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
                if (!dst_pmd)
                        return -ENOMEM;
                pud_populate(&init_mm, dst_pud, dst_pmd);
        }
        dst_pmd = pmd_offset(dst_pud, start);

        src_pmd = pmd_offset(src_pud, start);
        do {
                next = pmd_addr_end(addr, end);
                if (pmd_none(*src_pmd))
                        continue;
                if (pmd_table(*src_pmd)) {
                        if (copy_pte(dst_pmd, src_pmd, addr, next))
                                return -ENOMEM;
                } else {
                        set_pmd(dst_pmd,
                                __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
                }
        } while (dst_pmd++, src_pmd++, addr = next, addr != end);

        return 0;
}
static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
                    unsigned long end)
{
        pud_t *dst_pud;
        pud_t *src_pud;
        unsigned long next;
        unsigned long addr = start;

        if (pgd_none(*dst_pgd)) {
                dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
                if (!dst_pud)
                        return -ENOMEM;
                pgd_populate(&init_mm, dst_pgd, dst_pud);
        }
        dst_pud = pud_offset(dst_pgd, start);

        src_pud = pud_offset(src_pgd, start);
        do {
                next = pud_addr_end(addr, end);
                if (pud_none(*src_pud))
                        continue;
                if (pud_table(*(src_pud))) {
                        if (copy_pmd(dst_pud, src_pud, addr, next))
                                return -ENOMEM;
                } else {
                        set_pud(dst_pud,
                                __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
                }
        } while (dst_pud++, src_pud++, addr = next, addr != end);

        return 0;
}
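/*
 * Walk the kernel page tables from 'start' to 'end' (the caller passes the
 * whole linear map: PAGE_OFFSET to 0) and duplicate every live entry into
 * the table rooted at dst_pgd.
 */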
static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
                            unsigned long end)
{
        unsigned long next;
        unsigned long addr = start;
        pgd_t *src_pgd = pgd_offset_k(start);

        dst_pgd = pgd_offset_raw(dst_pgd, start);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none(*src_pgd))
                        continue;
                if (copy_pud(dst_pgd, src_pgd, addr, next))
                        return -ENOMEM;
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);

        return 0;
}
/*
 * Set up, then resume from, the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate
 * code; we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
        int rc = 0;
        void *zero_page;
        size_t exit_size;
        pgd_t *tmp_pg_dir;
        phys_addr_t phys_hibernate_exit;
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);

        /*
         * Restoring the memory image will overwrite the ttbr1 page tables.
         * Create a second copy of just the linear map, and use this when
         * restoring.
         */
        tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!tmp_pg_dir) {
                pr_err("Failed to allocate memory for temporary page tables.\n");
                rc = -ENOMEM;
                goto out;
        }
        rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
        if (rc)
                goto out;
        /*
         * We need a zero page that is zero before & after resume in order
         * to break before make on the ttbr1 page tables.
         */
        zero_page = (void *)get_safe_page(GFP_ATOMIC);
        if (!zero_page) {
                pr_err("Failed to allocate zero page.\n");
                rc = -ENOMEM;
                goto out;
        }
        /*
         * Locate the exit code in the bottom-but-one page, so that *NULL
         * still has disastrous effects.
         */
        hibernate_exit = (void *)PAGE_SIZE;
        exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
        /*
         * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
         * a new set of ttbr0 page tables and load them.
         */
        rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
                                   (unsigned long)hibernate_exit,
                                   &phys_hibernate_exit,
                                   (void *)get_safe_page, GFP_ATOMIC);
        if (rc) {
                pr_err("Failed to create safe executable page for hibernate_exit code.\n");
                goto out;
        }
        /*
         * The hibernate exit text contains a set of el2 vectors that will
         * be executed at el2 with the mmu off in order to reload hyp-stub.
         */
        __flush_dcache_area(hibernate_exit, exit_size);

        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
         *
         * We can skip this step if we booted at EL1, or are running with VHE.
         */
        if (el2_reset_needed()) {
                phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
                el2_vectors += hibernate_el2_vectors -
                               __hibernate_exit_text_start;     /* offset */

                __hyp_set_vectors(el2_vectors);
        }
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

out:
        return rc;
}
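/*
 * The hibernate core must resume on the CPU we hibernated on, since that
 * is the state the image expects. Freeze every other CPU, keeping only
 * sleep_cpu online.
 */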
int hibernate_resume_nonboot_cpu_disable(void)
{
        if (sleep_cpu < 0) {
                pr_err("Failing to resume from hibernate on an unknown CPU.\n");
                return -ENODEV;
        }

        return freeze_secondary_cpus(sleep_cpu);
}