/*
 * linux/arch/unicore32/kernel/hibernate.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
 * Copyright (C) 2001-2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/suspend.h>

#include "mach/pm.h"

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;
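
/* Register state for CPU 0, saved and restored by the arch suspend/resume code */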
struct swsusp_arch_regs swsusp_arch_regs_cpu0;

/*
 * Create a middle page table on a resume-safe page and put a pointer to it in
 * the given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET. The page tables are allocated out of resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			pte_t *max_pte;

			if (pfn >= max_low_pfn)
				break;

			/* Map with normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;

			max_pte = pte + PTRS_PER_PTE;
			for (; pte < max_pte; pte++, pfn++) {
				if (pfn >= max_low_pfn)
					break;

				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
			}
		}
	}

	return 0;
}
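
/*
 * Nothing to do here: with the pud and pmd levels folded, the
 * first-level entries are written by set_pmd() in
 * resume_one_page_table_init().
 */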
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
}
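
/*
 * Build a temporary set of page tables in resume-safe pages, then hand
 * control to the assembly restore_image() routine to copy the hibernation
 * image (restore_pblist) back into place.
 */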
int swsusp_arch_resume(void)
{
	int error;

	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image(resume_pg_dir, restore_pblist);
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}
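
/*
 * The CPU register state is handled by the architecture suspend/resume
 * code (see swsusp_arch_regs_cpu0 above), so there is nothing to save here.
 */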
void save_processor_state(void)
{
}
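
/*
 * Flush the TLB after the image has been restored so that the restored
 * page tables take effect.
 */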
void restore_processor_state(void)
{
	local_flush_tlb_all();
}