/*
 * (c) 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/smp.h>
#include <linux/export.h>
#include <linux/memblock.h>

#include <asm/lppaca.h>
#include <asm/paca.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/kexec.h>
#ifdef CONFIG_PPC_BOOK3S

/*
 * The structure which the hypervisor knows about - this structure
 * should not cross a page boundary. The vpa_init/register_vpa call
 * is now known to fail if the lppaca structure crosses a page
 * boundary. The lppaca is also used on POWER5 pSeries boxes.
 * The lppaca is 640 bytes long, and cannot readily
 * change since the hypervisor knows its layout, so a 1kB alignment
 * will suffice to ensure that it doesn't cross a page boundary.
 */
struct lppaca lppaca[] = {
	[0 ... (NR_LPPACAS-1)] = {
		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
		.size = cpu_to_be16(sizeof(struct lppaca)),
		.fpregs_in_use = 1,
		.slb_count = cpu_to_be16(64),
		.vmxregs_in_use = 0,
		.page_ins = 0,
	},
};
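/*
 * Why 1kB alignment is enough (worked example, not part of the original
 * file): the lppaca is 640 bytes and 640 <= 1024, so an entry starting
 * on a 1kB boundary ends within that 1kB slot; since 1kB divides the
 * 4kB page size evenly, no slot can straddle a page. A compile-time
 * check along these lines could assert the invariant:
 */
#if 0	/* illustrative sketch only */
BUILD_BUG_ON(sizeof(struct lppaca) > 0x400);	/* fits in one 1kB slot */
BUILD_BUG_ON(PAGE_SIZE % 0x400);		/* slots never cross a page */
#endif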
static struct lppaca *extra_lppacas;
static long __initdata lppaca_size;

static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
{
	if (nr_cpus <= NR_LPPACAS)
		return;

	lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) *
				 (nr_cpus - NR_LPPACAS));
	extra_lppacas = __va(memblock_alloc_base(lppaca_size,
						 PAGE_SIZE, limit));
}
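/*
 * Worked example (illustrative; assumes 4kB pages and NR_LPPACAS == 1,
 * as kernels of this era define): for nr_cpus = 64,
 *
 *	lppaca_size = PAGE_ALIGN(640 * (64 - 1))
 *	            = PAGE_ALIGN(40320)
 *	            = 40960			(ten 4kB pages)
 *
 * memblock_alloc_base() returns a physical address below 'limit', which
 * __va() converts to a kernel linear-mapping virtual address.
 */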
static struct lppaca * __init new_lppaca(int cpu)
{
	struct lppaca *lp;

	if (cpu < NR_LPPACAS)
		return &lppaca[cpu];

	lp = extra_lppacas + (cpu - NR_LPPACAS);
	*lp = lppaca[0];

	return lp;
}
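/*
 * Indexing sketch (illustrative, again assuming NR_LPPACAS == 1): CPU 0
 * gets the statically initialised lppaca[0]; CPU 5 gets the fifth entry
 * of the memblock area, pre-seeded by copying lppaca[0]:
 */
#if 0	/* illustrative sketch only */
struct lppaca *lp0 = new_lppaca(0);	/* == &lppaca[0]        */
struct lppaca *lp5 = new_lppaca(5);	/* == &extra_lppacas[4] */
#endif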
static void __init free_lppacas(void)
{
	long new_size = 0, nr;

	if (!lppaca_size)
		return;
	nr = num_possible_cpus() - NR_LPPACAS;
	if (nr > 0)
		new_size = PAGE_ALIGN(nr * sizeof(struct lppaca));
	if (new_size >= lppaca_size)
		return;

	memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size);
	lppaca_size = new_size;
}
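/*
 * Trimming example (illustrative, same assumptions as above): if the
 * area was sized for nr_cpu_ids = 64 (40960 bytes) but only 16 CPUs
 * turn out to be possible, the tail goes back to memblock:
 *
 *	new_size = PAGE_ALIGN((16 - 1) * 640) = 12288
 *	memblock_free(base + 12288, 40960 - 12288);	(frees 28672 bytes)
 */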
#else

static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { }
static inline void free_lppacas(void) { }

#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_STD_MMU_64

/*
 * 3 persistent SLBs are registered here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
 *
 * If you make the number of persistent SLB entries dynamic, please also
 * update PR KVM to flush and restore them accordingly.
 */
static struct slb_shadow *slb_shadow;

static void __init allocate_slb_shadows(int nr_cpus, int limit)
{
	int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
	slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
	memset(slb_shadow, 0, size);
}
static struct slb_shadow * __init init_slb_shadow(int cpu)
{
	struct slb_shadow *s = &slb_shadow[cpu];

	/*
	 * When we come through here to initialise boot_paca, the slb_shadow
	 * buffers are not allocated yet. That's OK, we'll get one later in
	 * boot, but make sure we don't corrupt memory at 0.
	 */
	if (!slb_shadow)
		return NULL;

	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
	s->buffer_length = cpu_to_be32(sizeof(*s));

	return s;
}
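/*
 * Boot-ordering sketch (illustrative): the first call, for the boot
 * paca, happens before allocate_slb_shadows(), so it returns NULL; the
 * pointer is filled in when the pacas are initialised again later in
 * boot, after the shadow buffers exist.
 */
#if 0	/* illustrative sketch only */
struct slb_shadow *s = init_slb_shadow(0);	/* early boot: NULL */
allocate_slb_shadows(nr_cpu_ids, limit);
s = init_slb_shadow(0);				/* now: &slb_shadow[0] */
#endif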
#else /* CONFIG_PPC_STD_MMU_64 */

static void __init allocate_slb_shadows(int nr_cpus, int limit) { }

#endif /* CONFIG_PPC_STD_MMU_64 */
/*
 * The paca is an array with one entry per processor. Each entry
 * contains an lppaca, which holds the information shared between the
 * hypervisor and Linux.
 *
 * On systems with hardware multi-threading, there are two threads per
 * processor, and the paca array must contain an entry for each thread.
 * The VPD areas give max logical processors = 2 * max physical
 * processors; the processor VPD array needs one entry per physical
 * processor (not per thread).
 */
struct paca_struct *paca;
EXPORT_SYMBOL(paca);
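/*
 * Sizing example (illustrative): a machine with 8 physical cores and
 * two hardware threads per core presents 16 logical processors, so the
 * paca array needs 16 entries while the processor VPD array needs 8.
 */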
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S
	new_paca->lppaca_ptr = new_lppaca(cpu);
#else
	new_paca->kernel_pgd = swapper_pg_dir;
#endif
	new_paca->lock_token = 0x8000;	/* constant, consumed by the lock code */
	new_paca->paca_index = cpu;
	new_paca->kernel_toc = kernel_toc_addr();
	new_paca->kernelbase = (unsigned long) _stext;
	/* Only set MSR:IR/DR when MMU is initialized */
	new_paca->kernel_msr = MSR_KERNEL & ~(MSR_IR | MSR_DR);
	new_paca->hw_cpu_id = 0xffff;	/* invalid until the hw id is known */
	new_paca->kexec_state = KEXEC_STATE_NONE;
	new_paca->__current = &init_task;
	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;	/* poison: per-cpu offset is set later */
#ifdef CONFIG_PPC_STD_MMU_64
	new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_PPC_BOOK3E
	/* For now -- if we have threads this will be adjusted later */
	new_paca->tcd_ptr = &new_paca->tcd;
#endif
}
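/*
 * Usage sketch (illustrative; the exact call sites are stated from
 * memory of setup_64.c and should be treated as an assumption): the
 * boot CPU gets a statically allocated paca before the memblock-backed
 * array exists.
 */
#if 0	/* illustrative sketch only */
initialise_paca(&boot_paca, 0);	/* boot_paca: static, pre-memblock */
setup_paca(&boot_paca);		/* make it addressable via r13 */
#endif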
/* Put the paca pointer into r13 and SPRG_PACA */
void setup_paca(struct paca_struct *new_paca)
{
	/* Setup r13 */
	local_paca = new_paca;

#ifdef CONFIG_PPC_BOOK3E
	/* On Book3E, initialize the TLB miss exception frames */
	mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb);
#else
	/*
	 * In HV mode, we setup both HPACA and PACA to avoid problems
	 * if we do a GET_PACA() before the feature fixups have been
	 * applied.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE))
		mtspr(SPRN_SPRG_HPACA, local_paca);
#endif
	mtspr(SPRN_SPRG_PACA, local_paca);
}
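/*
 * Why assigning local_paca writes r13 (background, recalled from
 * asm/paca.h -- treat the exact declaration as an assumption):
 * local_paca is a global register variable pinned to GPR 13, so the
 * store above is a plain register move; the SPRG copies exist for
 * contexts such as early exception entry where r13 cannot be trusted.
 */
#if 0	/* illustrative sketch only */
register struct paca_struct *local_paca asm("r13");
#endif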
static int __initdata paca_size;

void __init allocate_pacas(void)
{
	u64 limit;
	int cpu;

	limit = ppc64_rma_size;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * We can't take SLB misses on the paca, and we want to access them
	 * in real mode, so allocate them within the RMA and also within
	 * the first segment.
	 */
	limit = min(0x10000000ULL, limit);
#endif

	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);

	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
	memset(paca, 0, paca_size);

	printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n",
		paca_size, nr_cpu_ids, paca);

	allocate_lppacas(nr_cpu_ids, limit);

	allocate_slb_shadows(nr_cpu_ids, limit);

	/* Can't use for_each_*_cpu, as they aren't functional yet */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		initialise_paca(&paca[cpu], cpu);
}
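/*
 * Limit arithmetic (illustrative): 0x10000000 is 256MB, the size of one
 * segment when 256MB segments are in use, so on Book3S-64 the pacas
 * land both below the RMA limit and inside the first segment --
 * whichever bound is smaller wins:
 *
 *	ppc64_rma_size = 128MB  ->  limit = 128MB
 *	ppc64_rma_size = 1GB    ->  limit = 256MB
 */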
void __init free_unused_pacas(void)
{
	int new_size;

	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);

	if (new_size >= paca_size)
		return;

	memblock_free(__pa(paca) + new_size, paca_size - new_size);

	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
		paca_size - new_size);

	paca_size = new_size;

	free_lppacas();
}
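/*
 * Call-ordering sketch (illustrative; the call sites are recalled from
 * memory and should be treated as an assumption): allocate_pacas() runs
 * while nr_cpu_ids still reflects the compiled-in NR_CPUS ceiling; once
 * the device tree has fixed the real CPU count, this function returns
 * the over-allocation, e.g.:
 *
 *	NR_CPUS = 2048, actual possible CPUs = 16
 *	-> the tail (2048 - 16) * sizeof(struct paca_struct) bytes,
 *	   rounded to page granularity, goes back to memblock.
 */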