/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>

extern void slb_allocate(unsigned long ea);
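
/* slb_allocate() is the SLB miss handler proper; in this tree it is
 * implemented in assembler (slb_low.S), which is why only the
 * prototype appears here. */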

static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}
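
/* An SLB entry is a pair of doublewords: the ESID word built by
 * mk_esid_data() (effective segment | valid bit | bolted-slot index)
 * and the VSID word built by mk_vsid_data() (virtual segment number
 * shifted into place, plus protection/page-size flags). */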

static inline void create_slbe(unsigned long ea, unsigned long vsid,
			       unsigned long flags, unsigned long entry)
{
	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, flags)),
		       "r" (mk_esid_data(ea, entry))
		     : "memory" );
}
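
/* slbmte takes the VSID word in RS (%0) and the ESID word in RB (%1).
 * Note that the vsid argument is actually unused: the VSID is
 * recomputed from ea by mk_vsid_data(), which is why slb_initialize()
 * can get away with passing get_kernel_vsid(KERNELBASE) for the
 * VMALLOC entry as well. */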

static void slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * appropriately too. */
	unsigned long ksp_flags = SLB_VSID_KERNEL;
	unsigned long ksp_esid_data;

	WARN_ON(!irqs_disabled());

	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		ksp_flags |= SLB_VSID_L;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
	if ((ksp_esid_data & ESID_MASK) == KERNELBASE)
		ksp_esid_data &= ~SLB_ESID_V;
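
	/* If the stack sits in the KERNELBASE segment, the V bit was just
	 * cleared so the slot-2 slbmte below writes an invalid entry
	 * rather than duplicating slot 0.  Slot 0 itself never needs
	 * rebolting: slbia does not invalidate SLB entry 0, so the bolted
	 * KERNELBASE segment survives the flush. */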

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOCBASE, SLB_VSID_KERNEL)),
		        "r"(mk_esid_data(VMALLOCBASE, 1)),
		        "r"(mk_vsid_data(ksp_esid_data, ksp_flags)),
		        "r"(ksp_esid_data)
		     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset = get_paca()->slb_cache_ptr;
	unsigned long esid_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long unmapped_base;
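
	/* The SLB miss handler remembers the ESID of each user entry it
	 * inserts in paca->slb_cache[].  If more than SLB_CACHE_ENTRIES
	 * have gone in since the last switch, slb_cache_ptr will have run
	 * past the end of the array and we fall back to flushing and
	 * rebolting the whole SLB. */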
	if (offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			esid_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT;
			asm volatile("slbie %0" : : "r" (esid_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (esid_data));
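
	/* The duplicate slbie above is what the erratum appears to
	 * require: with offset == 1 it re-issues the single invalidation
	 * from the loop; after a full flush esid_data is still 0, so it
	 * is a harmless slbie of user segment 0. */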

	get_paca()->slb_cache_ptr = 0;
	get_paca()->context = mm->context;

	/*
	 * preload some userspace segments into the SLB.
	 */
	if (test_tsk_thread_flag(tsk, TIF_32BIT))
		unmapped_base = TASK_UNMAPPED_BASE_USER32;
	else
		unmapped_base = TASK_UNMAPPED_BASE_USER64;
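
	/* At most three segments get preloaded - those containing the PC,
	 * the stack pointer, and the default mmap/heap base - so the
	 * first user accesses after the switch don't take SLB misses.
	 * Segments with equal ESIDs, or above KERNELBASE, are skipped
	 * below. */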
	if (pc >= KERNELBASE)
		return;
	slb_allocate(pc);

	if (GET_ESID(pc) == GET_ESID(stack))
		return;

	if (stack >= KERNELBASE)
		return;
	slb_allocate(stack);

	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
		return;

	if (unmapped_base >= KERNELBASE)
		return;
	slb_allocate(unmapped_base);
}

void slb_initialize(void)
{
	/* On iSeries the bolted entries have already been set up by
	 * the hypervisor from the lparMap data in head.S */
#ifndef CONFIG_PPC_ISERIES
	unsigned long flags = SLB_VSID_KERNEL;

	if (cpu_has_feature(CPU_FTR_16M_PAGE))
		flags |= SLB_VSID_L;

	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte	%0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_slbe(KERNELBASE, get_kernel_vsid(KERNELBASE), flags, 0);
	create_slbe(VMALLOCBASE, get_kernel_vsid(KERNELBASE),
		    SLB_VSID_KERNEL, 1);
	/* We don't bolt the stack for the time being - we're in boot,
	 * so the stack is in the bolted segment.  By the time it goes
	 * elsewhere, we'll call _switch() which will bolt in the new
	 * one. */
	asm volatile("isync":::"memory");
#endif
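
	/* stab_rr is the round-robin pointer the miss handler uses to
	 * pick a victim slot; starting it just past the bolted entries
	 * keeps slots 0-2 from ever being cast out. */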
	get_paca()->stab_rr = SLB_NUM_BOLTED;
}