#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__

#ifndef CONFIG_PPC64
#include <asm-ppc/mmu.h>
#else

/*
 * PowerPC memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>

/*
 * Segment table
 */

#define STE_ESID_V	0x80
#define STE_ESID_KS	0x20
#define STE_ESID_KP	0x10
#define STE_ESID_N	0x08

#define STE_VSID_SHIFT	12

/* Location of cpu0's segment table */
#define STAB0_PAGE	0x6
#define STAB0_OFFSET	(STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR	(STAB0_OFFSET + PHYSICAL_START)
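
/*
 * Example (illustrative): STAB0_OFFSET == 0x6 << 12 == 0x6000, so with
 * PHYSICAL_START == 0 the cpu0 segment table occupies the 4K page at
 * physical address 0x6000.
 */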

#ifndef __ASSEMBLY__
extern char initial_stab[];
#endif /* ! __ASSEMBLY */

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
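
/*
 * Sketch (not from the original header) of how an SLB VSID doubleword is
 * typically assembled before an slbmte; "vsid" and "vsid_data" are
 * hypothetical locals, and the sllp lookup assumes the mmu_psize_defs
 * table declared later in this file.
 */
#if 0
	unsigned long vsid_data = (vsid << SLB_VSID_SHIFT)
				  | SLB_VSID_KERNEL	/* Kp: supervisor-only */
				  | mmu_psize_defs[mmu_linear_psize].sllp;
#endif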

#define SLBIE_C			(0x08000000)

/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & HPTE_V_AVPN))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)

/* Values for PP (assumes Ks=0, Kp=1) */
/* pp0 will always be 0 for linux */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
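
/*
 * Example (not from the original header): a supervisor-only read/write,
 * no-execute page would carry (HPTE_R_N | PP_RWXX) among its HPTE_R_FLAGS
 * bits in the second doubleword of the HPTE.
 */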

#ifndef __ASSEMBLY__

typedef struct {
	unsigned long v;
	unsigned long r;
} hpte_t;

extern hpte_t *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def
{
	unsigned int	shift;	/* number of bits */
	unsigned int	penc;	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
};
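
/*
 * Sketch (not from the original header): a plausible table entry for plain
 * 4K pages.  The name example_4k_def and the exact values are illustrative.
 */
#if 0
static const struct mmu_psize_def example_4k_def = {
	.shift	= 12,	/* 4K == 1 << 12 */
	.penc	= 0,	/* no LP encoding needed in the HPTE */
	.tlbiel	= 1,	/* tlbiel assumed usable for this size */
	.avpnm	= 0,	/* no AVPN bits to mask out */
	.sllp	= 0,	/* SLB L == 0, LP == 00 */
};
#endif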

#endif /* __ASSEMBLY__ */

/*
 * The kernel uses the constants below to index into the page sizes array.
 * The use of fixed constants for this purpose is better for the performance
 * of the low level hash refill handlers.
 *
 * An unsupported page size has a "shift" field set to 0.
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable.
 */

#define MMU_PAGE_4K		0	/* 4K */
#define MMU_PAGE_64K		1	/* 64K */
#define MMU_PAGE_64K_AP		2	/* 64K Admixed (in a 4K segment) */
#define MMU_PAGE_1M		3	/* 1M */
#define MMU_PAGE_16M		4	/* 16M */
#define MMU_PAGE_16G		5	/* 16G */
#define MMU_PAGE_COUNT		6

#ifndef __ASSEMBLY__

/*
 * The current system page sizes
 */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
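
/*
 * Usage sketch (not from the original header): deriving the byte size of
 * the current virtual page size; "page_bytes" is a hypothetical local.
 */
#if 0
	unsigned long page_bytes = 1ul << mmu_psize_defs[mmu_virtual_psize].shift;
#endif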

#ifdef CONFIG_HUGETLB_PAGE
/*
 * The page size index of the huge pages for use by hugetlbfs
 */
extern int mmu_huge_psize;

#endif /* CONFIG_HUGETLB_PAGE */

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * for the page size
 */
static inline unsigned long hpte_encode_v(unsigned long va, int psize)
{
	unsigned long v;

	v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is, properly
 * aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
{
	/* A 4K page needs no special encoding */
	if (psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[psize].penc;
		unsigned int shift = mmu_psize_defs[psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << 12);
	}
}
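
/*
 * Combined usage sketch (not from the original header): assembling both
 * doublewords of an HPTE for a valid, bolted mapping.  va, pa, psize and
 * hpte are hypothetical locals.
 */
#if 0
	hpte_t hpte;

	hpte.v = hpte_encode_v(va, psize) | HPTE_V_BOLTED | HPTE_V_VALID;
	hpte.r = hpte_encode_r(pa, psize) | PP_RWXX;
#endif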

/*
 * This hashes a virtual address for a 256Mb segment only for now
 */
static inline unsigned long hpt_hash(unsigned long va, unsigned int shift)
{
	return ((va >> 28) & 0x7fffffffffUL) ^ ((va & 0x0fffffffUL) >> shift);
}
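
/*
 * Usage sketch (not from the original header): turning the hash into the
 * index of a primary PTE group; "hash", "va", "psize" and "group" are
 * hypothetical locals.
 */
#if 0
	unsigned long hash = hpt_hash(va, mmu_psize_defs[psize].shift);
	unsigned long group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
#endif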

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local);
struct mm_struct;
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local,
			  unsigned long trap);

extern void htab_finish_init(void);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long mode,
			     int psize);

extern void htab_initialize(void);
extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void mm_init_ppc64(void);

extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
				     unsigned long va, unsigned long prpn,
				     unsigned long rflags,
				     unsigned long vflags, int psize);

extern long native_hpte_insert(unsigned long hpte_group,
			       unsigned long va, unsigned long prpn,
			       unsigned long rflags,
			       unsigned long vflags, int psize);

extern long iSeries_hpte_insert(unsigned long hpte_group,
				unsigned long va, unsigned long prpn,
				unsigned long rflags,
				unsigned long vflags, int psize);

extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void stab_initialize(unsigned long stab);

#endif /* __ASSEMBLY__ */

/*
 * VSID allocation
 *
 * We first generate a 36-bit "proto-VSID".  For kernel addresses this
 * is equal to the ESID, for user addresses it is:
 *
 *	(context << USER_ESID_BITS) | (esid & ((1 << USER_ESID_BITS) - 1))
 *
 * The two forms are distinguishable because the top bit is 0 for user
 * addresses, whereas the top two bits are 1 for kernel addresses.
 * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
 * now.
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *	where	VSID_MULTIPLIER = 200730139 = 0xBF6E61B (a 28-bit prime)
 *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
 *
 * This scramble is only well defined for proto-VSIDs below
 * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
 * reserved.  VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below).
 *
 * This scheme has several advantages over older methods:
 *
 *	- We have VSIDs allocated for every kernel address
 * (i.e. everything above 0xC000000000000000), except the very top
 * segment, which simplifies several things.
 *
 *	- We allow for 16 significant bits of ESID and 19 bits of
 * context for user addresses.  i.e. 16T (44 bits) of address space for
 * up to 512K contexts (although the page table structure and context
 * allocation will need changes to take advantage of this).
 *
 *	- The scramble function gives robust scattering in the hash
 * table (at least based on some initial results).  The previous
 * method was more susceptible to pathological cases giving excessive
 * hash collisions.
 */
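
/*
 * Worked example (illustrative, not from the original source): with
 * context == 1 and user address ea == 0x10000000, esid == ea >> 28 == 1,
 * so the proto-VSID is (1 << USER_ESID_BITS) | 1 == 0x10001, and the
 * final VSID is (0x10001 * VSID_MULTIPLIER) % VSID_MODULUS.
 */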

/*
 * WARNING - If you change these you must make sure the asm
 * implementations in slb_allocate (slb_low.S), do_stab_bolted
 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
 *
 * You'll also need to change the precomputed VSID values in head.S
 * which are used by the iSeries firmware.
 */

#define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL<<VSID_BITS)-1)

#define CONTEXT_BITS	19
#define USER_ESID_BITS	16

#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function.  Used in slb_allocate() and do_stab_bolted.  The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low 36 bits of rt.  The higher
 *		bits may contain other garbage, so you may need to mask the
 *		result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx)	\
	lis	rx,VSID_MULTIPLIER@h;					\
	ori	rx,rx,VSID_MULTIPLIER@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS;					\
	clrldi	rt,rt,(64-VSID_BITS);					\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1.  That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set.  So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1.  So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */		\
	add	rt,rt,rx

#ifndef __ASSEMBLY__

typedef unsigned long mm_context_id_t;

typedef struct {
	mm_context_id_t id;
#ifdef CONFIG_HUGETLB_PAGE
	u16 low_htlb_areas, high_htlb_areas;
#endif
} mm_context_t;

static inline unsigned long vsid_scramble(unsigned long protovsid)
{
#if 0
	/* The code below is equivalent to this function for arguments
	 * < 2^VSID_BITS, which is all this should ever be called
	 * with.  However gcc is not clever enough to compute the
	 * modulus (2^n-1) without a second multiply. */
	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
#else
	unsigned long x;

	x = protovsid * VSID_MULTIPLIER;
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
#endif
}
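
/*
 * Why the folding above works: since 2^36 == 1 modulo 2^36-1,
 * x == (x >> VSID_BITS) + (x & VSID_MODULUS) modulo VSID_MODULUS, so a
 * shift and an add replace the divide.  The final "+ ((x+1) >> VSID_BITS)"
 * folds a result of exactly VSID_MODULUS back into range, mirroring the
 * ASM_VSID_SCRAMBLE comment above.
 */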

/* This is only valid for addresses >= KERNELBASE */
static inline unsigned long get_kernel_vsid(unsigned long ea)
{
	return vsid_scramble(ea >> SID_SHIFT);
}

/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return vsid_scramble((context << USER_ESID_BITS)
			     | (ea >> SID_SHIFT));
}

#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))

/* Physical address used by some IO functions */
typedef unsigned long phys_addr_t;

#endif /* __ASSEMBLY */

#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */