#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/asm-compat.h>
#include <asm/page.h>
#include <asm/bug.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/processor.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		3
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32
/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, User none */
/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */
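
/*
 * Illustrative sketch (not kernel code): the set-invalidation fields
 * above are typically combined into the RB operand of tlbiel, and the
 * instruction is stepped across every congruence class, as the native
 * TLB flush loops do. Assuming a POWER8-sized TLB:
 *
 *	unsigned long rb = TLBIEL_INVAL_SET;
 *	for (i = 0; i < POWER8_TLB_SETS; i++) {
 *		asm volatile("tlbiel %0" : : "r" (rb));
 *		rb += 1 << TLBIEL_INVAL_SET_SHIFT;
 *	}
 */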
#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken as such; concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;

static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
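
/*
 * Example (illustrative): the two helpers above are inverses for any
 * size present in mmu_psize_defs, e.g. for 16M pages:
 *
 *	int psize = shift_to_mmu_psize(24);		// MMU_PAGE_16M
 *	unsigned int shift = mmu_psize_to_shift(psize);	// 24
 *
 * Unknown shifts return -1 from shift_to_mmu_psize().
 */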

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
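
/*
 * Worked example (illustrative): for a page size whose sllp value is
 * SLB_VSID_L | SLB_VSID_LP_01 (a common 64K encoding), this computes
 *
 *	(0x100 >> 6) | (0x10 >> 4) = 0x4 | 0x1 = 0b101
 *
 * i.e. the packed 3-bit L||LP value as consumed by tlbie/tlbiel.
 */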

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * encode page number shift.
 * In order to fit the 78-bit va in a 64-bit variable we shift the va by
 * 12 bits. This enables us to address up to a 76-bit va.
 * For hpt hash from a va we can ignore the page size bits of va, and for
 * hpte encoding we ignore up to 23 bits of va. So ignoring the lower 12 bits
 * ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;

	/* 4 bits each for actual and base page size */
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
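
/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * ours): decoding both sizes from the two dwords of a valid HPTE. The
 * caller is assumed to have byte-swapped the dwords (e.g. with
 * be64_to_cpu()) first. A 16M actual page backed by a 64K base page
 * size yields 0x1000000 and 0x10000 respectively.
 */
static inline void example_hpte_sizes(unsigned long v, unsigned long r,
				      unsigned long *actual,
				      unsigned long *base)
{
	*actual = hpte_page_size(v, r);
	*base = hpte_base_page_size(v, r);
}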

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
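
/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * ours): converting an old-format HPTE dword pair in place for an ISA
 * v3.0 hash table. The round trip through hpte_new_to_old_*() recovers
 * the original dwords, because the AVPN bits trimmed by
 * HPTE_V_COMMON_BITS are zero for supported virtual address ranges.
 */
static inline void example_hpte_to_new_format(unsigned long *v,
					      unsigned long *r)
{
	unsigned long old_v = *v;

	*v = hpte_old_to_new_v(old_v);
	*r = hpte_old_to_new_r(old_v, *r);
}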

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;

	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
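
/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * ours): composing a full HPTE dword pair from the two encoders above,
 * in the same way the hpte_insert() back ends do before writing the
 * entry into the hash table.
 */
static inline void example_compose_hpte(unsigned long vpn, unsigned long pa,
					int base_psize, int actual_psize,
					int ssize, unsigned long rflags,
					unsigned long vflags,
					unsigned long *v, unsigned long *r)
{
	*v = hpte_encode_v(vpn, base_psize, actual_psize, ssize) |
		vflags | HPTE_V_VALID;
	*r = hpte_encode_r(pa, base_psize, actual_psize) | rflags;
}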

/*
 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
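
/*
 * Illustrative sketch (not part of the kernel API; the helper name is
 * ours): locating the primary PTE group for an EA, mirroring what the
 * callers in hash_utils do with the helpers above.
 */
static inline unsigned long example_hpt_group(unsigned long ea,
					      unsigned long vsid,
					      unsigned int shift, int ssize)
{
	unsigned long vpn = hpt_vpn(ea, vsid, ssize);
	unsigned long hash = hpt_hash(vpn, shift, ssize);

	/* the secondary group is found with ~hash instead */
	return (hash & htab_hash_mask) * HPTES_PER_GROUP;
}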

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes, the max context id is limited to ((1ul << 19) - 5).
 * For kernel space, we use the top 4 context ids to map addresses as below.
 * NOTE: each context only supports 64TB now.
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We also consider VSID 0 special. We use VSID 0 for SLB entries
 * mapping a bad address. This enables us to consolidate bad address
 * handling in hash_page.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in vsid scramble. But the vmemmap
 * (which is what uses region 0xf) will never be close to 64TB in size
 * (it's 56 bytes per page of system memory).
 */
#define CONTEXT_BITS		19
#define ESID_BITS		18
#define ESID_BITS_1T		6

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. The top 4 contexts are used for
 * kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^46 bytes (64TB), so we can support 2^19-1 contexts
 * (19 == 37 + 28 - 46).
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)

#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/*
 * This macro generates asm code to compute the VSID scramble
 * function. Used in slb_allocate() and do_stab_bolted. The function
 * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
 *
 *	rt = register containing the proto-VSID and into which the
 *		VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- The answer will end up in the low VSID_BITS bits of rt. The higher
 *	  bits may contain other garbage, so you may need to mask the
 *	  result.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* NOTE: explanation based on VSID_BITS_##size = 36		\
	 * Now, r3 == VSID (mod 2^36-1), and lies between 0 and		\
	 * 2^36-1+2^28-1. That in particular means that if r3 >=	\
	 * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has	\
	 * the bit clear, r3 already has the answer we want, if it	\
	 * doesn't, the answer is the low 36 bits of r3+1. So in all	\
	 * cases the answer is the low 36 bits of (r3 + ((r3+1) >> 36))*/\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)

#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
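
/*
 * Lookup sketch (illustrative; it mirrors subpage_protection() in the
 * hash fault path, with the NULL checks elided). Walking the 3-level
 * tree for an address ea, given a struct subpage_prot_table *spt:
 *
 *	u32 **sbpm = (ea < 0x100000000UL) ? spt->low_prot :
 *			spt->protptrs[ea >> SBP_L3_SHIFT];
 *	u32 *sbpp  = sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
 *	u32 spp    = sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)];
 */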

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
#define vsid_scramble(protovsid, size)					 \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
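
/*
 * Why the folding above works (informative): 2^VSID_BITS == 1 modulo
 * VSID_MODULUS (= 2^VSID_BITS - 1), so the high bits of the product
 * can simply be added to the low bits. After one such fold, x exceeds
 * the modulus by at most one bit's worth; the final
 * (x + ((x+1) >> VSID_BITS)) adds 1 exactly when x still needs
 * reducing, so masking yields the true remainder.
 */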

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | ((ea >> SID_SHIFT) & ESID_BITS_MASK), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | ((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK), 1T);
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 *
 * For kernel space, we use the top 4 context ids to map addresses as below
 * 0x7fffc -  [ 0xc000000000000000 - 0xc0003fffffffffff ]
 * 0x7fffd -  [ 0xd000000000000000 - 0xd0003fffffffffff ]
 * 0x7fffe -  [ 0xe000000000000000 - 0xe0003fffffffffff ]
 * 0x7ffff -  [ 0xf000000000000000 - 0xf0003fffffffffff ]
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the top 4 context ids from the available range.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
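
/*
 * Worked example (informative): for ea = 0xc000000000000000 and with
 * MAX_USER_CONTEXT = 2^19 - 5 = 0x7fffb, the above computes
 *
 *	context = 0x7fffb + (0xc - 0xc) + 1 = 0x7fffc
 *
 * matching the kernel context table in the comment above.
 */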

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */