/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 */
#include <asm/asm-const.h>
/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>
#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32
/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */
/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)
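/*
 * e.g. a 64K base page is encoded with L = 1, LP = 01, i.e.
 * mmu_psize_defs[MMU_PAGE_64K].sllp == (SLB_VSID_L | SLB_VSID_LP_01).
 */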
#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25
/*
 * Hash table
 */

#define HPTES_PER_GROUP	8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)
/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX	1	/* Supervisor read/write, User read */
#define PP_RWRW	2	/* Supervisor read/write, User read/write */
#define PP_RXRX	3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */
/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12
#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */
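#if 0
/*
 * Illustrative sketch only (assumed usage, cf. the tlbiel_all() helpers
 * in hash_native.c): flush the local TLB one congruence class at a time
 * by walking every set index with the "invalidate set" selector.
 */
static inline void example_tlbiel_all_sets(void)
{
	unsigned int set;

	for (set = 0; set < POWER9_TLB_SETS_HASH; set++) {
		unsigned long rb = (set << TLBIEL_INVAL_SET_SHIFT) |
				   TLBIEL_INVAL_SET;
		asm volatile("tlbiel %0" : : "r" (rb));
	}
	asm volatile("ptesync" : : : "memory");
}
#endif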
#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken as such, concurrent access on pre POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;
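/*
 * These hooks are filled in at boot: see hpte_init_native() and
 * hpte_init_pseries() below, which install the bare-metal and
 * hypervisor-mediated HPTE implementations respectively.
 */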
extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;
static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}
static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}
static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}
#endif /* __ASSEMBLY__ */
/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1
/*
 * encode page number shift.
 * in order to fit the 78 bit va in a 64 bit variable we shift the va by
 * 12 bits. This enables us to address up to a 76 bit va.
 * For hpt hash from a va we can ignore the page size bits of va and for
 * hpte encoding we ignore up to 23 bits of va. So ignoring the lower 12 bits
 * ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12
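/*
 * e.g. a 64K page has shift = 16, so its low four VPN bits (VA bits
 * 15:12) are page-offset bits; hpt_hash() below drops them by shifting
 * right by (shift - VPN_SHIFT) = 4.
 */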
/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}
static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}
/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];
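/*
 * e.g. an entry of (MMU_PAGE_16M << 4) | MMU_PAGE_16M describes a 16M
 * page backed by 16M base pages, while (MMU_PAGE_16M << 4) | MMU_PAGE_64K
 * would describe a 16M actual page living in a 64K base-page segment.
 */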
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}
static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;
/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;
/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78 bits VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}
static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}
static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}
static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
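/*
 * e.g. for a 1T segment (B = 0b01): the old format holds B in v[63:62],
 * so hpte_old_to_new_r() shifts it down from bit 62 to bit 58 of the
 * second dword, and hpte_new_to_old_v() shifts it back up. Converting a
 * (v, r) pair old->new->old therefore round-trips losslessly.
 */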
static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}
/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;

	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}
/*
 * This function sets the ARPN, and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is, properly
 * aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
/*
 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}
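/*
 * e.g. for a 256M segment (s_shift = 28): the VPN is the VSID in the high
 * bits with the 16-bit page-index portion of the EA (bits 27:12) below it:
 *	vpn = (vsid << 16) | ((ea >> 12) & 0xffff)
 */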
/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
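/*
 * e.g. for a 4K page in a 256M segment (shift = 12, SID_SHIFT = 28):
 *	hash = (vpn >> 16) ^ (vpn & 0xffff)
 * i.e. the VSID XORed with the page index within the segment, then
 * truncated to 39 bits by the mask above.
 */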
#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hash__setup_new_exec(void);
#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);
struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT;
 * more details in get_user_context().
 *
 * For kernel space, see get_kernel_context().
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. That means we can't use context
 * id 0, because a context id of 0 and an EA of 0 gives a proto-VSID of 0,
 * which will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble.
 */
/*
 * Max VA bits we support as of now is 68 bits. We want a 19-bit
 * context ID.
 * Restrictions:
 * GPU has restrictions of not being able to access beyond 128TB
 * (47-bit effective address). We also cannot do more than a 20-bit PID.
 * For p4 and p5, which can only do 65-bit VA, we restrict our CONTEXT_BITS
 * to 16 bits (ie, we can only have 2^16 pids at the same time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)
/*
 * Now certain configs support MAX_PHYSMEM more than 512TB. Hence we will need
 * to use more than one context for linear mapping the kernel.
 * For vmalloc and memmap, we use just one context with 512TB. With 64 byte
 * struct page size, we need only 32TB in memmap for 2PB (51 bits (MAX_PHYSMEM_BITS)).
 */
#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
#define MAX_KERNEL_CTX_CNT	(1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT	1
#endif
#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1
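/*
 * e.g. with MAX_PHYSMEM_BITS = 51 and MAX_EA_BITS_PER_CONTEXT = 49 (64K
 * pages), MAX_KERNEL_CTX_CNT = 1UL << (51 - 49) = 4 contexts for the
 * linear map, which is why contexts 1-4 are described as kernel mapping
 * below.
 */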
/*
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
 * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)

// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)
/*
 * For platforms that support only 65-bit VA we limit the context bits.
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)
/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to the vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the number
 * of protovsid bits for our modulus optimization to work.
 *
 * The below table shows the current values used.
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   25 |         49 |                 50 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   37 |         61 |                 74 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   28 |         52 |                 56 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   40 |         64 |                 80 |
 * |-------+------------+----------------------+------------+--------------------|
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)
/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->hash_context->slb_addr_limit >> 41)

#ifndef __ASSEMBLY__
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};
#define SBP_L1_BITS	(PAGE_SHIFT - 2)
#define SBP_L2_BITS	(PAGE_SHIFT - 3)
#define SBP_L1_COUNT	(1 << SBP_L1_BITS)
#define SBP_L2_COUNT	(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT	(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT	(SBP_L2_SHIFT + SBP_L2_BITS)
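/*
 * e.g. with 64K pages (PAGE_SHIFT = 16): SBP_L2_SHIFT = 16 + 14 = 30, so
 * one page of protection words covers 2^30 bytes (1GB), and
 * SBP_L3_SHIFT = 30 + 13 = 43, so one page of pointers covers 2^43 bytes
 * (8TB), matching the comment above.
 */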
extern void subpage_prot_free(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */
/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * upto 4G range. That gets us 16 low slices. For the rest we track slices
 * in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
struct hash_mm_context {
	u16 user_psize; /* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}

#endif /* 1 */
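/*
 * Why the folding works: with n = vsid_bits and M = 2^n - 1, we have
 * 2^n == 1 (mod M), so splitting x = hi * 2^n + lo gives
 * x == hi + lo (mod M). One fold of the < 2n-bit product leaves a value
 * below 2^(n+1) - 1, and the final (x + ((x+1) >> n)) & M step absorbs
 * the remaining carry and maps the value M itself to 0.
 */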
/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}
/*
 * For kernel space, we use context ids as below. Range is 512TB per context.
 * 0x00001 -  [ 0xc000000000000000 - 0xc001ffffffffffff]
 * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff]
 * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff]
 * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff]
 *
 * vmap, IO, vmemmap
 *
 * 0x00005 -  [ 0xc008000000000000 - 0xc009ffffffffffff]
 * 0x00006 -  [ 0xc00a000000000000 - 0xc00bffffffffffff]
 * 0x00007 -  [ 0xc00c000000000000 - 0xc00dffffffffffff]
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on Kernel config, kernel region can have one context
	 * or more.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}
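/*
 * e.g. ea = 0xc002000000000000 is in the linear map; with
 * MAX_EA_BITS_PER_CONTEXT = 49, (ea & EA_MASK) >> 49 = 1, so ctx = 2,
 * matching the table above.
 */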
/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}

unsigned int htab_shift_for_mem_size(unsigned long mem_size);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */