/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/bug.h>
#include <asm/page.h>
extern pgd_t swapper_pg_dir[];
extern void paging_init(void);

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];
static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

void arch_report_meminfo(struct seq_file *m);
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
/* TODO: s390 cannot support io_remap_pfn_range... */

#define FIRST_USER_ADDRESS  0UL

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define p4d_ERROR(e) \
	printk("%s:%d: bad p4d %p.\n", __FILE__, __LINE__, (void *) p4d_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
#define VMALLOC_DEFAULT_SIZE	((128UL << 30) - MODULES_LEN)
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}
/*
 * A 64 bit pagetable entry of S390 has following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has following format:
 * |	P-table origin	      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 *
 * A 64 bit region table entry of S390 has following format:
 * |	S-table origin	       |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 *
 * The 64 bit region table origin of S390 has following format:
 * |	  region table origin |	DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit  */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)
/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none    is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap    is true for the bit pattern .11..ooooo.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */
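/*
 * Worked example (editor's note): for a read-write, dirty, young page the
 * table row .00.xx1111.1 corresponds to _PAGE_WRITE | _PAGE_READ |
 * _PAGE_DIRTY | _PAGE_YOUNG | _PAGE_PRESENT == 0x03d, with the HW invalid
 * (0x400) and protect (0x200) bits both clear, so the CPU can read and
 * write the page without faulting.
 */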
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* region/segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit	    */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset		    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask	    */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address	    */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	 0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* page table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit	    */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries	*/

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

#define MAX_PTRS_PER_P4D	PTRS_PER_P4D
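/*
 * Editor's note on the arithmetic: each CRST table has 2048 entries, so a
 * table at one level spans 2048 times the size of the next lower level:
 * 2048 segments of 1 MB (_SEGMENT_SHIFT 20) give the 2 GB _REGION3_SIZE,
 * 2048 of those give the 4 TB _REGION2_SIZE, and 2048 of those give the
 * 8 PB _REGION1_SIZE (1UL << 53).
 */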
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC  | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RX
#define __P101	PAGE_RX
#define __P110	PAGE_RX
#define __P111	PAGE_RX

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RX
#define __S101	PAGE_RX
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
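/*
 * Example (editor's note): a private read/write mapping (VM_READ|VM_WRITE
 * without VM_SHARED) selects __P011 == PAGE_RO above, so the first write
 * access faults and is resolved by copy-on-write; only a shared mapping
 * (__S011 == PAGE_RW) gets a writable pte directly.
 */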
/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY |	\
				 _SEGMENT_ENTRY_LARGE |	\
				 _SEGMENT_ENTRY_READ |	\
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG |	\
				 _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */
#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE |	 \
				 _REGION3_ENTRY_READ |	 \
				 _REGION3_ENTRY_WRITE |	 \
				 _REGION3_ENTRY_YOUNG |	 \
				 _REGION3_ENTRY_DIRTY |	 \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE |  \
				   _REGION3_ENTRY_READ |   \
				   _REGION3_ENTRY_YOUNG |  \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)
static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)
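/*
 * Example (editor's note, assuming the usual three-level start): a task
 * that begins with a region-third top-level table has
 * mm->context.asce_limit == _REGION2_SIZE, so mm_p4d_folded() and
 * mm_pud_folded() are true while mm_pmd_folded() is false; upgrading to
 * four or five levels unfolds pud and p4d in turn.
 */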
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

/*
 * In the case that a guest uses storage keys
 * faults should no longer be backed by zero pages
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}
static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long) ptr | 1;

	asm volatile(
		"	csp	%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	unsigned long address = (unsigned long) ptr | 1;

	asm volatile(
		"	.insn	rre,0xb98a0000,%0,%3"
		: "+d" (reg2), "+m" (*ptr)
		: "d" (reg3), "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL
static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	register unsigned long reg2 asm("2") = old;
	register unsigned long reg3 asm("3") = new;
	register unsigned long reg4 asm("4") = table | dtt;
	register unsigned long reg5 asm("5") = address;

	asm volatile(".insn rrf,0xb98f0000,%0,%2,%4,0"
		     : "+d" (reg2)
		     : "d" (reg3), "d" (reg4), "d" (reg5), "a" (asce)
		     : "memory", "cc");
}
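/*
 * Usage sketch (editor's illustration, not a quote from kernel code): to
 * exchange one segment table entry and flush its TLB entries via CRDTE a
 * caller would pass the old value, the new value, the table origin, the
 * DTT code matching the table type, the virtual address and the ASCE:
 *
 *	crdte(old, new, table_origin, CRDTE_DTT_SEGMENT, addr, asce);
 */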
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}
static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (pud_val(pud) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}
static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}
static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_SOFT_DIRTY;
	return pmd;
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		pgd_val(*pgd) = _REGION1_ENTRY_EMPTY;
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		p4d_val(*p4d) = _REGION2_ENTRY_EMPTY;
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif
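/*
 * Usage sketch (editor's illustration): these helpers take and return the
 * pte by value, so callers compose them and then publish the result with
 * set_pte_at(), e.g.
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *
 * Nothing is written back to the page table until the pte is stored.
 */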
#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = (unsigned long) ptep;

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	.insn	rrf,0xb2210000,%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	.insn	rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = (unsigned long) ptep;

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	.insn rrf,0xb2210000,%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	return ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	if (full) {
		pte_t pte = *ptep;
		*ptep = __pte(_PAGE_INVALID);
		return pte;
	}
	return ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}
/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		pte_val(entry) &= ~_PAGE_UNUSED;
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		*ptep = entry;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	pte_val(__pte) = physpage + pgprot_val(pgprot);
	if (!MACHINE_HAS_NX)
		pte_val(__pte) &= ~_PAGE_NOEXEC;
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
}

static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
}

static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
{
	return (pte_t *) pmd_deref(*pmd) + pte_index(address);
}

#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)

static inline void pte_unmap(pte_t *pte) { }
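/*
 * Walk sketch (editor's illustration, extending the sequence documented
 * before pgd_offset_raw(); not a quote from kernel code):
 *
 *	pgdp = pgd_offset(mm, addr);
 *	p4dp = p4d_offset(pgdp, addr);
 *	pudp = pud_offset(p4dp, addr);
 *	pmdp = pmd_offset(pudp, addr);
 *	ptep = pte_offset_map(pmdp, addr);
 *
 * On a mm with folded levels the intermediate *_offset helpers pass the
 * pointer through unchanged instead of adding an index.
 */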
static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
{
	return end <= current->mm->context.asce_limit;
}
#define gup_fast_permitted gup_fast_permitted

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
#define pud_page(pud) pfn_to_page(pud_pfn(pud))
#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_WRITE;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_WRITE;
	if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkclean(pud_t pud)
{
	pud_val(pud) &= ~_REGION3_ENTRY_DIRTY;
	pud_val(pud) |= _REGION_ENTRY_PROTECT;
	return pud;
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	pud_val(pud) |= _REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY;
	if (pud_val(pud) & _REGION3_ENTRY_WRITE)
		pud_val(pud) &= ~_REGION_ENTRY_PROTECT;
	return pud;
}
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
	 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
		return pgprot_val(SEGMENT_RX);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
		return pgprot_val(SEGMENT_RW);
	return pgprot_val(SEGMENT_RWX);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
		_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
		_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SOFT_DIRTY;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;

	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
static inline void __pmdp_csp(pmd_t *pmdp)
{
	csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
	    pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
}

#define IDTE_GLOBAL	0
#define IDTE_LOCAL	1

#define IDTE_PTOA	0x0800
#define IDTE_NODAT	0x1000
#define IDTE_GUEST_ASCE	0x2000

static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(addr) * sizeof(pmd_t);
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
			  [m4] "i" (local)
			: "cc" );
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pmdp)
			: [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long r3o;

	r3o = (unsigned long) pudp - pud_index(addr) * sizeof(pud_t);
	r3o |= _ASCE_TYPE_REGION3;
	if (__builtin_constant_p(opt) && opt == 0) {
		/* flush without guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],0,%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
			  [m4] "i" (local)
			: "cc");
	} else {
		/* flush with guest asce */
		asm volatile(
			"	.insn	rrf,0xb98e0000,%[r1],%[r2],%[r3],%[m4]"
			: "+m" (*pudp)
			: [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
			  [r3] "a" (asce), [m4] "i" (local)
			: "cc" );
	}
}

pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_val(*pmdp) == pmd_val(entry))
		return 0;
	pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
	return 1;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp)
{
	VM_BUG_ON(addr & ~HPAGE_MASK);
	return pmdp_test_and_clear_young(vma, addr, pmdp);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!MACHINE_HAS_NX)
		pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp)
{
	return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct mm_struct *mm,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		*pmdp = __pmd(_SEGMENT_ENTRY_EMPTY);
		return pmd;
	}
	return pmdp_xchg_lazy(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}
#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bits 54 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * This leaves the bits 0-51 and bits 56-62 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
 * for the offset.
 * |			  offset			|01100|type |00|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;

	pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
	pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return pte;
}
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
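/*
 * Worked example (editor's note): __swp_entry(3, 5) builds the pte value
 * _PAGE_INVALID | _PAGE_PROTECT (0x600) | 5 << __SWP_OFFSET_SHIFT (0x5000)
 * | 3 << __SWP_TYPE_SHIFT (0xc) == 0x560c; (0x560c & 0x201) == 0x200, so
 * pte_swap() recognizes it, and __swp_type()/__swp_offset() recover 3 and 5.
 */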
#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */