/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H
/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)
/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
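/*
 * Worked example (editor's note, using the 64 bit values above): each
 * level covers
 *	PMD_SIZE   = 1UL << 20 = 0x100000	(1 MB segment)
 *	PUD_SIZE   = 1UL << 31 = 0x80000000	(2 GB region-third entry)
 *	PGDIR_SIZE = 1UL << 42 = 0x40000000000	(4 TB region-second entry)
 * and the masks round an address down to that boundary, e.g.
 * 0x12345678 & PMD_MASK == 0x12300000.
 */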
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                          |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL
#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* __s390x__ */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
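/*
 * Example (editor's note, not from the original header): a 64 bit user
 * asce for a three level (region-third) page table at physical address
 * `table' would be composed as
 *	table | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH | _ASCE_USER_BITS
 */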
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
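/*
 * Example (editor's note): a private PROT_READ|PROT_WRITE mapping selects
 * __P011 == PAGE_RO, so the first write faults and is resolved by
 * copy-on-write; the shared case __S011 == PAGE_RW maps the page writable
 * right away.
 */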
static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}
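/*
 * Editor's sketch of what the csg loop above achieves (illustration only;
 * the real code needs the single atomic compare-and-swap on the pgste that
 * is stored PTRS_PER_PTE entries after the pte):
 *
 *	do {
 *		old = pgste & ~RCP_PCL_BIT;	// wait for the lock bit
 *		new = pgste |  RCP_PCL_BIT;	// to be free, then take it
 *	} while (cmpxchg(&pgste, old, new) != old);
 */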
static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 1);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Clear host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer changed and referenced to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Transfer changed & referenced to pte software bits */
	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
#endif
	return pgste;
}
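/*
 * Editor's note on the shift constants above, assuming the storage key
 * bits _PAGE_REFERENCED == 0x04 and _PAGE_CHANGED == 0x02 from asm/page.h
 * (64 bit RCP values; CONFIG_PGSTE is a 64 bit only feature):
 *	0x04 << 48 == RCP_GR_BIT	0x02 << 48 == RCP_GC_BIT
 *	RCP_HR_BIT >> 52 == 0x04	RCP_HC_BIT >> 52 == 0x02
 *	0x04 << 45 == KVM_UR_BIT	0x02 << 45 == KVM_UC_BIT
 *	0x04 <<  1 == _PAGE_SWR		0x02 <<  1 == _PAGE_SWC
 */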
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	/* Transfer page referenced bit to pte software bit (host view) */
	if (young || (pgste_val(pgste) & RCP_HR_BIT))
		pte_val(*ptep) |= _PAGE_SWR;
	/* Clear host referenced bit in pgste. */
	pgste_val(pgste) &= ~RCP_HR_BIT;
	/* Transfer page referenced bit to guest bit in pgste */
	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
	return pgste;
}
static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 1);
#endif
}
/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
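/*
 * Illustrative use of the gmap interface above (editor's sketch, not part
 * of the original header; example_gmap_setup and the addresses are made up):
 */
static inline int example_gmap_setup(struct mm_struct *mm,
				     unsigned long host_from)
{
	struct gmap *gmap;
	int rc;

	gmap = gmap_alloc(mm);		/* create a guest address space */
	if (!gmap)
		return -ENOMEM;
	/* back 1MB at guest real address 0 with host memory at host_from */
	rc = gmap_map_segment(gmap, host_from, 0UL, 0x100000UL);
	if (rc) {
		gmap_free(gmap);
		return rc;
	}
	gmap_enable(gmap);		/* switch to the gmap asce */
	return 0;
}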
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_pte(ptep, pgste, entry);
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWC)
		return 1;
#endif
	return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef __s390x__
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef __s390x__
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWC;
#endif
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
	return dirty;
}
/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB
	 * With virtualization we handle the reference bit, without it
	 * we can simply return */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	*ptep = pte;
	if (mm_has_pgste(mm))
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}
#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		*ptep = pte_wrprotect(pte);

		if (mm_has_pgste(mm))
			pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);
	*ptep = entry;

	if (mm_has_pgste(vma->vm_mm))
		pgste_set_unlock(ptep, pgste);
	return 1;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
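/*
 * Editor's sketch (not part of the original header): a full walk from an
 * mm to a pte using the macros above; example_pte_lookup is a made-up name.
 */
static inline pte_t *example_pte_lookup(struct mm_struct *mm,
					unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);		/* folded on 31 bit */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);		/* segment table entry */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);	/* unmap is a no-op, see above */
}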
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
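/*
 * Worked example (editor's note): __swp_entry(3, 0x1234) builds
 *	_PAGE_TYPE_SWAP | (3 << 2) | ((0x1234 & 1) << 7) | ((0x1234 & ~1UL) << 11)
 * i.e. the offset is stored with bit 0 at pte bit position 7 and the
 * remaining bits shifted up by 11; __swp_type() and __swp_offset()
 * recover 3 and 0x1234 from that value.
 */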
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */