arch/s390/include/asm/hugetlb.h
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2008
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <asm/page.h>
#include <asm/pgtable.h>

#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range			free_pgd_range

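/*
 * Storing a huge pte is done out of line: on s390 the "pte" of a huge
 * mapping is really a segment table entry, so the store involves a
 * format conversion (see the implementation in the s390 mm code).
 */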
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}

#define hugetlb_prefault_arch_hook(mm)		do { } while (0)

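/*
 * Allocation/release hooks.  On machines without hardware large-page
 * support these presumably set up and tear down the page table that
 * huge_ptep_get() below reads via _SEGMENT_ENTRY_ORIGIN; with hardware
 * support there is nothing to prepare.
 */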
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline int huge_pte_none(pte_t pte)
{
	return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
		!(pte_val(pte) & _SEGMENT_ENTRY_RO);
}

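/*
 * Without hardware large-page support the segment table entry holds no
 * pte of its own; its origin field points to a software page table.
 * In that case read the first pte of that table, mark the result huge
 * and merge back the INV/RO protection bits kept in the segment entry.
 */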
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	pte_t pte = *ptep;
	unsigned long mask;

	if (!MACHINE_HAS_HPAGE) {
		ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
		if (ptep) {
			mask = pte_val(pte) &
			       (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
			pte = pte_mkhuge(*ptep);
			pte_val(pte) |= mask;
		}
	}
	return pte;
}

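/*
 * Clearing marks the segment table entry invalid only; the TLB flush
 * is deferred by setting mm->context.flush_mm.
 */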
static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);

	mm->context.flush_mm = 1;
	pmd_clear((pmd_t *) ptep);
	return pte;
}

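/*
 * Invalidate the entry with compare-and-swap-and-purge (CSP): reg2/reg3
 * form the even/odd compare/replacement pair, reg4 addresses the entry.
 * CSP purges the matching TLB entries on all CPUs.  The low bits added
 * to the entry address presumably select the word to swap and request
 * the purge.
 */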
static inline void __pmd_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INV;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

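/*
 * Invalidate the entry with IDTE (invalidate DAT table entry).  The
 * instruction is emitted via .insn (opcode 0xb98e), presumably to stay
 * compatible with assemblers that lack the mnemonic.  IDTE wants the
 * segment table origin, recovered here from the entry's own address.
 */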
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
				pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
		);
	}
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
}

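/* Use IDTE if the machine has it, otherwise fall back to CSP. */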
static inline void huge_ptep_invalidate(struct mm_struct *mm,
					unsigned long address, pte_t *ptep)
{
	pmd_t *pmdp = (pmd_t *) ptep;

	if (MACHINE_HAS_IDTE)
		__pmd_idte(address, pmdp);
	else
		__pmd_csp(pmdp);
}

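/*
 * A valid segment table entry may not be changed in place; updating an
 * entry is therefore a two-step sequence: invalidate (which also purges
 * the TLB), then store the new value.
 */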
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
	if (__changed) { \
		huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
		set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
	} \
	__changed; \
})

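/*
 * Write protection defers the TLB flush (context.flush_mm) and only
 * invalidates immediately when another CPU may have the mm attached,
 * i.e. attach_count > 1 or the mm is not the current active_mm.
 */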
#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
({ \
	pte_t __pte = huge_ptep_get(__ptep); \
	if (pte_write(__pte)) { \
		(__mm)->context.flush_mm = 1; \
		if (atomic_read(&(__mm)->context.attach_count) > 1 || \
		    (__mm) != current->active_mm) \
			huge_ptep_invalidate(__mm, __addr, __ptep); \
		set_huge_pte_at(__mm, __addr, __ptep, \
				huge_pte_wrprotect(__pte)); \
	} \
})

static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	huge_ptep_invalidate(vma->vm_mm, address, ptep);
}

#endif /* _ASM_S390_HUGETLB_H */