// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
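
/*
 * Set the storage key of the 1MB frame containing "addr" via the SSKE
 * instruction with the multiple-block control and return the address of
 * the next frame to be processed.
 */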
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}
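
/*
 * Initialize the storage keys for the range [start, end). With EDAT1 the
 * keys of whole 1MB frames are set via sske_frame(), otherwise each 4K
 * page is handled individually with page_set_storage_key().
 */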
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
		start += PAGE_SIZE;
	}
}
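
/*
 * Counters for the number of 4K, 1M and 2G entries in the kernel direct
 * mapping, reported in /proc/meminfo as the DirectMap* lines below.
 */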
#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G: %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */
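
/*
 * Atomically replace a page table entry. With EDAT2 the CRDTE instruction
 * replaces the entry and flushes the TLB for the affected range, with IDTE
 * CSPG is used, otherwise the entry is replaced via CSP.
 */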
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long)old & mask;
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	pte_t *ptep, new;

	ptep = pte_offset(pmdp, addr);
	do {
		new = *ptep;
		if (pte_none(new))
			return -EINVAL;
		if (flags & SET_MEMORY_RO)
			new = pte_wrprotect(new);
		else if (flags & SET_MEMORY_RW)
			new = pte_mkwrite(pte_mkdirty(new));
		if (flags & SET_MEMORY_NX)
			pte_val(new) |= _PAGE_NOEXEC;
		else if (flags & SET_MEMORY_X)
			pte_val(new) &= ~_PAGE_NOEXEC;
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}
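
/*
 * Split a large (1MB) segment table entry into a page table mapping the
 * same range with 4K pages, preserving the protection and no-exec bits
 * of the original mapping.
 */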
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro, nx;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	if (!nx)
		prot &= ~_PAGE_NOEXEC;
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_val(*ptep) = pte_addr | prot;
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}
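
/*
 * Apply the SET_MEMORY_* flags to a large segment table entry in place.
 */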
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
			    unsigned long flags)
{
	pmd_t new = *pmdp;

	if (flags & SET_MEMORY_RO)
		new = pmd_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pmd_mkwrite(pmd_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
	else if (flags & SET_MEMORY_X)
		pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
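
/*
 * Walk the segment table entries for [addr, end). Large entries that are
 * only partially covered by the range are split first, fully covered large
 * entries are modified in place, all other entries are descended into at
 * the page table level.
 */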
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, flags);
		} else {
			rc = walk_pte_level(pmdp, addr, next, flags);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return 0;
}
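
/*
 * Split a large (2GB) region third table entry into a segment table,
 * preserving the protection and no-exec bits of the original mapping.
 */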
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro, nx;

	pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	if (!nx)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_val(*pmdp) = pmd_addr | prot;
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}
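
/*
 * Apply the SET_MEMORY_* flags to a large region third table entry in place.
 */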
static void modify_pud_page(pud_t *pudp, unsigned long addr,
			    unsigned long flags)
{
	pud_t new = *pudp;

	if (flags & SET_MEMORY_RO)
		new = pud_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pud_mkwrite(pud_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		pud_val(new) |= _REGION_ENTRY_NOEXEC;
	else if (flags & SET_MEMORY_X)
		pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
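
/*
 * Walk the region third table entries for [addr, end), splitting or
 * modifying large entries and descending into the segment tables otherwise.
 */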
static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(p4d, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, flags);
		} else {
			rc = walk_pmd_level(pudp, addr, next, flags);
		}
		pudp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}
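
/*
 * Walk the region second table entries for [addr, end) and descend into
 * the region third table for each of them.
 */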
static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	p4d_t *p4dp;
	int rc = 0;

	p4dp = p4d_offset(pgd, addr);
	do {
		if (p4d_none(*p4dp))
			return -EINVAL;
		next = p4d_addr_end(addr, end);
		rc = walk_pud_level(p4dp, addr, next, flags);
		p4dp++;
		addr = next;
		cond_resched();
	} while (addr < end && !rc);
	return rc;
}

static DEFINE_MUTEX(cpa_mutex);
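
/*
 * Walk the kernel page tables for [addr, end) and apply the SET_MEMORY_*
 * flags. cpa_mutex serializes concurrent attribute changes; the range has
 * to end below MODULES_END.
 */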
static int change_page_attr(unsigned long addr, unsigned long end,
			    unsigned long flags)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_p4d_level(pgdp, addr, next, flags);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}

int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
	if (!MACHINE_HAS_NX)
		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
	if (!flags)
		return 0;
	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}
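
/*
 * Usage sketch (not part of this file): the set_memory_ro/rw/nx/x()
 * helpers declared in <asm/set_memory.h> are thin wrappers that pass the
 * matching SET_MEMORY_* flag, e.g.
 *
 *	set_memory_ro(addr, numpages);
 *
 * ends up here as __set_memory(addr, numpages, SET_MEMORY_RO).
 */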

#ifdef CONFIG_DEBUG_PAGEALLOC
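
/*
 * Invalidate and flush a range of consecutive page table entries. If the
 * IPTE-range facility (facility bit 13) is available a single instruction
 * handles the whole range, otherwise each page is invalidated separately.
 */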
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}
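
/*
 * Map (enable != 0) or unmap a range of pages in the kernel direct mapping
 * by clearing or setting _PAGE_INVALID, handling at most one page table
 * per iteration.
 */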
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	int nr, i, j;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (i = 0; i < numpages;) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		p4d = p4d_offset(pgd, address);
		pud = pud_offset(p4d, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		nr = (unsigned long)pte >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte_val(*pte) &= ~_PAGE_INVALID;
				address += PAGE_SIZE;
				pte++;
			}
		} else {
			ipte_range(pte, address, nr);
		}
		i += nr;
	}
}
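
/*
 * For hibernation: report whether the page is currently mapped by probing
 * its address with LRA and checking the resulting condition code.
 */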
#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */