// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/set_memory.h>

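/*
 * Set the storage key for the 4K blocks of a 1MB frame with a single SSKE
 * (multiple-block control); the updated block address is returned so the
 * caller can loop until the 1MB boundary is reached.
 */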
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
        asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
                     : [addr] "+a" (addr) : [skey] "d" (skey));
        return addr;
}

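/*
 * Initialize the storage keys of the range [start, end) to PAGE_DEFAULT_KEY,
 * one 1MB frame at a time when EDAT1 is available, otherwise per 4K page.
 */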
void __storage_key_init_range(unsigned long start, unsigned long end)
{
        unsigned long boundary, size;

        if (!PAGE_DEFAULT_KEY)
                return;
        while (start < end) {
                if (MACHINE_HAS_EDAT1) {
                        /* set storage keys for a 1MB frame */
                        size = 1UL << 20;
                        boundary = (start + size) & ~(size - 1);
                        if (boundary <= end) {
                                do {
                                        start = sske_frame(start, PAGE_DEFAULT_KEY);
                                } while (start < boundary);
                                continue;
                        }
                }
                page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
                start += PAGE_SIZE;
        }
}

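/*
 * Counters for the 4K, 1M and 2G entries that currently back the kernel
 * direct mapping; reported as the DirectMap* lines in /proc/meminfo.
 */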
#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
        seq_printf(m, "DirectMap1M:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
        seq_printf(m, "DirectMap2G:    %8lu kB\n",
                   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */

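/*
 * Replace a page table entry of any level and flush the TLB: CRDTE when
 * EDAT2 is available, otherwise CSPG (with IDTE) or CSP.
 */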
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
                    unsigned long dtt)
{
        unsigned long table, mask;

        mask = 0;
        if (MACHINE_HAS_EDAT2) {
                switch (dtt) {
                case CRDTE_DTT_REGION3:
                        mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
                        break;
                case CRDTE_DTT_SEGMENT:
                        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
                        break;
                case CRDTE_DTT_PAGE:
                        mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
                        break;
                }
                table = (unsigned long)old & mask;
                crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
        } else if (MACHINE_HAS_IDTE) {
                cspg(old, *old, new);
        } else {
                csp((unsigned int *)old + 1, *old, new);
        }
}

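/*
 * Apply the SET_MEMORY_* flags to every pte in [addr, end) below pmdp.
 */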
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
                          unsigned long flags)
{
        pte_t *ptep, new;

        ptep = pte_offset(pmdp, addr);
        do {
                new = *ptep;
                if (pte_none(new))
                        return -EINVAL;
                if (flags & SET_MEMORY_RO)
                        new = pte_wrprotect(new);
                else if (flags & SET_MEMORY_RW)
                        new = pte_mkwrite(pte_mkdirty(new));
                if (flags & SET_MEMORY_NX)
                        pte_val(new) |= _PAGE_NOEXEC;
                else if (flags & SET_MEMORY_X)
                        pte_val(new) &= ~_PAGE_NOEXEC;
                pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
                ptep++;
                addr += PAGE_SIZE;
                cond_resched();
        } while (addr < end);
        return 0;
}

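/*
 * Split a large (1MB) segment entry into a page table with PTRS_PER_PTE
 * 4K entries, preserving the original protection and no-exec bits.
 */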
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
        unsigned long pte_addr, prot;
        pte_t *pt_dir, *ptep;
        pmd_t new;
        int i, ro, nx;

        pt_dir = vmem_pte_alloc();
        if (!pt_dir)
                return -ENOMEM;
        pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
        ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
        nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
        prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
        if (!nx)
                prot &= ~_PAGE_NOEXEC;
        ptep = pt_dir;
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_val(*ptep) = pte_addr | prot;
                pte_addr += PAGE_SIZE;
                ptep++;
        }
        pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
        update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
        update_page_count(PG_DIRECT_MAP_1M, -1);
        return 0;
}

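/* Rewrite a large segment entry in place with the requested attributes. */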
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
                            unsigned long flags)
{
        pmd_t new = *pmdp;

        if (flags & SET_MEMORY_RO)
                new = pmd_wrprotect(new);
        else if (flags & SET_MEMORY_RW)
                new = pmd_mkwrite(pmd_mkdirty(new));
        if (flags & SET_MEMORY_NX)
                pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
        else if (flags & SET_MEMORY_X)
                pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
        pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
                          unsigned long flags)
{
        unsigned long next;
        pmd_t *pmdp;
        int rc = 0;

        pmdp = pmd_offset(pudp, addr);
        do {
                if (pmd_none(*pmdp))
                        return -EINVAL;
                next = pmd_addr_end(addr, end);
                if (pmd_large(*pmdp)) {
                        if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
                                rc = split_pmd_page(pmdp, addr);
                                if (rc)
                                        return rc;
                                continue;
                        }
                        modify_pmd_page(pmdp, addr, flags);
                } else {
                        rc = walk_pte_level(pmdp, addr, next, flags);
                        if (rc)
                                return rc;
                }
                pmdp++;
                addr = next;
                cond_resched();
        } while (addr < end);
        return rc;
}

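/*
 * Split a large (2GB) region-third entry into a segment table with
 * PTRS_PER_PMD 1MB entries, preserving protection and no-exec bits.
 */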
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
        unsigned long pmd_addr, prot;
        pmd_t *pm_dir, *pmdp;
        pud_t new;
        int i, ro, nx;

        pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
        if (!pm_dir)
                return -ENOMEM;
        pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
        ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
        nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
        prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
        if (!nx)
                prot &= ~_SEGMENT_ENTRY_NOEXEC;
        pmdp = pm_dir;
        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd_val(*pmdp) = pmd_addr | prot;
                pmd_addr += PMD_SIZE;
                pmdp++;
        }
        pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
        update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
        update_page_count(PG_DIRECT_MAP_2G, -1);
        return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr,
                            unsigned long flags)
{
        pud_t new = *pudp;

        if (flags & SET_MEMORY_RO)
                new = pud_wrprotect(new);
        else if (flags & SET_MEMORY_RW)
                new = pud_mkwrite(pud_mkdirty(new));
        if (flags & SET_MEMORY_NX)
                pud_val(new) |= _REGION_ENTRY_NOEXEC;
        else if (flags & SET_MEMORY_X)
                pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
        pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
                          unsigned long flags)
{
        unsigned long next;
        pud_t *pudp;
        int rc = 0;

        pudp = pud_offset(p4d, addr);
        do {
                if (pud_none(*pudp))
                        return -EINVAL;
                next = pud_addr_end(addr, end);
                if (pud_large(*pudp)) {
                        if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
                                rc = split_pud_page(pudp, addr);
                                if (rc)
                                        break;
                                continue;
                        }
                        modify_pud_page(pudp, addr, flags);
                } else {
                        rc = walk_pmd_level(pudp, addr, next, flags);
                }
                pudp++;
                addr = next;
                cond_resched();
        } while (addr < end && !rc);
        return rc;
}

static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
                          unsigned long flags)
{
        unsigned long next;
        p4d_t *p4dp;
        int rc = 0;

        p4dp = p4d_offset(pgd, addr);
        do {
                if (p4d_none(*p4dp))
                        return -EINVAL;
                next = p4d_addr_end(addr, end);
                rc = walk_pud_level(p4dp, addr, next, flags);
                p4dp++;
                addr = next;
                cond_resched();
        } while (addr < end && !rc);
        return rc;
}

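/*
 * cpa_mutex serializes all kernel page attribute changes; change_page_attr()
 * walks the kernel page tables for [addr, end) and applies the SET_MEMORY_*
 * flags at each level.
 */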
static DEFINE_MUTEX(cpa_mutex);

static int change_page_attr(unsigned long addr, unsigned long end,
                            unsigned long flags)
{
        unsigned long next;
        int rc = -EINVAL;
        pgd_t *pgdp;

        if (addr == end)
                return 0;
        if (end >= MODULES_END)
                return -EINVAL;
        mutex_lock(&cpa_mutex);
        pgdp = pgd_offset_k(addr);
        do {
                if (pgd_none(*pgdp))
                        break;
                next = pgd_addr_end(addr, end);
                rc = walk_p4d_level(pgdp, addr, next, flags);
                if (rc)
                        break;
                cond_resched();
        } while (pgdp++, addr = next, addr < end && !rc);
        mutex_unlock(&cpa_mutex);
        return rc;
}

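/*
 * Common entry point behind set_memory_ro/rw/nx/x(); the NX/X requests are
 * dropped when the machine has no instruction-execution protection.
 */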
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
        if (!MACHINE_HAS_NX)
                flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
        if (!flags)
                return 0;
        addr &= PAGE_MASK;
        return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

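/*
 * Invalidate nr consecutive ptes, using a single range operation when the
 * IPTE-range facility (facility 13) is installed.
 */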
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
        int i;

        if (test_facility(13)) {
                __ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
                return;
        }
        for (i = 0; i < nr; i++) {
                __ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
                address += PAGE_SIZE;
                pte++;
        }
}

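/*
 * For CONFIG_DEBUG_PAGEALLOC: map pages by clearing _PAGE_INVALID, or unmap
 * them by invalidating their ptes, handling at most one page table
 * (PTRS_PER_PTE entries) per loop iteration.
 */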
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long address;
        int nr, i, j;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        for (i = 0; i < numpages;) {
                address = page_to_phys(page + i);
                pgd = pgd_offset_k(address);
                p4d = p4d_offset(pgd, address);
                pud = pud_offset(p4d, address);
                pmd = pmd_offset(pud, address);
                pte = pte_offset_kernel(pmd, address);
                nr = (unsigned long)pte >> ilog2(sizeof(long));
                nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
                nr = min(numpages - i, nr);
                if (enable) {
                        for (j = 0; j < nr; j++) {
                                pte_val(*pte) &= ~_PAGE_INVALID;
                                address += PAGE_SIZE;
                                pte++;
                        }
                } else {
                        ipte_range(pte, address, nr);
                }
                i += nr;
        }
}

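/*
 * Used by hibernation: LRA sets a non-zero condition code if the page at
 * addr has no valid translation, so cc == 0 means the page is mapped.
 */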
#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "	lra	%1,0(%1)\n"
                "	ipm	%0\n"
                "	srl	%0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */