// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/set_memory.h>
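
/*
 * Set the storage key of the 1MB frame containing @addr. SSKE is used
 * with the multiple-block control set, so it keys all 4K blocks up to
 * the next 1MB boundary and returns the address of the next frame.
 */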
static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],1,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}
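
/*
 * Initialize the storage keys of the range [start, end) to
 * PAGE_DEFAULT_KEY. With EDAT1, fully covered 1MB frames are keyed
 * frame-wise via sske_frame(); the remainder is keyed page by page.
 */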
void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 1);
		start += PAGE_SIZE;
	}
}
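
/*
 * Counters for the mapping sizes used in the kernel direct mapping,
 * reported through /proc/meminfo. The shifts convert a count of
 * mappings into kB: 4K << 2, 1M << 10, 2G << 21.
 */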
#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
#endif /* CONFIG_PROC_FS */
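
/*
 * Exchange a page table entry of any level and flush the associated
 * TLB entries: with EDAT2 via CRDTE, with IDTE via CSPG, otherwise via
 * CSP on the lower word of the entry.
 */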
static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long)old & mask;
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}
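
/*
 * Apply the SET_MEMORY_* flags to every page table entry of the range;
 * fails with -EINVAL if an entry is empty.
 */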
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	pte_t *ptep, new;

	ptep = pte_offset_kernel(pmdp, addr);
	do {
		new = *ptep;
		if (pte_none(new))
			return -EINVAL;
		if (flags & SET_MEMORY_RO)
			new = pte_wrprotect(new);
		else if (flags & SET_MEMORY_RW)
			new = pte_mkwrite(pte_mkdirty(new));
		if (flags & SET_MEMORY_NX)
			pte_val(new) |= _PAGE_NOEXEC;
		else if (flags & SET_MEMORY_X)
			pte_val(new) &= ~_PAGE_NOEXEC;
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}
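
/*
 * Split a large (1MB) segment mapping into 4K page mappings: allocate
 * a page table that carries over the protection bits, exchange the
 * segment table entry, and adjust the direct mapping counters.
 */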
static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro, nx;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	if (!nx)
		prot &= ~_PAGE_NOEXEC;
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_val(*ptep) = pte_addr | prot;
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}
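
/* Apply the SET_MEMORY_* flags to one large (1MB) segment entry. */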
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
			    unsigned long flags)
{
	pmd_t new = *pmdp;

	if (flags & SET_MEMORY_RO)
		new = pmd_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pmd_mkwrite(pmd_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
	else if (flags & SET_MEMORY_X)
		pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
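
/*
 * Walk the segment table of the range: fully covered large entries are
 * modified in place, partially covered ones are split first, and normal
 * entries descend to the page table level.
 */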
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int rc = 0;
	pmd_t *pmdp;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, flags);
		} else {
			rc = walk_pte_level(pmdp, addr, next, flags);
			if (rc)
				return rc;
		}
		pmdp++;
		addr = next;
		cond_resched();
	} while (addr < end);
	return 0;
}
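
/*
 * Split a huge (2GB) region mapping into 1MB segment mappings; the
 * region-third analogue of split_pmd_page().
 */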
static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro, nx;

	pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	if (!nx)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_val(*pmdp) = pmd_addr | prot;
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}
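
/* Apply the SET_MEMORY_* flags to one huge (2GB) region entry. */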
static void modify_pud_page(pud_t *pudp, unsigned long addr,
			    unsigned long flags)
{
	pud_t new = *pudp;

	if (flags & SET_MEMORY_RO)
		new = pud_wrprotect(new);
	else if (flags & SET_MEMORY_RW)
		new = pud_mkwrite(pud_mkdirty(new));
	if (flags & SET_MEMORY_NX)
		pud_val(new) |= _REGION_ENTRY_NOEXEC;
	else if (flags & SET_MEMORY_X)
		pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
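
/* Walk the region-third table; the 2GB analogue of walk_pmd_level(). */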
static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int rc = 0;
	pud_t *pudp;

	pudp = pud_offset(p4d, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, flags);
		} else {
			rc = walk_pmd_level(pudp, addr, next, flags);
		}
		pudp++;
		addr = next;
	} while (addr < end && !rc);
	return rc;
}

static int walk_p4d_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  unsigned long flags)
{
	unsigned long next;
	int rc = 0;
	p4d_t *p4dp;

	p4dp = p4d_offset(pgd, addr);
	do {
		if (p4d_none(*p4dp))
			return -EINVAL;
		next = p4d_addr_end(addr, end);
		rc = walk_pud_level(p4dp, addr, next, flags);
		p4dp++;
		addr = next;
	} while (addr < end && !rc);
	return rc;
}
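
/*
 * Serializes all page attribute changes: splitting a large mapping
 * must not race with another walk over the same tables.
 */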
DEFINE_MUTEX(cpa_mutex);

static int change_page_attr(unsigned long addr, unsigned long end,
			    unsigned long flags)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_p4d_level(pgdp, addr, next, flags);
		if (rc)
			break;
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}
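
/*
 * Common worker for the set_memory_*() variants: drop the NX/X requests
 * when the machine has no NX facility, then walk the kernel page tables
 * for the given page range.
 */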
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
	if (!MACHINE_HAS_NX)
		flags &= ~(SET_MEMORY_NX | SET_MEMORY_X);
	if (!flags)
		return 0;
	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
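
/*
 * Invalidate and flush @nr page table entries starting at @address: in
 * one go via the IPTE-range facility (facility 13) when available,
 * otherwise with one IPTE per page.
 */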
static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, 0, 0, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}
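
/*
 * Map (enable) or unmap (disable) numpages pages for DEBUG_PAGEALLOC.
 * The arithmetic on the pte pointer derives its index within its page
 * table so that each inner pass never crosses the end of the current
 * table: nr is the number of entries left from pte to the table end.
 */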
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	pte_t *pte;
	int nr, i, j;

	for (i = 0; i < numpages;) {
		address = page_to_phys(page + i);
		pte = virt_to_kpte(address);
		nr = (unsigned long)pte >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte_val(*pte) &= ~_PAGE_INVALID;
				address += PAGE_SIZE;
				pte++;
			}
		} else {
			ipte_range(pte, address, nr);
		}
		i += nr;
	}
}

#endif /* CONFIG_DEBUG_PAGEALLOC */