/*
 * Copyright IBM Corp. 2011
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
#include <asm/pgtable.h>
#include <asm/page.h>

static inline unsigned long sske_frame(unsigned long addr, unsigned char skey)
{
	/*
	 * SSKE with the multiple-block control: sets the storage key for
	 * blocks within a 1MB frame and may stop early, returning the
	 * address of the next unprocessed block (hence the caller's loop).
	 */
	asm volatile(".insn rrf,0xb22b0000,%[skey],%[addr],9,0"
		     : [addr] "+a" (addr) : [skey] "d" (skey));
	return addr;
}

void __storage_key_init_range(unsigned long start, unsigned long end)
{
	unsigned long boundary, size;

	if (!PAGE_DEFAULT_KEY)
		return;
	while (start < end) {
		if (MACHINE_HAS_EDAT1) {
			/* set storage keys for a 1MB frame */
			size = 1UL << 20;
			boundary = (start + size) & ~(size - 1);
			if (boundary <= end) {
				do {
					start = sske_frame(start, PAGE_DEFAULT_KEY);
				} while (start < boundary);
				continue;
			}
		}
		page_set_storage_key(start, PAGE_DEFAULT_KEY, 0);
		start += PAGE_SIZE;
	}
}
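
/*
 * Illustrative example (not in the original source): with EDAT1, for
 * start = 0x180000 and end = 0x400000 the first boundary is 0x200000,
 * so sske_frame() first covers the partial frame [0x180000, 0x200000)
 * and then each following 1MB frame in turn.  Only a tail that ends
 * before the next 1MB boundary falls back to page_set_storage_key().
 */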

#ifdef CONFIG_PROC_FS
atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
	seq_printf(m, "DirectMap1M:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_1M]) << 10);
	seq_printf(m, "DirectMap2G:    %8lu kB\n",
		   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_2G]) << 21);
}
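
/*
 * The shifts convert page counts to kB: a 4K page is 2^2 kB, a 1M
 * segment 2^10 kB and a 2G region 2^21 kB.  Sample output in
 * /proc/meminfo (values illustrative only):
 *
 *   DirectMap4k:        5120 kB
 *   DirectMap1M:     2091008 kB
 *   DirectMap2G:     4194304 kB
 */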
#endif /* CONFIG_PROC_FS */

static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
		    unsigned long dtt)
{
	unsigned long table, mask;

	mask = 0;
	if (MACHINE_HAS_EDAT2) {
		switch (dtt) {
		case CRDTE_DTT_REGION3:
			mask = ~(PTRS_PER_PUD * sizeof(pud_t) - 1);
			break;
		case CRDTE_DTT_SEGMENT:
			mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
			break;
		case CRDTE_DTT_PAGE:
			mask = ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
			break;
		}
		table = (unsigned long)old & mask;
		crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce);
	} else if (MACHINE_HAS_IDTE) {
		cspg(old, *old, new);
	} else {
		csp((unsigned int *)old + 1, *old, new);
	}
}
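
/*
 * Note on pgt_set(): with EDAT2, CRDTE replaces the DAT table entry and
 * purges the TLB in a single step; without EDAT2 but with IDTE, CSPG
 * compare-and-swap-and-purges the full 64-bit entry; the plain CSP
 * fallback operates on a 32-bit word, hence the "+ 1" that targets the
 * low-order word of the entry.
 */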

struct cpa {
	unsigned int set_ro	: 1;
	unsigned int clear_ro	: 1;
};

static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
			  struct cpa cpa)
{
	pte_t *ptep, new;

	ptep = pte_offset(pmdp, addr);
	do {
		if (pte_none(*ptep))
			return -EINVAL;
		if (cpa.set_ro)
			new = pte_wrprotect(*ptep);
		else if (cpa.clear_ro)
			new = pte_mkwrite(pte_mkdirty(*ptep));
		pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
		ptep++;
		addr += PAGE_SIZE;
		cond_resched();
	} while (addr < end);
	return 0;
}
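
/*
 * When a request covers only part of a large mapping, the mapping must
 * first be split: write-protecting a single 4K page inside a 1MB
 * segment replaces the segment entry with a page table of PTRS_PER_PTE
 * entries (below), after which only the affected PTEs are changed.
 */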

static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
{
	unsigned long pte_addr, prot;
	pte_t *pt_dir, *ptep;
	pmd_t new;
	int i, ro;

	pt_dir = vmem_pte_alloc();
	if (!pt_dir)
		return -ENOMEM;
	pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
	ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
	prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
	ptep = pt_dir;
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_val(*ptep) = pte_addr | prot;
		pte_addr += PAGE_SIZE;
		ptep++;
	}
	pmd_val(new) = __pa(pt_dir) | _SEGMENT_ENTRY;
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
	update_page_count(PG_DIRECT_MAP_4K, PTRS_PER_PTE);
	update_page_count(PG_DIRECT_MAP_1M, -1);
	return 0;
}

static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
{
	pmd_t new;

	if (cpa.set_ro)
		new = pmd_wrprotect(*pmdp);
	else if (cpa.clear_ro)
		new = pmd_mkwrite(pmd_mkdirty(*pmdp));
	pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}

static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
			  struct cpa cpa)
{
	unsigned long next;
	pmd_t *pmdp;
	int rc = 0;

	pmdp = pmd_offset(pudp, addr);
	do {
		if (pmd_none(*pmdp))
			return -EINVAL;
		next = pmd_addr_end(addr, end);
		if (pmd_large(*pmdp)) {
			if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
				rc = split_pmd_page(pmdp, addr);
				if (rc)
					return rc;
				continue;
			}
			modify_pmd_page(pmdp, addr, cpa);
		} else {
			rc = walk_pte_level(pmdp, addr, next, cpa);
			if (rc)
				return rc;
			cond_resched();
		}
		pmdp++;
		addr = next;
	} while (addr < end);
	return rc;
}

static int split_pud_page(pud_t *pudp, unsigned long addr)
{
	unsigned long pmd_addr, prot;
	pmd_t *pm_dir, *pmdp;
	pud_t new;
	int i, ro;

	pm_dir = vmem_pmd_alloc();
	if (!pm_dir)
		return -ENOMEM;
	pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
	ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
	prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
	pmdp = pm_dir;
	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_val(*pmdp) = pmd_addr | prot;
		pmd_addr += PMD_SIZE;
		pmdp++;
	}
	pud_val(new) = __pa(pm_dir) | _REGION3_ENTRY;
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
	update_page_count(PG_DIRECT_MAP_1M, PTRS_PER_PMD);
	update_page_count(PG_DIRECT_MAP_2G, -1);
	return 0;
}

static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
{
	pud_t new;

	if (cpa.set_ro)
		new = pud_wrprotect(*pudp);
	else if (cpa.clear_ro)
		new = pud_mkwrite(pud_mkdirty(*pudp));
	pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}

static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
			  struct cpa cpa)
{
	unsigned long next;
	pud_t *pudp;
	int rc = 0;

	pudp = pud_offset(pgd, addr);
	do {
		if (pud_none(*pudp))
			return -EINVAL;
		next = pud_addr_end(addr, end);
		if (pud_large(*pudp)) {
			if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
				rc = split_pud_page(pudp, addr);
				if (rc)
					break;
				continue;
			}
			modify_pud_page(pudp, addr, cpa);
		} else {
			rc = walk_pmd_level(pudp, addr, next, cpa);
		}
		pudp++;
		addr = next;
	} while (addr < end && !rc);
	return rc;
}

static DEFINE_MUTEX(cpa_mutex);

static int change_page_attr(unsigned long addr, unsigned long end,
			    struct cpa cpa)
{
	unsigned long next;
	int rc = -EINVAL;
	pgd_t *pgdp;

	if (addr == end)
		return 0;
	if (end >= MODULES_END)
		return -EINVAL;
	mutex_lock(&cpa_mutex);
	pgdp = pgd_offset_k(addr);
	do {
		if (pgd_none(*pgdp))
			break;
		next = pgd_addr_end(addr, end);
		rc = walk_pud_level(pgdp, addr, next, cpa);
		if (rc)
			break;
		cond_resched();
	} while (pgdp++, addr = next, addr < end && !rc);
	mutex_unlock(&cpa_mutex);
	return rc;
}

int set_memory_ro(unsigned long addr, int numpages)
{
	struct cpa cpa = {
		.set_ro = 1,
	};

	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}

int set_memory_rw(unsigned long addr, int numpages)
{
	struct cpa cpa = {
		.clear_ro = 1,
	};

	addr &= PAGE_MASK;
	return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}
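
/*
 * Illustrative sketch (not part of the original source): protecting a
 * page-aligned kernel object with the helpers above.  "example_table"
 * is a hypothetical object used only for demonstration.
 */
#if 0
static u8 example_table[PAGE_SIZE] __aligned(PAGE_SIZE);

static void example_protect_table(void)
{
	/* write-protect exactly one 4K page */
	set_memory_ro((unsigned long)example_table, 1);
	/* ... any store to example_table would now fault ... */
	set_memory_rw((unsigned long)example_table, 1);
}
#endif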

/* no-op stubs: these attribute changes are not implemented here */
int set_memory_nx(unsigned long addr, int numpages)
{
	return 0;
}

int set_memory_x(unsigned long addr, int numpages)
{
	return 0;
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
	int i;

	if (test_facility(13)) {
		/* IPTE-range facility: invalidate all nr entries at once */
		__ptep_ipte_range(address, nr - 1, pte, IPTE_GLOBAL);
		return;
	}
	for (i = 0; i < nr; i++) {
		__ptep_ipte(address, pte, IPTE_GLOBAL);
		address += PAGE_SIZE;
		pte++;
	}
}

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long address;
	int nr, i, j;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (i = 0; i < numpages;) {
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		/* number of pte entries left in the current page table */
		nr = (unsigned long)pte >> ilog2(sizeof(long));
		nr = PTRS_PER_PTE - (nr & (PTRS_PER_PTE - 1));
		nr = min(numpages - i, nr);
		if (enable) {
			for (j = 0; j < nr; j++) {
				pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
				address += PAGE_SIZE;
				pte++;
			}
		} else {
			ipte_range(pte, address, nr);
		}
		i += nr;
	}
}
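
/*
 * With CONFIG_DEBUG_PAGEALLOC, __kernel_map_pages() is called when
 * pages are allocated and freed: freed pages are invalidated via
 * ipte_range() so that any use-after-free access faults immediately,
 * and allocation maps them back with PAGE_KERNEL.
 */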

#ifdef CONFIG_HIBERNATION
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	/* LRA sets cc != 0 if no translation exists for the address */
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */