/*
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};
#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))
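
/*
 * A per-cpu batch collects tables awaiting an RCU grace period.
 * Page table fragments fill table[] from the front (pgt_index counts
 * up), crst tables fill it from the back (crst_index counts down);
 * the batch is flushed once the two indices meet.  A rough sizing
 * example, assuming 4KB pages and a 32-byte struct header on 64-bit:
 * RCU_FREELIST_SIZE = (4096 - 32) / 8 = 508 slots per batch.
 */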
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}
static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}
void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (!batch)
		goto out;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	*batchp = NULL;
out:
	put_cpu_var(rcu_table_freelist);
}
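
/*
 * smp_sync() is intentionally empty: running it synchronously on all
 * other cpus via smp_call_function(..., 1) acts as a barrier, making
 * sure no remote cpu still dereferences a page table that is about to
 * be freed on the fallback paths below.
 */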
static void smp_sync(void *arg)
{
}
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else /* CONFIG_64BIT */
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif /* CONFIG_64BIT */
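
/*
 * When a mm uses pgstes (has_pgste), each page table fragment is a
 * pair: 256 pte entries followed by 256 pgste extension entries used
 * by SIE/KVM for guest storage keys.  clear_table_pgstes() therefore
 * sets the pte half(ves) to _PAGE_TYPE_EMPTY and zero-fills the pgste
 * half(ves): on 64-bit one 2KB pte block at offset 0 and one 2KB
 * pgste block at offset 2KB; on 31-bit two 1KB pte blocks at offsets
 * 0KB and 2KB, each followed by its 1KB pgste block.
 */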
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);
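
/*
 * Example (hypothetical command line): booting with "vmalloc=512M"
 * makes memparse() return 512 << 20, moving VMALLOC_START down to
 * VMALLOC_END - 512MB, rounded to a page boundary by PAGE_MASK.
 */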
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	preempt_disable();
	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		goto out;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		goto out;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
out:
	preempt_enable();
}
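
/*
 * Both *_free_rcu variants share the same pattern: if the mm has a
 * single user and is attached only to the current cpu, no other cpu
 * can be walking the table, so it is freed immediately.  Otherwise
 * the table goes into the per-cpu RCU batch; if no batch page can be
 * allocated, a synchronous smp_call_function() round trip serves as
 * a fallback barrier before freeing directly.
 */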
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}
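
/*
 * Worked example: a task that starts with a 31-bit address space
 * (asce_limit = 2GB) and needs more first gains a region-3 table
 * (asce_limit = 1UL << 42, 4TB) and, if the requested limit is still
 * higher, a region-2 table on the next pass of the repeat loop
 * (asce_limit = 1UL << 53).  Each pass stacks one new top-level
 * table above the old pgd via pgd_populate().
 */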
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif /* CONFIG_64BIT */
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
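
/*
 * Fragment bookkeeping: the low bits of page->flags record which
 * 256-entry fragments of a 4KB page are in use (TABLES_PER_PAGE of
 * them, FRAG_MASK covering all).  A mm with pgstes claims two
 * adjacent fragments at once (bits = 3UL) so each pte fragment keeps
 * its pgste companion.  A fully used page is moved to the tail of
 * pgtable_list so that partially free pages are found first.
 */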
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}
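
/*
 * The shift in page_table_free() recovers the fragment bit from the
 * table address: (offset within page) / 256 / sizeof(unsigned long).
 * Example on 64-bit: a table at page offset 0x800 gives
 * 2048 / 256 / 8 = 1, so bits = 1UL << 1, the second 2KB fragment.
 */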
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	preempt_disable();
	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		goto out;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		goto out;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
out:
	preempt_enable();
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If not, we cannot do SIE. */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? If yes, we are done. */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* Let's check if we are allowed to replace the mm. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* We copy the mm and let dup_mm create the page tables with pgstes. */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now let's check again if something happened. */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* OK, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
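
/*
 * A note on the mm replacement above: page tables allocated without
 * pgstes have no room for the extension entries SIE needs, so the
 * only way to gain them is to rebuild every page table.  Toggling
 * alloc_pgste around dup_mm() makes page_table_alloc() lay out
 * pte+pgste pairs for the copy; the task is then switched over to
 * the new mm and the old one is dropped.
 */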
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"	/* load real address */
		"	ipm	%0\n"		/* insert program mask */
		"	srl	%0,28"		/* isolate the condition code */
		: "=d" (cc), "+a" (addr) : : "cc");
	/* LRA sets condition code 0 iff the address translates. */
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */