/*
 *    Copyright IBM Corp. 2007,2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

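/*
 * Per-cpu batch of page tables whose freeing is deferred to an RCU
 * grace period. Page table fragments fill table[] from the bottom
 * (pgt_index counts up), crst tables from the top (crst_index counts
 * down); the batch is flushed once the two indices meet.
 */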
struct rcu_table_freelist {
	struct rcu_head rcu;
	struct mm_struct *mm;
	unsigned int pgt_index;
	unsigned int crst_index;
	unsigned long *table[0];
};

#define RCU_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
	  / sizeof(unsigned long))

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);

static void __page_table_free(struct mm_struct *mm, unsigned long *table);

static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
{
	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
	struct rcu_table_freelist *batch = *batchp;

	if (batch)
		return batch;
	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
	if (batch) {
		batch->mm = mm;
		batch->pgt_index = 0;
		batch->crst_index = RCU_FREELIST_SIZE;
		*batchp = batch;
	}
	return batch;
}

static void rcu_table_freelist_callback(struct rcu_head *head)
{
	struct rcu_table_freelist *batch =
		container_of(head, struct rcu_table_freelist, rcu);

	while (batch->pgt_index > 0)
		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
	while (batch->crst_index < RCU_FREELIST_SIZE)
		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
	free_page((unsigned long) batch);
}

void rcu_table_freelist_finish(void)
{
	struct rcu_table_freelist *batch = __get_cpu_var(rcu_table_freelist);

	if (!batch)
		return;
	call_rcu(&batch->rcu, rcu_table_freelist_callback);
	__get_cpu_var(rcu_table_freelist) = NULL;
}

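/*
 * Empty function, called via smp_call_function() when no batch memory
 * is available: forcing every other cpu through an interrupt acts as
 * a barrier against concurrent lockless page table walkers before a
 * table is freed immediately instead of via RCU.
 */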
static void smp_sync(void *arg)
{
}

#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define TABLES_PER_PAGE	4
#define FRAG_MASK	15UL
#define SECOND_HALVES	10UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 256, 0, PAGE_SIZE/4);
	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
	memset(table + 768, 0, PAGE_SIZE/4);
}

#else
#define ALLOC_ORDER	2
#define TABLES_PER_PAGE	2
#define FRAG_MASK	3UL
#define SECOND_HALVES	2UL

void clear_table_pgstes(unsigned long *table)
{
	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
	memset(table + 256, 0, PAGE_SIZE/2);
}

#endif

unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;
	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
	return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}

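/*
 * Free a crst table with RCU protection. If the mm has at most one
 * user and runs only on the current cpu, no other cpu can walk the
 * page tables and the table is freed directly. Otherwise it is queued
 * on the per-cpu RCU batch; if no batch can be allocated, all other
 * cpus are forced through smp_sync() before the direct free.
 */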
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		crst_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		crst_table_free(mm, table);
		return;
	}
	batch->table[--batch->crst_index] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

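/*
 * Grow the address space by adding a higher region table level:
 * a 2 GiB asce is upgraded to a region-third table (4 TiB) and a
 * region-third asce to a region-second table (8 PiB). The new top
 * level table is installed above the old mm->pgd.
 */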
#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;

	BUG_ON(limit > (1UL << 53));
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	update_mm(mm, current);
	return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (mm->context.asce_limit <= limit)
		return;
	__tlb_flush_mm(mm);
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	update_mm(mm, current);
}
#endif

/*
 * page table entry allocation/free routines.
 */
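/*
 * A 4K page holds up to TABLES_PER_PAGE page table fragments of 256
 * entries each; allocated fragments are tracked by the low FRAG_MASK
 * bits of page->flags. An mm with pgstes claims two adjacent bits at
 * once (bits = 3UL), so every page table half is followed by its
 * pgste half at an offset of 256 entries.
 */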
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	spin_lock_bh(&mm->context.list_lock);
	page = NULL;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
			page = NULL;
	}
	if (!page) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		pgtable_page_ctor(page);
		page->flags &= ~FRAG_MASK;
		table = (unsigned long *) page_to_phys(page);
		if (mm->context.has_pgste)
			clear_table_pgstes(table);
		else
			clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	}
	table = (unsigned long *) page_to_phys(page);
	while (page->flags & bits) {
		table += 256;
		bits <<= 1;
	}
	page->flags |= bits;
	if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
		list_move_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}

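/*
 * Free a page table fragment whose allocation bits were encoded into
 * the low bits of the table pointer by page_table_free_rcu. The page
 * is released once no fragment in it is in use anymore.
 */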
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = ((unsigned long) table) & 15;
	table = (unsigned long *)(((unsigned long) table) ^ bits);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	page->flags ^= bits;
	if (!(page->flags & FRAG_MASK)) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned long bits;

	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	page->flags ^= bits;
	if (page->flags & FRAG_MASK) {
		/* Page now has some free pgtable fragments. */
		if (!list_empty(&page->lru))
			list_move(&page->lru, &mm->context.pgtable_list);
		page = NULL;
	} else
		/* All fragments of the 4K page have been freed. */
		list_del(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	if (page) {
		pgtable_page_dtor(page);
		__free_page(page);
	}
}

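/*
 * RCU variant of page_table_free. The fragment's page is taken off
 * the pgtable_list so the fragment cannot be reallocated before the
 * grace period has expired, and the allocation bits are smuggled to
 * the RCU callback in the low bits of the table pointer.
 */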
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
{
	struct rcu_table_freelist *batch;
	struct page *page;
	unsigned long bits;

	if (atomic_read(&mm->mm_users) < 2 &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
		page_table_free(mm, table);
		return;
	}
	batch = rcu_table_freelist_get(mm);
	if (!batch) {
		smp_call_function(smp_sync, NULL, 1);
		page_table_free(mm, table);
		return;
	}
	bits = (mm->context.has_pgste) ? 3UL : 1UL;
	bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	spin_lock_bh(&mm->context.list_lock);
	/* Delayed freeing with rcu prevents reuse of pgtable fragments */
	list_del_init(&page->lru);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *)(((unsigned long) table) | bits);
	batch->table[batch->pgt_index++] = table;
	if (batch->pgt_index >= batch->crst_index)
		rcu_table_freelist_finish();
}

/*
 * switch on pgstes for its userspace process (for kvm)
 */
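/*
 * The mm cannot be converted in place: a copy with pgste-capable page
 * tables is created with dup_mm while alloc_pgste is set, and replaces
 * the old mm if the task is still single threaded afterwards.
 */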
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm, *old_mm;

	/* Do we have switched amode? If no, we cannot do sie */
	if (user_mode == HOME_SPACE_MODE)
		return -EINVAL;

	/* Do we have pgstes? if yes, we are done */
	if (tsk->mm->context.has_pgste)
		return 0;

	/* lets check if we are allowed to replace the mm */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		task_unlock(tsk);
		return -EINVAL;
	}
	task_unlock(tsk);

	/* we copy the mm and let dup_mm create the page tables with_pgstes */
	tsk->mm->context.alloc_pgste = 1;
	mm = dup_mm(tsk);
	tsk->mm->context.alloc_pgste = 0;
	if (!mm)
		return -ENOMEM;

	/* Now lets check again if something happened */
	task_lock(tsk);
	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
	    !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
	    tsk->mm != tsk->active_mm) {
		mmput(mm);
		task_unlock(tsk);
		return -EINVAL;
	}

	/* ok, we are alone. No ptrace, no threads, etc. */
	old_mm = tsk->mm;
	tsk->mm = tsk->active_mm = mm;
	preempt_disable();
	update_mm(mm, tsk);
	atomic_inc(&mm->context.attach_count);
	atomic_dec(&old_mm->context.attach_count);
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
	preempt_enable();
	task_unlock(tsk);
	mmput(old_mm);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

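/*
 * Check for a mapped kernel page by performing a load-real-address
 * (lra) on its physical address and extracting the condition code;
 * cc 0 means the address translated successfully.
 */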
#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
bool kernel_page_present(struct page *page)
{
	unsigned long addr;
	int cc;

	addr = page_to_phys(page);
	asm volatile(
		"	lra	%1,0(%1)\n"
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=d" (cc), "+a" (addr) : : "cc");
	return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */