/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#define K(x) ((x) << (PAGE_SHIFT-10))
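/* K(x) scales a count of pages to kilobytes: x * (PAGE_SIZE / 1024). */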
/*
 * The normal show_free_areas() is too verbose on Tile, with dozens
 * of processors and often four NUMA zones each with high and lowmem.
 */
void show_mem(unsigned int filter)
{
	struct zone *zone;

	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
	       (global_node_page_state(NR_ACTIVE_ANON) +
		global_node_page_state(NR_ACTIVE_FILE)),
	       (global_node_page_state(NR_INACTIVE_ANON) +
		global_node_page_state(NR_INACTIVE_FILE)),
	       global_node_page_state(NR_FILE_DIRTY),
	       global_node_page_state(NR_WRITEBACK),
	       global_node_page_state(NR_UNSTABLE_NFS),
	       global_page_state(NR_FREE_PAGES),
	       (global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE)),
	       global_node_page_state(NR_FILE_MAPPED),
	       global_page_state(NR_PAGETABLE),
	       global_page_state(NR_BOUNCE),
	       global_node_page_state(NR_FILE_PAGES),
	       get_nr_swap_pages());

	for_each_zone(zone) {
		unsigned long flags, order, total = 0, largest_order = -1;

		if (!populated_zone(zone))
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			int nr = zone->free_area[order].nr_free;
			total += nr << order;
			if (nr)
				largest_order = order;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		pr_err("Node %d %7s: %lukB (largest %luKb)\n",
		       zone_to_nid(zone), zone->name,
		       K(total), largest_order ? K(1UL) << largest_order : 0);
	}
}
/**
 * shatter_huge_page() - ensure a given address is mapped by a small page.
 *
 * This function converts a huge PTE mapping kernel LOWMEM into a bunch
 * of small PTEs with the same caching.  No cache flush required, but we
 * must do a global TLB flush.
 *
 * Any caller that wishes to modify a kernel mapping that might
 * have been made with a huge page should call this function,
 * since doing so properly avoids race conditions with installing the
 * newly-shattered page and then flushing all the TLB entries.
 *
 * @addr: Address at which to shatter any existing huge page.
 */
void shatter_huge_page(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long flags = 0;  /* happy compiler */
#ifdef __PAGETABLE_PMD_FOLDED
	struct list_head *pos;
#endif

	/* Get a pointer to the pmd entry that we need to change. */
	addr &= HPAGE_MASK;
	BUG_ON(pgd_addr_invalid(addr));
	BUG_ON(addr < PAGE_OFFSET);  /* only for kernel LOWMEM */
	pgd = swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	BUG_ON(!pud_present(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	if (!pmd_huge_page(*pmd))
		return;

	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (!pmd_huge_page(*pmd)) {
		/* Lost the race to convert the huge page. */
		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
		return;
	}

	/* Shatter the huge page into the preallocated L2 page table. */
	pmd_populate_kernel(&init_mm, pmd, get_prealloc_pte(pmd_pfn(*pmd)));

#ifdef __PAGETABLE_PMD_FOLDED
	/* Walk every pgd on the system and update the pmd there. */
	spin_lock(&pgd_lock);
	list_for_each(pos, &pgd_list) {
		pmd_t *copy_pmd;
		pgd = list_to_pgd(pos) + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		copy_pmd = pmd_offset(pud, addr);
		__set_pmd(copy_pmd, *pmd);
	}
	spin_unlock(&pgd_lock);
#endif

	/* Tell every cpu to notice the change. */
	flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
		     cpu_possible_mask, NULL, 0);

	/* Hold the lock until the TLB flush is finished to avoid races. */
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
}
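/*
 * Illustrative sketch (hypothetical caller, not part of this file): code
 * about to change the protections of a single small kernel page would
 * first break up any covering huge mapping, then edit the small PTE:
 *
 *	shatter_huge_page((unsigned long)vaddr);
 *	ptep = virt_to_kpte((unsigned long)vaddr);
 *	set_pte(ptep, pte_wrprotect(*ptep));
 *
 * "vaddr" and "ptep" are placeholder names, and the caller's own TLB
 * flush of the modified page is omitted; the point is only that the
 * shatter happens before the per-page PTE is touched.
 */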
/*
 * List of all pgd's needed so it can invalidate entries in both cached
 * and uncached pgd's. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 *
 * The lock is always taken with interrupts disabled, unlike on x86
 * and other platforms, because we need to take the lock in
 * shatter_huge_page(), which may be called from an interrupt context.
 * We are not at risk from the tlbflush IPI deadlock that was seen on
 * x86, since we use the flush_remote() API to have the hypervisor do
 * the TLB flushes regardless of irq disabling.
 */
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);
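/*
 * Every pgd is linked onto pgd_list by pgd_ctor() and unlinked by
 * pgd_dtor(), so shatter_huge_page() can walk the list under pgd_lock
 * and update the corresponding kernel pmd in every page table.
 */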
static inline void pgd_list_add(pgd_t *pgd)
{
	list_add(pgd_to_list(pgd), &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	list_del(pgd_to_list(pgd));
}
#define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_INDEX_START)
static void pgd_ctor(pgd_t *pgd)
{
	unsigned long flags;

	memset(pgd, 0, KERNEL_PGD_INDEX_START*sizeof(pgd_t));
	spin_lock_irqsave(&pgd_lock, flags);

#ifndef __tilegx__
	/*
	 * Check that the user interrupt vector has no L2.
	 * It never should for the swapper, and new page tables
	 * should always start with an empty user interrupt vector.
	 */
	BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0);
#endif

	memcpy(pgd + KERNEL_PGD_INDEX_START,
	       swapper_pg_dir + KERNEL_PGD_INDEX_START,
	       KERNEL_PGD_PTRS * sizeof(pgd_t));

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_dtor(pgd);
	kmem_cache_free(pgd_cache, pgd);
}
#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
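/* Number of small pages spanned by one user L2 (bottom-level) page table. */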
struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
			       int order)
{
	gfp_t flags = GFP_KERNEL|__GFP_ZERO;
	struct page *p;
	int i;

	p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
	if (p == NULL)
		return NULL;

	if (!pgtable_page_ctor(p)) {
		__free_pages(p, L2_USER_PGTABLE_ORDER);
		return NULL;
	}

	/*
	 * Make every page have a page_count() of one, not just the first.
	 * We don't use __GFP_COMP since it doesn't look like it works
	 * correctly with tlb_remove_page().
	 */
	for (i = 1; i < order; ++i) {
		init_page_count(p+i);
		inc_zone_page_state(p+i, NR_PAGETABLE);
	}

	return p;
}
/*
 * Free page immediately (used in __pte_alloc if we raced with another
 * process).  We have to correct whatever pte_alloc_one() did before
 * returning the pages to the allocator.
 */
void pgtable_free(struct mm_struct *mm, struct page *p, int order)
{
	int i;

	pgtable_page_dtor(p);
	__free_page(p);

	for (i = 1; i < order; ++i) {
		__free_page(p+i);
		dec_zone_page_state(p+i, NR_PAGETABLE);
	}
}
void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
			unsigned long address, int order)
{
	int i;

	pgtable_page_dtor(pte);
	tlb_remove_page(tlb, pte);

	for (i = 1; i < order; ++i) {
		tlb_remove_page(tlb, pte + i);
		dec_zone_page_state(pte + i, NR_PAGETABLE);
	}
}
#ifndef __tilegx__

/*
 * FIXME: needs to be atomic vs hypervisor writes.  For now we make the
 * window of vulnerability a bit smaller by doing an unlocked 8-bit update.
 */
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_ACCESSED < 8 || HV_PTE_INDEX_ACCESSED >= 16
# error Code assumes HV_PTE "accessed" bit in second byte
#endif
	u8 *tmp = (u8 *)ptep;
	u8 second_byte = tmp[1];
	if (!(second_byte & (1 << (HV_PTE_INDEX_ACCESSED - 8))))
		return 0;
	tmp[1] = second_byte & ~(1 << (HV_PTE_INDEX_ACCESSED - 8));
	return 1;
}
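/*
 * Worked example: if HV_PTE_INDEX_ACCESSED were 10, the "accessed" bit is
 * bit 2 of byte 1 of the PTE, so the 8-bit store above touches only that
 * byte and leaves the other seven bytes of the PTE alone.
 */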
/*
 * This implementation is atomic vs hypervisor writes, since the hypervisor
 * always writes the low word (where "accessed" and "dirty" are) and this
 * routine only writes the high word.
 */
void ptep_set_wrprotect(struct mm_struct *mm,
			unsigned long addr, pte_t *ptep)
{
#if HV_PTE_INDEX_WRITABLE < 32
# error Code assumes HV_PTE "writable" bit in high word
#endif
	u32 *tmp = (u32 *)ptep;
	tmp[1] = tmp[1] & ~(1 << (HV_PTE_INDEX_WRITABLE - 32));
}

#endif /* !__tilegx__ */
/*
 * Return a pointer to the PTE that corresponds to the given
 * address in the given page table.  A NULL page table just uses
 * the standard kernel page table; the preferred API in this case
 * is virt_to_kpte().
 *
 * The returned pointer can point to a huge page in other levels
 * of the page table than the bottom, if the huge page is present
 * in the page table.  For bottom-level PTEs, the returned pointer
 * can point to a PTE that is either present or not.
 */
pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_addr_invalid(addr))
		return NULL;

	pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		return NULL;
	if (pud_huge_page(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_huge_page(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, addr);
}
EXPORT_SYMBOL(virt_to_pte);
pte_t *virt_to_kpte(unsigned long kaddr)
{
	BUG_ON(kaddr < PAGE_OFFSET);
	return virt_to_pte(NULL, kaddr);
}
EXPORT_SYMBOL(virt_to_kpte);
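/*
 * Illustrative sketch (hypothetical caller, not from this file): to inspect
 * the kernel mapping of a lowmem address:
 *
 *	pte_t *ptep = virt_to_kpte((unsigned long)page_address(page));
 *	if (ptep && pte_present(*ptep))
 *		examine pte_pfn(*ptep), hv_pte_get_mode(*ptep), etc.
 *
 * Note that the returned pointer may reference a huge-page entry at a
 * higher level of the page table, as described above.
 */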
pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu)
{
	unsigned int width = smp_width;
	int x = cpu % width;
	int y = cpu / width;
	BUG_ON(y >= smp_height);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	BUG_ON(cpu < 0 || cpu >= NR_CPUS);
	BUG_ON(!cpu_is_valid_lotar(cpu));
	return hv_pte_set_lotar(prot, HV_XY_TO_LOTAR(x, y));
}
int get_remote_cache_cpu(pgprot_t prot)
{
	HV_LOTAR lotar = hv_pte_get_lotar(prot);
	int x = HV_LOTAR_X(lotar);
	int y = HV_LOTAR_Y(lotar);
	BUG_ON(hv_pte_get_mode(prot) != HV_PTE_MODE_CACHE_TILE_L3);
	return x + y * smp_width;
}
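/*
 * These two helpers are inverses: set_remote_cache_cpu() encodes
 * cpu -> (x, y) = (cpu % smp_width, cpu / smp_width) into the lotar, and
 * get_remote_cache_cpu() recovers x + y * smp_width == cpu.
 */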
/*
 * Convert a kernel VA to a PA and homing information.
 */
int va_to_cpa_and_pte(void *va, unsigned long long *cpa, pte_t *pte)
{
	struct page *page = virt_to_page(va);
	pte_t null_pte = { 0 };

	*cpa = __pa(va);

	/* Note that this is not writing a page table, just returning a pte. */
	*pte = pte_set_home(null_pte, page_home(page));

	return 0; /* return non-zero if not hfh? */
}
EXPORT_SYMBOL(va_to_cpa_and_pte);
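/*
 * On tilepro (no __tilegx__) a PTE is written as two 32-bit words, so
 * __set_pte() below must order the two stores: the word holding the
 * "present" and "migrating" bits is written last when making a PTE
 * present, and first when making it non-present.  That way another cpu
 * can never observe a half-written PTE that looks valid.
 */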
void __set_pte(pte_t *ptep, pte_t pte)
{
#ifdef __tilegx__
	*ptep = pte;
#else
# if HV_PTE_INDEX_PRESENT >= 32 || HV_PTE_INDEX_MIGRATING >= 32
#  error Must write the present and migrating bits last
# endif
	if (pte_present(pte)) {
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
		barrier();
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
	} else {
		((u32 *)ptep)[0] = (u32)(pte_val(pte));
		barrier();
		((u32 *)ptep)[1] = (u32)(pte_val(pte) >> 32);
	}
#endif /* __tilegx__ */
}
void set_pte(pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) &&
	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
		/* The PTE actually references physical memory. */
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			/* Update the home of the PTE from the struct page. */
			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
		} else if (hv_pte_get_mode(pte) == 0) {
			/* remap_pfn_range(), etc, must supply PTE mode. */
			panic("set_pte(): out-of-range PFN and mode 0\n");
		}
	}

	__set_pte(ptep, pte);
}
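/*
 * In short: set_pte() is the checked entry point that fixes up the PTE's
 * caching home from the struct page (or insists on an explicit mode for an
 * out-of-range PFN), while __set_pte() is the raw, correctly-ordered write.
 */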
/* Can this mm load a PTE with cached_priority set? */
static inline int mm_is_priority_cached(struct mm_struct *mm)
{
	return mm->context.priority_cached != 0;
}
/*
 * Add a priority mapping to an mm_context and
 * notify the hypervisor if this is the first one.
 */
void start_mm_caching(struct mm_struct *mm)
{
	if (!mm_is_priority_cached(mm)) {
		mm->context.priority_cached = -1UL;
		hv_set_caching(-1UL);
	}
}
/*
 * Validate and return the priority_cached flag.  We know if it's zero
 * that we don't need to scan, since we immediately set it non-zero
 * when we first consider a MAP_CACHE_PRIORITY mapping.
 *
 * We only _try_ to acquire the mmap_sem semaphore; if we can't acquire it,
 * since we're in an interrupt context (servicing switch_mm) we don't
 * worry about it and don't unset the "priority_cached" field.
 * Presumably we'll come back later and have more luck and clear
 * the value then; for now we'll just keep the cache marked for priority.
 */
static unsigned long update_priority_cached(struct mm_struct *mm)
{
	if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
		struct vm_area_struct *vm;
		for (vm = mm->mmap; vm; vm = vm->vm_next) {
			if (hv_pte_get_cached_priority(vm->vm_page_prot))
				break;
		}
		if (vm == NULL)
			mm->context.priority_cached = 0;
		up_write(&mm->mmap_sem);
	}
	return mm->context.priority_cached;
}
/* Set caching correctly for an mm that we are switching to. */
void check_mm_caching(struct mm_struct *prev, struct mm_struct *next)
{
	if (!mm_is_priority_cached(next)) {
		/*
		 * If the new mm doesn't use priority caching, just see if we
		 * need the hv_set_caching(), or can assume it's already zero.
		 */
		if (mm_is_priority_cached(prev))
			hv_set_caching(0);
	} else {
		hv_set_caching(update_priority_cached(next));
	}
}
#if CHIP_HAS_MMIO()

/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   pgprot_t home)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/* Create a read/write, MMIO VA mapping homed at the requested shim. */
	pgprot = PAGE_KERNEL;
	pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO);
	pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home));

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	area = get_vm_area(size, VM_IOREMAP /* | other flags? */);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		free_vm_area(area);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(ioremap_prot);
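/*
 * Illustrative sketch (hypothetical device, not from this file):
 *
 *	void __iomem *regs = ioremap_prot(dev_phys_base, dev_reg_size, home);
 *	if (regs) {
 *		u32 v = readl(regs + 0x10);	-- hypothetical register offset
 *		...
 *		iounmap(regs);
 *	}
 *
 * "dev_phys_base", "dev_reg_size" and "home" are placeholders supplied by
 * the caller; "home" carries the lotar of the shim that owns the device.
 */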
/* Unmap an MMIO VA mapping. */
void iounmap(volatile void __iomem *addr_in)
{
	volatile void __iomem *addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr_in);
#if 1
	vunmap((void * __force)addr);
#else
	/* x86 uses this complicated flow instead of vunmap().  Is
	 * there any particular reason we should do the same? */
	struct vm_struct *p, *o;

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void *)addr);

	if (!p) {
		pr_err("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
#endif
}
EXPORT_SYMBOL(iounmap);
#endif /* CHIP_HAS_MMIO() */