/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (ie, SW loaded TLBs or Book3E compliant processors,
 * this does -not- include 603 however which shares the implementation with
 * hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include "mmu_decl.h"

/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */
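
/*
 * Note on the .ind values above: where present, .ind is log2 (in bytes) of
 * the indirect page size whose subpages have this entry's size, e.g. 4K
 * subpages (.ind = 20) are what a 1M indirect page-table page is carved
 * into. setup_page_sizes() below recomputes this from EPTCFG when the
 * hardware reports its own pairings.
 */
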
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
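
/*
 * Illustration (not from the original file): the tsize returned here is fed
 * straight to the low-level invalidation helpers, e.g. for the base page
 * size:
 *
 *	_tlbil_va(vmaddr, pid, mmu_get_tsize(mmu_virtual_psize), 0);
 *
 * so .enc must hold the hardware (MAS1 TSIZE) encoding of the page size.
 */
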
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions. This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};
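
/*
 * The param block lives on the caller's stack: the wait=1 argument passed
 * to smp_call_function_many() below keeps it valid until every remote
 * handler has consumed it.
 */
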
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (unlikely(WARN_ON(!mm)))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * while preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
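
/*
 * A worked example of the VPTE arithmetic above, assuming 4K pages
 * (PAGE_SHIFT = 12): each 8-byte PTE covers one 4K page, so the byte offset
 * of the PTE for `address` inside the virtual linear table is
 * (address >> 12) * 8 == address >> 9; masking with ~0xffful then rounds
 * down to the 4K page of PTEs, and OR-ing in `rid` places the result in the
 * virtual linear table region, so a single flush covers the freed PTE page.
 */
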
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}
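
	/*
	 * For example, 16M pages have def->shift == 24: (24 - 10) >> 1 == 7,
	 * and 4^7 KB == 16M, matching the 4^n KB units used by the MAV 1.0
	 * TLB1CFG MINSIZE/MAXSIZE fields.
	 */
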
	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			/* skip sizes the sw table doesn't define (shift 0) */
			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		/* skip sizes the sw table doesn't define (shift 0) */
		if (!def->shift)
			continue;

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
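
	/*
	 * EPTCFG is consumed above as up to three (SPS, PS) pairs packed in
	 * 5-bit fields, each value being log2 of a size in KB: e.g. ps == 10
	 * with sps == 2 would pair 1M (2^10 KB) indirect entries with 4K
	 * (2^2 KB) subpages, recording .ind == 20 on the 4K entry.
	 */
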
out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */
	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
		break;

	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		int __maybe_unused cpu = smp_processor_id();
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux. We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit). We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one. Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped. We still limit max to 1G even if we'll eventually map
	 * more. This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */