/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *      = page-fault thrice as fast (75 usec to 28 usec)
 *      = mmap twice as fast (9.6 msec to 4.6 msec),
 *      = fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *      helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *      = need not "ceil" @end
 *      = walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd.
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into same set, there would be contention for the 2 ways causing severe
 * thrashing.
 *
 * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBs which have
 * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
 * Given this, the thrashing problem should never happen because once the 3
 * J-TLB entries are created (even though the 3rd will knock out one of the
 * prev two), the u-D-TLB and u-I-TLB will have what is required to accomplish
 * memcpy.
 *
 * Yet we still see the thrashing because a J-TLB Write causes flush of u-TLBs.
 * This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the assoc
 * of uTLBs by not invalidating always but only when absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs.
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
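
/*
 * Illustrative sketch only (not part of the original driver, hence kept
 * under "#if 0"): how the two paths described above would pick a command.
 * The helper names example_refill_commit()/example_unmap_commit() are
 * hypothetical; only the aux register and commands (ARC_REG_TLBCOMMAND,
 * TLBWriteNI, TLBWrite, TLBIVUTLB) come from this file.
 */
#if 0
static inline void example_refill_commit(void)
{
	/* miss path: victim only falls out of J-TLB, uTLB copies stay valid */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
}

static inline void example_unmap_commit(void)
{
	/* page removed from OS page table: uTLBs must be cleared as well */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);	/* also clears uTLBs */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);	/* corner cases w/o TLBWrite */
}
#endif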

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#if (CONFIG_ARC_MMU_VER < 4)

static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					 vaddr_n_asid);
	}
}

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *      This is because in v1 TLBWrite itself invalidates uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER == 2)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The Workaround was to set Index reg with some valid value, prior to
	 * flush. This was fixed in MMU v3 hence not needed any more.
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If Not already present get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* CONFIG_ARC_MMU_VER >= 4 */

static void utlb_invalidate(void)
{
	/* No need since uTLB is always in sync with JTLB */
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, pte_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Un-conditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
		}
	}

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */

noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork,exit,munmap etc, multiple times as well.
	 * Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so unallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *   without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}
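
	/*
	 * Worked example (illustrative, assuming the default 8K ARC page):
	 * the 32-entry cut-off above corresponds to a 32 * 8K = 256K range;
	 * anything larger is cheaper to handle by moving to a new ASID.
	 */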

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	pte_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -its ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
	 * -completes the lazy write to SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -Fix the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), vaddr);

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
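
	/*
	 * Worked example (illustrative): if the 3-bit r/w/x group in the PTE
	 * is 0b110, a user page ends up with 0b110110 (kernel + user copies)
	 * via rwx |= (rwx << 3), whereas a _PAGE_GLOBAL (kernel) page gets
	 * 0b110000 via rwx <<= 3, leaving the user half zero.
	 */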

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {

		int dirty = !test_and_set_bit(PG_dc_clean, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for the above default, the THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 *
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
			  pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));

	update_mmu_cache(vma, addr, &pte);
}

void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}

	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}

void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void read_decode_mmu_bcr(void)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int tmp;
	struct bcr_mmu_1_2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
#else
		unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
#endif
	} *mmu2;

	struct bcr_mmu_3 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, ways:4, sets:4, res:3, sasid:1, pg_sz:4,
			     u_itlb:4, u_dtlb:4;
#else
		unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, sasid:1, res:3, sets:4,
			     ways:4, ver:8;
#endif
	} *mmu3;

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
			     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
		/*           DTLB      ITLB      JES        JE         JA      */
		unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
			     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif
	} *mmu4;
= read_aux_reg(ARC_REG_MMU_BCR
);
759 mmu
->ver
= (tmp
>> 24);
762 mmu2
= (struct bcr_mmu_1_2
*)&tmp
;
763 mmu
->pg_sz_k
= TO_KB(0x2000);
764 mmu
->sets
= 1 << mmu2
->sets
;
765 mmu
->ways
= 1 << mmu2
->ways
;
766 mmu
->u_dtlb
= mmu2
->u_dtlb
;
767 mmu
->u_itlb
= mmu2
->u_itlb
;
768 } else if (mmu
->ver
== 3) {
769 mmu3
= (struct bcr_mmu_3
*)&tmp
;
770 mmu
->pg_sz_k
= 1 << (mmu3
->pg_sz
- 1);
771 mmu
->sets
= 1 << mmu3
->sets
;
772 mmu
->ways
= 1 << mmu3
->ways
;
773 mmu
->u_dtlb
= mmu3
->u_dtlb
;
774 mmu
->u_itlb
= mmu3
->u_itlb
;
775 mmu
->sasid
= mmu3
->sasid
;
777 mmu4
= (struct bcr_mmu_4
*)&tmp
;
778 mmu
->pg_sz_k
= 1 << (mmu4
->sz0
- 1);
779 mmu
->s_pg_sz_m
= 1 << (mmu4
->sz1
- 11);
780 mmu
->sets
= 64 << mmu4
->n_entry
;
781 mmu
->ways
= mmu4
->n_ways
* 2;
782 mmu
->u_dtlb
= mmu4
->u_dtlb
* 4;
783 mmu
->u_itlb
= mmu4
->u_itlb
* 4;
784 mmu
->sasid
= mmu4
->sasid
;
785 mmu
->pae
= mmu4
->pae
;

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
	char super_pg[64] = "";

	if (p_mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "%dM Super Page%s, ",
			  p_mmu->s_pg_sz_m,
			  IS_USED_CFG(CONFIG_TRANSPARENT_HUGEPAGE));

	n += scnprintf(buf + n, len - n,
		       "MMU [v%x]\t: %dk PAGE, %sJTLB %d (%dx%d), uDTLB %d, uITLB %d %s%s\n",
		       p_mmu->ver, p_mmu->pg_sz_k, super_pg,
		       p_mmu->sets * p_mmu->ways, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_AVAIL2(p_mmu->pae, "PAE40 ", CONFIG_ARC_HAS_PAE40));

	return buf;
}

void arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/* For efficiency sake, kernel is compile time built for a MMU ver
	 * This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
	 *  hardware doesn't understand cmds such as WriteNI, or IVUTLB
	 * On the other hand, Linux built for V1 if run on MMU V2 will do
	 *  un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't(must not) care how above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
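
/*
 * Worked example (illustrative): for the 4-way MMU pictured above,
 * set 1 / way 2 maps to SET_WAY_TO_IDX(mmu, 1, 2) = 1 * 4 + 2 = 6, and for
 * the 2-way MMU, set 1 / way 1 maps to 1 * 2 + 1 = 3 - matching the table.
 */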

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
	unsigned int pd0[mmu->ways];
	unsigned long flags;
	int set;

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
					      SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
				break;
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
 * don't match
 */
void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
{
	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
{
	unsigned int mmu_asid;

	mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

	/*
	 * At the time of a TLB miss/installation
	 *   - HW version needs to match SW version
	 *   - SW needs to have a valid ASID
	 */
	if (addr < 0x70000000 &&
	    ((mm_asid == MM_CTXT_NO_ASID) ||
	     (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
		print_asid_mismatch(mm_asid, mmu_asid, 0);
}

#endif