/*
 * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
 * Copyright (C) 2003  Richard Curnow <richard.curnow@superh.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
/**
 * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
 */
int sh64_tlb_init(void)
{
	/* Assign some sane DTLB defaults */
	cpu_data->dtlb.entries	= 64;
	cpu_data->dtlb.step	= 0x10;

	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
	cpu_data->dtlb.next	= cpu_data->dtlb.first;

	cpu_data->dtlb.last	= DTLB_FIXED |
				  ((cpu_data->dtlb.entries - 1) *
				   cpu_data->dtlb.step);

	/* And again for the ITLB */
	cpu_data->itlb.entries	= 64;
	cpu_data->itlb.step	= 0x10;

	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
	cpu_data->itlb.next	= cpu_data->itlb.first;
	cpu_data->itlb.last	= ITLB_FIXED |
				  ((cpu_data->itlb.entries - 1) *
				   cpu_data->itlb.step);

	return 0;
}
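/*
 * Worked example of the layout set up above (informal, derived from the
 * values used in sh64_tlb_init()): with 64 entries and a step of 0x10
 * bytes of configuration space per entry, the managed DTLB slots run from
 * DTLB_FIXED | 0x10 ('first') up to DTLB_FIXED | (63 * 0x10), i.e.
 * DTLB_FIXED | 0x3f0 ('last'), and 'next' is the replacement cursor used
 * by __update_tlb() further down. The ITLB is laid out the same way,
 * based at ITLB_FIXED.
 */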
/**
 * sh64_next_free_dtlb_entry - Find the next available DTLB entry
 */
unsigned long long sh64_next_free_dtlb_entry(void)
{
	return cpu_data->dtlb.next;
}
/**
 * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
 */
unsigned long long sh64_get_wired_dtlb_entry(void)
{
	unsigned long long entry = sh64_next_free_dtlb_entry();

	cpu_data->dtlb.first += cpu_data->dtlb.step;
	cpu_data->dtlb.next  += cpu_data->dtlb.step;

	return entry;
}
/**
 * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
 *
 * @entry:	Address of TLB slot.
 *
 * Works like a stack, last one to allocate must be first one to free.
 */
int sh64_put_wired_dtlb_entry(unsigned long long entry)
{
	__flush_tlb_slot(entry);

	/*
	 * We don't do any particularly useful tracking of wired entries,
	 * so this approach works like a stack .. last one to be allocated
	 * has to be the first one to be freed.
	 *
	 * We could potentially load wired entries into a list and work on
	 * rebalancing the list periodically (which also entails moving the
	 * contents of a TLB entry) .. though I have a feeling that this is
	 * more trouble than it's worth.
	 */

	/*
	 * Entry must be valid .. we don't want any ITLB addresses!
	 */
	if (entry <= DTLB_FIXED)
		return -EINVAL;

	/*
	 * Next, check that we're within range to be freed (i.e., this must
	 * be the entry beneath the first 'free' entry).
	 */
	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
		return -EINVAL;

	/* If we are, then bring this entry back into the list */
	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
	cpu_data->dtlb.next	= entry;

	return 0;
}
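/*
 * Illustrative sketch of the stack discipline described above (hypothetical
 * caller, not part of this file): wired slots must be released in the
 * reverse order of allocation, otherwise the range check against dtlb.first
 * fails and sh64_put_wired_dtlb_entry() returns -EINVAL:
 *
 *	unsigned long long a = sh64_get_wired_dtlb_entry();
 *	unsigned long long b = sh64_get_wired_dtlb_entry();
 *
 *	sh64_put_wired_dtlb_entry(b);	// ok, b was allocated last
 *	sh64_put_wired_dtlb_entry(a);	// ok, a is now the newest wired slot
 *
 * Swapping the two frees would make the first call fail with -EINVAL.
 */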
/**
 * sh64_setup_tlb_slot - Load up a translation in a wired slot.
 *
 * @config_addr:	Address of TLB slot.
 * @eaddr:		Virtual address.
 * @asid:		Address Space Identifier.
 * @paddr:		Physical address.
 *
 * Load up a virtual<->physical translation for @eaddr<->@paddr in the
 * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
 */
void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
			 unsigned long asid, unsigned long paddr)
{
	unsigned long long pteh, ptel;

	pteh = neff_sign_extend(eaddr);
	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;

	ptel = neff_sign_extend(paddr);
	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);

	/* PTEH lives at configuration offset 0, PTEL at offset 1 */
	asm volatile("putcfg %0, 1, %1\n\t"
		     "putcfg %0, 0, %2\n"
		     : : "r" (config_addr), "r" (ptel), "r" (pteh));
}
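/*
 * Typical lifecycle of a wired DTLB mapping built from the helpers above
 * (illustrative sketch; eaddr/paddr are hypothetical page-aligned addresses
 * supplied by the caller, see tlb_wire_entry() below for a real user):
 *
 *	unsigned long long slot = sh64_get_wired_dtlb_entry();
 *
 *	sh64_setup_tlb_slot(slot, eaddr, get_asid(), paddr);
 *	... accesses to eaddr now hit the wired translation ...
 *	sh64_teardown_tlb_slot(slot);
 *	sh64_put_wired_dtlb_entry(slot);
 */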
/**
 * sh64_teardown_tlb_slot - Teardown a translation.
 *
 * @config_addr:	Address of TLB slot.
 *
 * Teardown any existing mapping in the TLB slot @config_addr.
 */
void sh64_teardown_tlb_slot(unsigned long long config_addr)
	__attribute__ ((alias("__flush_tlb_slot")));
static int dtlb_entry;
static unsigned long long dtlb_entries[64];
void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	unsigned long long entry;
	unsigned long paddr, flags;

	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));

	local_irq_save(flags);

	entry = sh64_get_wired_dtlb_entry();
	dtlb_entries[dtlb_entry++] = entry;

	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;

	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);

	local_irq_restore(flags);
}
void tlb_unwire_entry(void)
{
	unsigned long long entry;
	unsigned long flags;

	BUG_ON(!dtlb_entry);

	local_irq_save(flags);

	/* Pop the most recently wired entry off the stack */
	entry = dtlb_entries[--dtlb_entry];

	sh64_teardown_tlb_slot(entry);
	sh64_put_wired_dtlb_entry(entry);

	local_irq_restore(flags);
}
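/*
 * Illustrative use of the wire/unwire pair above (hypothetical caller):
 * pin the translation for one page so that touching it cannot raise a TLB
 * miss. Calls nest like a stack, mirroring the wired-entry helpers they
 * are built on:
 *
 *	tlb_wire_entry(vma, addr, pte);		// vma/addr/pte from the caller
 *	... fault-free accesses to addr ...
 *	tlb_unwire_entry();
 */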
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long long ptel;
	unsigned long long pteh = 0;
	struct tlb_info *tlbp;
	unsigned long long next;
	unsigned int fault_code = get_thread_fault_code();

	/* Get PTEL first */
	ptel = pte_val(pte);

	/* Set PTEH register: VPN, ASID and the valid bit */
	pteh = neff_sign_extend(address & MMU_VPN_MASK);
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK;	/* drop software flags */

	if (fault_code & FAULT_CODE_ITLB)
		tlbp = &cpu_data->itlb;
	else
		tlbp = &cpu_data->dtlb;

	next = tlbp->next;
	__flush_tlb_slot(next);

	asm volatile ("putcfg %0,1,%2\n\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel));

	/* Advance the round-robin replacement cursor, wrapping at the end */
	next += tlbp->step;
	if (next > tlbp->last)
		next = tlbp->first;
	tlbp->next = next;
}
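/*
 * Note on the refill above (informal summary): 'next' is the round-robin
 * replacement cursor set up in sh64_tlb_init(). Each miss flushes and
 * refills the slot it points at, then advances it by one step, wrapping
 * back to 'first' once it passes 'last', so the slots between 'first' and
 * 'last' are recycled in order while anything below 'first' (the fixed and
 * wired slots) is never touched by this path.
 */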