/* sys/arch/hpc/stand/hpcboot/sh3/sh_mmu.cpp (from netbsd-mini2440.git) */
/*	$NetBSD: sh_mmu.cpp,v 1.6 2006/03/05 04:05:39 uwe Exp $	*/

/*-
 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by UCHIYAMA Yasushi.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
32 #include <sh3/sh_arch.h>
33 #include <sh3/sh_mmu.h>
35 #include <sh3/cpu/sh3.h>
36 #include <sh3/cpu/sh4.h>
39 // Get physical address from memory mapped TLB.
40 // SH3 version. SH4 can't do this method. because address/data array must be
41 // accessed from P2.
43 paddr_t
44 MemoryManager_SHMMU::searchPage(vaddr_t vaddr)
46 uint32_t vpn, idx, s, dum, aae, dae, entry_idx, asid;
47 paddr_t paddr = ~0;
48 int way, kmode;
50 vpn = vaddr & SH3_PAGE_MASK;
51 // Windows CE uses VPN-only index-mode.
52 idx = vaddr & SH3_MMU_VPN_MASK;
54 kmode = SetKMode(1);
55 // Get current ASID
56 asid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;
58 // to avoid another TLB access, disable external interrupt.
59 s = suspendIntr();
61 do {
62 // load target address page to TLB
63 dum = _reg_read_4(vaddr);
64 _reg_write_4(vaddr, dum);
66 for (way = 0; way < SH3_MMU_WAY; way++) {
67 entry_idx = idx | (way << SH3_MMU_WAY_SHIFT);
68 // inquire MMU address array.
69 aae = _reg_read_4(SH3_MMUAA | entry_idx);
71 if (!(aae & SH3_MMU_D_VALID) ||
72 ((aae & SH3_MMUAA_D_ASID_MASK) != asid) ||
73 (((aae | idx) & SH3_PAGE_MASK) != vpn))
74 continue;
76 // entry found.
77 // inquire MMU data array to get its physical address.
78 dae = _reg_read_4(SH3_MMUDA | entry_idx);
79 paddr = (dae & SH3_PAGE_MASK) | (vaddr & ~SH3_PAGE_MASK);
80 break;
82 } while (paddr == ~0);
84 resumeIntr(s);
85 SetKMode(kmode);
87 return paddr;
90 void
91 MemoryManager_SHMMU::CacheDump()
93 static const char *able[] = {"dis", "en" };
94 int write_through_p0_u0_p3;
95 int write_through_p1;
96 uint32_t r;
97 int kmode;
99 DPRINTF_SETUP();
101 kmode = SetKMode(1);
102 switch (SHArchitecture::cpu_type()) {
103 default:
104 DPRINTF((TEXT("unknown architecture.\n")));
105 SetKMode(kmode);
106 return;
107 case 3:
108 r = _reg_read_4(SH3_CCR);
109 DPRINTF((TEXT("cache %Sabled"),
110 able[(r & SH3_CCR_CE ? 1 : 0)]));
111 if (r & SH3_CCR_RA)
112 DPRINTF((TEXT(" ram-mode")));
114 write_through_p0_u0_p3 = r & SH3_CCR_WT;
115 write_through_p1 = !(r & SH3_CCR_CB);
116 break;
117 case 4:
118 r = _reg_read_4(SH4_CCR);
119 DPRINTF((TEXT("I-cache %Sabled"),
120 able[(r & SH4_CCR_ICE) ? 1 : 0]));
121 if (r & SH4_CCR_IIX)
122 DPRINTF((TEXT(" index-mode ")));
123 DPRINTF((TEXT(" D-cache %Sabled"),
124 able[(r & SH4_CCR_OCE) ? 1 : 0]));
125 if (r & SH4_CCR_OIX)
126 DPRINTF((TEXT(" index-mode")));
127 if (r & SH4_CCR_ORA)
128 DPRINTF((TEXT(" ram-mode")));
130 write_through_p0_u0_p3 = r & SH4_CCR_WT;
131 write_through_p1 = !(r & SH4_CCR_CB);
132 break;
134 DPRINTF((TEXT(".")));
136 // Write-through/back
137 DPRINTF((TEXT(" P0, U0, P3 write-%S P1 write-%S\n"),
138 write_through_p0_u0_p3 ? "through" : "back",
139 write_through_p1 ? "through" : "back"));
141 SetKMode(kmode);
144 void
145 MemoryManager_SHMMU::MMUDump()
147 #define ON(x, c) ((x) & (c) ? '|' : '.')
148 uint32_t r, e, a;
149 int i, kmode;
151 DPRINTF_SETUP();
153 kmode = SetKMode(1);
154 DPRINTF((TEXT("MMU:\n")));
155 switch (SHArchitecture::cpu_type()) {
156 default:
157 DPRINTF((TEXT("unknown architecture.\n")));
158 SetKMode(kmode);
159 return;
160 case 3:
161 r = _reg_read_4(SH3_MMUCR);
162 if (!(r & SH3_MMUCR_AT))
163 goto disabled;
165 // MMU configuration.
166 DPRINTF((TEXT("%s index-mode, %s virtual storage mode\n"),
167 r & SH3_MMUCR_IX
168 ? TEXT("ASID + VPN") : TEXT("VPN only"),
169 r & SH3_MMUCR_SV ? TEXT("single") : TEXT("multiple")));
171 // Dump TLB.
172 DPRINTF((TEXT("---TLB---\n")));
173 DPRINTF((TEXT(" VPN ASID PFN VDCG PR SZ\n")));
174 for (i = 0; i < SH3_MMU_WAY; i++) {
175 DPRINTF((TEXT(" [way %d]\n"), i));
176 for (e = 0; e < SH3_MMU_ENTRY; e++) {
177 // address/data array common offset.
178 a = (e << SH3_MMU_VPN_SHIFT) |
179 (i << SH3_MMU_WAY_SHIFT);
181 r = _reg_read_4(SH3_MMUAA | a);
182 DPRINTF((TEXT("0x%08x %3d"),
183 r & SH3_MMUAA_D_VPN_MASK,
184 r & SH3_MMUAA_D_ASID_MASK));
185 r = _reg_read_4(SH3_MMUDA | a);
186 DPRINTF((TEXT(" 0x%08x %c%c%c%c %d %dK\n"),
187 r & SH3_MMUDA_D_PPN_MASK,
188 ON(r, SH3_MMUDA_D_V),
189 ON(r, SH3_MMUDA_D_D),
190 ON(r, SH3_MMUDA_D_C),
191 ON(r, SH3_MMUDA_D_SH),
192 (r & SH3_MMUDA_D_PR_MASK) >>
193 SH3_MMUDA_D_PR_SHIFT,
194 r & SH3_MMUDA_D_SZ ? 4 : 1));
198 break;
199 case 4:
200 r = _reg_read_4(SH4_MMUCR);
201 if (!(r & SH4_MMUCR_AT))
202 goto disabled;
203 DPRINTF((TEXT("%s virtual storage mode,"),
204 r & SH3_MMUCR_SV ? TEXT("single") : TEXT("multiple")));
205 DPRINTF((TEXT(" SQ access: (priviledge%S)"),
206 r & SH4_MMUCR_SQMD ? "" : "/user"));
207 DPRINTF((TEXT("\n")));
208 #if sample_code
210 // Memory mapped TLB accessing program must run on P2.
211 // This is sample code.
213 // Dump ITLB
214 DPRINTF((TEXT("---ITLB---\n")));
215 for (i = 0; i < 4; i++) {
216 e = i << SH4_ITLB_E_SHIFT;
217 r = _reg_read_4(SH4_ITLB_AA | e);
218 DPRINTF((TEXT("%08x %3d _%c"),
219 r & SH4_ITLB_AA_VPN_MASK,
220 r & SH4_ITLB_AA_ASID_MASK,
221 ON(r, SH4_ITLB_AA_V)));
222 r = _reg_read_4(SH4_ITLB_DA1 | e);
223 DPRINTF((TEXT(" %08x %c%c_%c_ %1d"),
224 r & SH4_ITLB_DA1_PPN_MASK,
225 ON(r, SH4_ITLB_DA1_V),
226 ON(r, SH4_ITLB_DA1_C),
227 ON(r, SH4_ITLB_DA1_SH),
228 (r & SH4_ITLB_DA1_PR) >> SH4_UTLB_DA1_PR_SHIFT
230 r = _reg_read_4(SH4_ITLB_DA2 | e);
231 DPRINTF((TEXT(" %c%d\n"),
232 ON(r, SH4_ITLB_DA2_TC),
233 r & SH4_ITLB_DA2_SA_MASK));
235 // Dump UTLB
236 DPRINTF((TEXT("---UTLB---\n")));
237 for (i = 0; i < 64; i++) {
238 e = i << SH4_UTLB_E_SHIFT;
239 r = _reg_read_4(SH4_UTLB_AA | e);
240 DPRINTF((TEXT("%08x %3d %c%c"),
241 r & SH4_UTLB_AA_VPN_MASK,
242 ON(r, SH4_UTLB_AA_D),
243 ON(r, SH4_UTLB_AA_V),
244 r & SH4_UTLB_AA_ASID_MASK));
245 r = _reg_read_4(SH4_UTLB_DA1 | e);
246 DPRINTF((TEXT(" %08x %c%c%c%c%c %1d"),
247 r & SH4_UTLB_DA1_PPN_MASK,
248 ON(r, SH4_UTLB_DA1_V),
249 ON(r, SH4_UTLB_DA1_C),
250 ON(r, SH4_UTLB_DA1_D),
251 ON(r, SH4_UTLB_DA1_SH),
252 ON(r, SH4_UTLB_DA1_WT),
253 (r & SH4_UTLB_DA1_PR_MASK) >> SH4_UTLB_DA1_PR_SHIFT
255 r = _reg_read_4(SH4_UTLB_DA2 | e);
256 DPRINTF((TEXT(" %c%d\n"),
257 ON(r, SH4_UTLB_DA2_TC),
258 r & SH4_UTLB_DA2_SA_MASK));
260 #endif //sample_code
261 break;
264 SetKMode(kmode);
265 return;
267 disabled:
268 DPRINTF((TEXT("disabled.\n")));
269 SetKMode(kmode);
270 #undef ON