qemu/target-cris/mmu.c
/*
 *  CRIS mmu emulation.
 *
 *  Copyright (c) 2007 AXIS Communications AB
 *  Written by Edgar E. Iglesias.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#ifndef CONFIG_USER_ONLY

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#include "config.h"
#include "cpu.h"
#include "mmu.h"
#include "exec-all.h"

#ifdef DEBUG
#define D(x) x
#define D_LOG(...) qemu_log(__VA_ARGS__)
#else
#define D(x)
#define D_LOG(...) do { } while (0)
#endif

void cris_mmu_init(CPUState *env)
{
    env->mmu_rand_lfsr = 0xcccc;
}

#define SR_POLYNOM 0x8805
static inline unsigned int compute_polynom(unsigned int sr)
{
    unsigned int i;
    unsigned int f;

    f = 0;
    for (i = 0; i < 16; i++)
        f += ((SR_POLYNOM >> i) & 1) & ((sr >> i) & 1);

    return f;
}
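
/* Feedback term for the TLB-set LFSR: the low bit of the return value is
   the parity of the mmu_rand_lfsr bits selected by SR_POLYNOM.  Only bit 0
   of f ends up mattering; higher bits are masked away when the feedback is
   shifted back in (see the refill path in cris_mmu_translate_page).  */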

static inline int cris_mmu_enabled(uint32_t rw_gc_cfg)
{
    return (rw_gc_cfg & 12) != 0;
}

static inline int cris_mmu_segmented_addr(int seg, uint32_t rw_mm_cfg)
{
    return (1 << seg) & rw_mm_cfg;
}

static uint32_t cris_mmu_translate_seg(CPUState *env, int seg)
{
    uint32_t base;
    int i;

    if (seg < 8)
        base = env->sregs[SFR_RW_MM_KBASE_LO];
    else
        base = env->sregs[SFR_RW_MM_KBASE_HI];

    i = seg & 7;
    base >>= i * 4;
    base &= 15;

    base <<= 28;
    return base;
}
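
/* Each 256MB segment (vaddr bits 31:28) owns one 4-bit nibble in
   RW_MM_KBASE_LO (segments 0-7) or RW_MM_KBASE_HI (segments 8-15); that
   nibble becomes the top four bits of the physical address.  */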
/* Used by the tlb decoder.  */
#define EXTRACT_FIELD(src, start, end) \
        (((src) >> start) & ((1 << (end - start + 1)) - 1))

static inline void set_field(uint32_t *dst, unsigned int val,
                             unsigned int offset, unsigned int width)
{
    uint32_t mask;

    mask = (1 << width) - 1;
    mask <<= offset;
    val <<= offset;

    val &= mask;
    *dst &= ~(mask);
    *dst |= val;
}

#ifdef DEBUG
static void dump_tlb(CPUState *env, int mmu)
{
    int set;
    int idx;
    uint32_t hi, lo, tlb_vpn, tlb_pfn;

    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                    set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
}
#endif
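
/* TLB model: tlbsets[mmu][set][idx], with mmu 0 = instruction and
   1 = data, and 4 sets of 16 entries each.  Pages are 8KB (vaddr >> 13
   below).  An entry's hi word holds the VPN and PID; the lo word holds
   the PFN and the g/v/k/w/x bits.  */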

/* rw 0 = read, 1 = write, 2 = exec.  */
static int cris_mmu_translate_page(struct cris_mmu_result_t *res,
                                   CPUState *env, uint32_t vaddr,
                                   int rw, int usermode)
{
    unsigned int vpage;
    unsigned int idx;
    uint32_t pid, lo, hi;
    uint32_t tlb_vpn, tlb_pfn = 0;
    int tlb_pid, tlb_g, tlb_v, tlb_k, tlb_w, tlb_x;
    int cfg_v, cfg_k, cfg_w, cfg_x;
    int set, match = 0;
    uint32_t r_cause;
    uint32_t r_cfg;
    int rwcause;
    int mmu = 1; /* Data mmu is default.  */
    int vect_base;

    r_cause = env->sregs[SFR_R_MM_CAUSE];
    r_cfg = env->sregs[SFR_RW_MM_CFG];
    pid = env->pregs[PR_PID] & 0xff;

    switch (rw) {
        case 2: rwcause = CRIS_MMU_ERR_EXEC; mmu = 0; break;
        case 1: rwcause = CRIS_MMU_ERR_WRITE; break;
        default:
        case 0: rwcause = CRIS_MMU_ERR_READ; break;
    }

    /* I exception vectors 4 - 7, D 8 - 11.  */
    vect_base = (mmu + 1) * 4;

    vpage = vaddr >> 13;

    /* We know the index which to check on each set.
       Scan both I and D.  */
#if 0
    for (set = 0; set < 4; set++) {
        for (idx = 0; idx < 16; idx++) {
            lo = env->tlbsets[mmu][set][idx].lo;
            hi = env->tlbsets[mmu][set][idx].hi;
            tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
            tlb_pfn = EXTRACT_FIELD(lo, 13, 31);

            printf ("TLB: [%d][%d] hi=%x lo=%x v=%x p=%x\n",
                    set, idx, hi, lo, tlb_vpn, tlb_pfn);
        }
    }
#endif

    idx = vpage & 15;
    for (set = 0; set < 4; set++)
    {
        lo = env->tlbsets[mmu][set][idx].lo;
        hi = env->tlbsets[mmu][set][idx].hi;

        tlb_vpn = hi >> 13;
        tlb_pid = EXTRACT_FIELD(hi, 0, 7);
        tlb_g = EXTRACT_FIELD(lo, 4, 4);

        D_LOG("TLB[%d][%d][%d] v=%x vpage=%x lo=%x hi=%x\n",
              mmu, set, idx, tlb_vpn, vpage, lo, hi);
        if ((tlb_g || (tlb_pid == pid))
            && tlb_vpn == vpage) {
            match = 1;
            break;
        }
    }
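
    /* A virtual page can only live at index (vpage & 15) within a set, so
       the lookup above only probes that index in each of the four sets.
       Global entries (tlb_g) match regardless of the current PID.  */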

    res->bf_vec = vect_base;
    if (match) {
        cfg_w = EXTRACT_FIELD(r_cfg, 19, 19);
        cfg_k = EXTRACT_FIELD(r_cfg, 18, 18);
        cfg_x = EXTRACT_FIELD(r_cfg, 17, 17);
        cfg_v = EXTRACT_FIELD(r_cfg, 16, 16);

        tlb_pfn = EXTRACT_FIELD(lo, 13, 31);
        tlb_v = EXTRACT_FIELD(lo, 3, 3);
        tlb_k = EXTRACT_FIELD(lo, 2, 2);
        tlb_w = EXTRACT_FIELD(lo, 1, 1);
        tlb_x = EXTRACT_FIELD(lo, 0, 0);
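
        /* Protection is only enforced when the corresponding enable bit in
           RW_MM_CFG is set: cfg_k/w/x/v gate the per-entry kernel-only,
           writable, executable and valid bits.  A failed check clears
           'match' so the access faults like a miss, but with a more
           specific vector offset (+1 invalid, +2 access, +3 write/exec).  */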

        /*
        set_exception_vector(0x04, i_mmu_refill);
        set_exception_vector(0x05, i_mmu_invalid);
        set_exception_vector(0x06, i_mmu_access);
        set_exception_vector(0x07, i_mmu_execute);
        set_exception_vector(0x08, d_mmu_refill);
        set_exception_vector(0x09, d_mmu_invalid);
        set_exception_vector(0x0a, d_mmu_access);
        set_exception_vector(0x0b, d_mmu_write);
        */
        if (cfg_k && tlb_k && usermode) {
            D(printf ("tlb: kernel protected %x lo=%x pc=%x\n",
                      vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 2;
        } else if (rw == 1 && cfg_w && !tlb_w) {
            D(printf ("tlb: write protected %x lo=%x pc=%x\n",
                      vaddr, lo, env->pc));
            match = 0;
            /* write accesses never go through the I mmu.  */
            res->bf_vec = vect_base + 3;
        } else if (rw == 2 && cfg_x && !tlb_x) {
            D(printf ("tlb: exec protected %x lo=%x pc=%x\n",
                      vaddr, lo, env->pc));
            match = 0;
            res->bf_vec = vect_base + 3;
        } else if (cfg_v && !tlb_v) {
            D(printf ("tlb: invalid %x\n", vaddr));
            match = 0;
            res->bf_vec = vect_base + 1;
        }

        res->prot = 0;
        if (match) {
            res->prot |= PAGE_READ;
            if (tlb_w)
                res->prot |= PAGE_WRITE;
            if (tlb_x)
                res->prot |= PAGE_EXEC;
        }
        else
            D(dump_tlb(env, mmu));
    } else {
        /* If refill, provide a randomized set.  */
        set = env->mmu_rand_lfsr & 3;
    }

    if (!match) {
        unsigned int f;

        /* Update lfsr at every fault.  */
        f = compute_polynom(env->mmu_rand_lfsr);
        env->mmu_rand_lfsr >>= 1;
        env->mmu_rand_lfsr |= (f << 15);
        env->mmu_rand_lfsr &= 0xffff;

        /* Compute index.  */
        idx = vpage & 15;

        /* Update RW_MM_TLB_SEL.  */
        env->sregs[SFR_RW_MM_TLB_SEL] = 0;
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], idx, 0, 4);
        set_field(&env->sregs[SFR_RW_MM_TLB_SEL], set, 4, 2);

        /* Update RW_MM_CAUSE.  */
        set_field(&r_cause, rwcause, 8, 2);
        set_field(&r_cause, vpage, 13, 19);
        set_field(&r_cause, pid, 0, 8);
        env->sregs[SFR_R_MM_CAUSE] = r_cause;
        D(printf("refill vaddr=%x pc=%x\n", vaddr, env->pc));
    }
279 D(printf ("%s rw=%d mtch=%d pc=%x va=%x vpn=%x tlbvpn=%x pfn=%x pid=%x"
280 " %x cause=%x sel=%x sp=%x %x %x\n",
281 __func__, rw, match, env->pc,
282 vaddr, vpage,
283 tlb_vpn, tlb_pfn, tlb_pid,
284 pid,
285 r_cause,
286 env->sregs[SFR_RW_MM_TLB_SEL],
287 env->regs[R_SP], env->pregs[PR_USP], env->ksp));
289 res->phy = tlb_pfn << TARGET_PAGE_BITS;
290 return !match;

void cris_mmu_flush_pid(CPUState *env, uint32_t pid)
{
    target_ulong vaddr;
    unsigned int idx;
    uint32_t lo, hi;
    uint32_t tlb_vpn;
    int tlb_pid, tlb_g, tlb_v;
    unsigned int set;
    unsigned int mmu;

    pid &= 0xff;
    for (mmu = 0; mmu < 2; mmu++) {
        for (set = 0; set < 4; set++)
        {
            for (idx = 0; idx < 16; idx++) {
                lo = env->tlbsets[mmu][set][idx].lo;
                hi = env->tlbsets[mmu][set][idx].hi;

                tlb_vpn = EXTRACT_FIELD(hi, 13, 31);
                tlb_pid = EXTRACT_FIELD(hi, 0, 7);
                tlb_g = EXTRACT_FIELD(lo, 4, 4);
                tlb_v = EXTRACT_FIELD(lo, 3, 3);

                if (tlb_v && !tlb_g && (tlb_pid == pid)) {
                    vaddr = tlb_vpn << TARGET_PAGE_BITS;
                    D_LOG("flush pid=%x vaddr=%x\n",
                          pid, vaddr);
                    tlb_flush_page(env, vaddr);
                }
            }
        }
    }
}
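
/* Top-level translation entry point: bypass the MMU when it is disabled,
   use the fixed segment mapping for segmented addresses, otherwise do a
   TLB lookup.  PR_SRS is temporarily switched (to what are presumably the
   I- and D-MMU support-register banks, 1 and 2) and restored before
   returning.  Returns 0 on success, non-zero on a fault.  */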
int cris_mmu_translate(struct cris_mmu_result_t *res,
                       CPUState *env, uint32_t vaddr,
                       int rw, int mmu_idx)
{
    uint32_t phy = vaddr;
    int seg;
    int miss = 0;
    int is_user = mmu_idx == MMU_USER_IDX;
    uint32_t old_srs;

    old_srs = env->pregs[PR_SRS];

    /* rw == 2 means exec, map the access to the insn mmu.  */
    env->pregs[PR_SRS] = rw == 2 ? 1 : 2;

    if (!cris_mmu_enabled(env->sregs[SFR_RW_GC_CFG])) {
        res->phy = vaddr;
        res->prot = PAGE_BITS;
        goto done;
    }

    seg = vaddr >> 28;
    if (cris_mmu_segmented_addr(seg, env->sregs[SFR_RW_MM_CFG]))
    {
        uint32_t base;

        miss = 0;
        base = cris_mmu_translate_seg(env, seg);
        phy = base | (0x0fffffff & vaddr);
        res->phy = phy;
        res->prot = PAGE_BITS;
    }
    else
        miss = cris_mmu_translate_page(res, env, vaddr, rw, is_user);
  done:
    env->pregs[PR_SRS] = old_srs;
    return miss;
}
#endif