/* Blackfin Memory Management Unit (MMU) model.

   Copyright (C) 2010-2019 Free Software Foundation, Inc.
   Contributed by Analog Devices, Inc.

   This file is part of simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "sim-options.h"

#include "dv-bfin_mmu.h"
#include "dv-bfin_cec.h"

/* XXX: Should this really be two blocks of registers ?  PRM describes
        these as two Content Addressable Memory (CAM) blocks.  */
struct bfin_mmu
{
  bu32 base;

  /* Order after here is important -- matches hardware MMR layout.  */
  bu32 sram_base_address;

  bu32 dmem_control, dcplb_fault_status, dcplb_fault_addr;
  char _dpad0[0x100 - 0x0 - (4 * 4)];
  bu32 dcplb_addr[16];
  char _dpad1[0x200 - 0x100 - (4 * 16)];
  bu32 dcplb_data[16];
  char _dpad2[0x300 - 0x200 - (4 * 16)];
  bu32 dtest_command;
  char _dpad3[0x400 - 0x300 - (4 * 1)];
  bu32 dtest_data[2];
  char _dpad4[0x1000 - 0x400 - (4 * 2)];

  bu32 idk;  /* Filler MMR; hardware simply ignores.  */
  bu32 imem_control, icplb_fault_status, icplb_fault_addr;
  char _ipad0[0x100 - 0x0 - (4 * 4)];
  bu32 icplb_addr[16];
  char _ipad1[0x200 - 0x100 - (4 * 16)];
  bu32 icplb_data[16];
  char _ipad2[0x300 - 0x200 - (4 * 16)];
  bu32 itest_command;
  char _ipad3[0x400 - 0x300 - (4 * 1)];
  bu32 itest_data[2];
};
#define mmr_base()      offsetof(struct bfin_mmu, sram_base_address)
#define mmr_offset(mmr) (offsetof(struct bfin_mmu, mmr) - mmr_base())
#define mmr_idx(mmr)    (mmr_offset (mmr) / 4)
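/* These helpers turn a struct bfin_mmu member into its MMR offset (relative
   to SRAM_BASE_ADDRESS, the first register modeled in the block), used by
   the case labels in the I/O handlers below, and into a 32-bit word index,
   used by the name table.  */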
static const char * const mmr_names[BFIN_COREMMR_MMU_SIZE / 4] =
{
  "SRAM_BASE_ADDRESS", "DMEM_CONTROL", "DCPLB_FAULT_STATUS", "DCPLB_FAULT_ADDR",
  [mmr_idx (dcplb_addr[0])] = "DCPLB_ADDR0",
  "DCPLB_ADDR1", "DCPLB_ADDR2", "DCPLB_ADDR3", "DCPLB_ADDR4", "DCPLB_ADDR5",
  "DCPLB_ADDR6", "DCPLB_ADDR7", "DCPLB_ADDR8", "DCPLB_ADDR9", "DCPLB_ADDR10",
  "DCPLB_ADDR11", "DCPLB_ADDR12", "DCPLB_ADDR13", "DCPLB_ADDR14", "DCPLB_ADDR15",
  [mmr_idx (dcplb_data[0])] = "DCPLB_DATA0",
  "DCPLB_DATA1", "DCPLB_DATA2", "DCPLB_DATA3", "DCPLB_DATA4", "DCPLB_DATA5",
  "DCPLB_DATA6", "DCPLB_DATA7", "DCPLB_DATA8", "DCPLB_DATA9", "DCPLB_DATA10",
  "DCPLB_DATA11", "DCPLB_DATA12", "DCPLB_DATA13", "DCPLB_DATA14", "DCPLB_DATA15",
  [mmr_idx (dtest_command)] = "DTEST_COMMAND",
  [mmr_idx (dtest_data[0])] = "DTEST_DATA0", "DTEST_DATA1",
  [mmr_idx (imem_control)] = "IMEM_CONTROL", "ICPLB_FAULT_STATUS", "ICPLB_FAULT_ADDR",
  [mmr_idx (icplb_addr[0])] = "ICPLB_ADDR0",
  "ICPLB_ADDR1", "ICPLB_ADDR2", "ICPLB_ADDR3", "ICPLB_ADDR4", "ICPLB_ADDR5",
  "ICPLB_ADDR6", "ICPLB_ADDR7", "ICPLB_ADDR8", "ICPLB_ADDR9", "ICPLB_ADDR10",
  "ICPLB_ADDR11", "ICPLB_ADDR12", "ICPLB_ADDR13", "ICPLB_ADDR14", "ICPLB_ADDR15",
  [mmr_idx (icplb_data[0])] = "ICPLB_DATA0",
  "ICPLB_DATA1", "ICPLB_DATA2", "ICPLB_DATA3", "ICPLB_DATA4", "ICPLB_DATA5",
  "ICPLB_DATA6", "ICPLB_DATA7", "ICPLB_DATA8", "ICPLB_DATA9", "ICPLB_DATA10",
  "ICPLB_DATA11", "ICPLB_DATA12", "ICPLB_DATA13", "ICPLB_DATA14", "ICPLB_DATA15",
  [mmr_idx (itest_command)] = "ITEST_COMMAND",
  [mmr_idx (itest_data[0])] = "ITEST_DATA0", "ITEST_DATA1",
};
#define mmr_name(off) (mmr_names[(off) / 4] ? : "<INV>")
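/* NB: this uses the GNU "x ? : y" extension so that reserved slots, which
   have no entry in mmr_names[], show up in traces as "<INV>".  */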
static bool bfin_mmu_skip_cplbs = false;
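/* When set (via the --mmu-skip-cplbs option below), _mmu_check_addr() skips
   the CPLB table walk entirely and only performs the implicit address
   checks, which is a big speed win in operating-environment runs.  */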
bfin_mmu_io_write_buffer (struct hw *me, const void *source,
                          int space, address_word addr, unsigned nr_bytes)

  struct bfin_mmu *mmu = hw_data (me);

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, true))

  value = dv_load_4 (source);

  mmr_off = addr - mmu->base;
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);

    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
      /* XXX: IMC/DMC bit should add/remove L1 cache regions ...  */
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):

    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      /* Discard writes to these.  */

    case mmr_offset(itest_command):
      /* XXX: Not supported atm.  */
      hw_abort (me, "ITEST_COMMAND unimplemented");
    case mmr_offset(dtest_command):
      /* Access L1 memory indirectly.  */
      bu32 addr = mmu->sram_base_address |
                  ((value >> (26 - 11)) & (1 << 11)) | /* addr bit 11 (Way0/Way1)   */
                  ((value >> (24 - 21)) & (1 << 21)) | /* addr bit 21 (Data/Inst)   */
                  ((value >> (23 - 15)) & (1 << 15)) | /* addr bit 15 (Data Bank)   */
                  ((value >> (16 - 12)) & (3 << 12)) | /* addr bits 13:12 (Subbank) */
                  (value & 0x47F8);                    /* addr bits 14 & 10:3       */
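      /* DTEST_COMMAND scatters the L1 location to access across several bit
         fields; the expression above folds them back into a flat address on
         top of SRAM_BASE_ADDRESS (e.g. command bit 26, the Way select, ends
         up as address bit 11).  */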
      if (!(value & TEST_DATA_ARRAY))
        hw_abort (me, "DTEST_COMMAND tag array unimplemented");
      if (value & 0xfa7cb801)
        hw_abort (me, "DTEST_COMMAND bits undefined");

      if (value & TEST_WRITE)
        sim_write (hw_system (me), addr, (void *)mmu->dtest_data, 8);
      else
        sim_read (hw_system (me), addr, (void *)mmu->dtest_data, 8);
      dv_bfin_mmr_invalid (me, addr, nr_bytes, true);

bfin_mmu_io_read_buffer (struct hw *me, void *dest,
                         int space, address_word addr, unsigned nr_bytes)

  struct bfin_mmu *mmu = hw_data (me);

  /* Invalid access mode is higher priority than missing register.  */
  if (!dv_bfin_mmr_require_32 (me, addr, nr_bytes, false))

  mmr_off = addr - mmu->base;
  valuep = (void *)((unsigned long)mmu + mmr_base() + mmr_off);
    case mmr_offset(dmem_control):
    case mmr_offset(imem_control):
    case mmr_offset(dtest_command):
    case mmr_offset(dtest_data[0]) ... mmr_offset(dtest_data[1]):
    case mmr_offset(itest_command):
    case mmr_offset(itest_data[0]) ... mmr_offset(itest_data[1]):
      /* XXX: should do something here.  */
    case mmr_offset(dcplb_addr[0]) ... mmr_offset(dcplb_addr[15]):
    case mmr_offset(dcplb_data[0]) ... mmr_offset(dcplb_data[15]):
    case mmr_offset(icplb_addr[0]) ... mmr_offset(icplb_addr[15]):
    case mmr_offset(icplb_data[0]) ... mmr_offset(icplb_data[15]):
    case mmr_offset(sram_base_address):
    case mmr_offset(dcplb_fault_status):
    case mmr_offset(dcplb_fault_addr):
    case mmr_offset(idk):
    case mmr_offset(icplb_fault_status):
    case mmr_offset(icplb_fault_addr):
      dv_store_4 (dest, *valuep);

      dv_bfin_mmr_invalid (me, addr, nr_bytes, false);

attach_bfin_mmu_regs (struct hw *me, struct bfin_mmu *mmu)
  address_word attach_address;
  int attach_space;
  unsigned attach_size;
  reg_property_spec reg;
  if (hw_find_property (me, "reg") == NULL)
    hw_abort (me, "Missing \"reg\" property");

  if (!hw_find_reg_array_property (me, "reg", 0, &reg))
    hw_abort (me, "\"reg\" property must contain three addr/size entries");
  hw_unit_address_to_attach_address (hw_parent (me),
                                     &reg.address,
                                     &attach_space, &attach_address, me);
  hw_unit_size_to_attach_size (hw_parent (me), &reg.size, &attach_size, me);

  if (attach_size != BFIN_COREMMR_MMU_SIZE)
    hw_abort (me, "\"reg\" size must be %#x", BFIN_COREMMR_MMU_SIZE);

  hw_attach_address (hw_parent (me),
                     0, attach_space, attach_address, attach_size, me);

  mmu->base = attach_address;
bfin_mmu_finish (struct hw *me)

  struct bfin_mmu *mmu;

  mmu = HW_ZALLOC (me, struct bfin_mmu);

  set_hw_data (me, mmu);
  set_hw_io_read_buffer (me, bfin_mmu_io_read_buffer);
  set_hw_io_write_buffer (me, bfin_mmu_io_write_buffer);

  attach_bfin_mmu_regs (me, mmu);

  /* Initialize the MMU.  */
  mmu->sram_base_address = 0xff800000 - 0;
                           /*(4 * 1024 * 1024 * CPU_INDEX (hw_system_cpu (me)));*/
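  /* The commented-out term would space the L1 base 4MiB apart per core on
     multi-core parts; with the "- 0" every core currently gets the single
     canonical 0xff800000 base.  */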
  mmu->dmem_control = 0x00000001;
  mmu->imem_control = 0x00000001;
const struct hw_descriptor dv_bfin_mmu_descriptor[] =
{
  {"bfin_mmu", bfin_mmu_finish,},
};
/* Device option parsing.  */

static DECLARE_OPTION_HANDLER (bfin_mmu_option_handler);

  OPTION_MMU_SKIP_TABLES = OPTION_START,
const OPTION bfin_mmu_options[] =
{
  { {"mmu-skip-cplbs", no_argument, NULL, OPTION_MMU_SKIP_TABLES},
    '\0', NULL, "Skip parsing of CPLB tables (big speed increase)",
    bfin_mmu_option_handler, NULL },

  { {NULL, no_argument, NULL, 0}, '\0', NULL, NULL, NULL, NULL }
};
bfin_mmu_option_handler (SIM_DESC sd, sim_cpu *current_cpu, int opt,
                         char *arg, int is_command)

    case OPTION_MMU_SKIP_TABLES:
      bfin_mmu_skip_cplbs = true;

      sim_io_eprintf (sd, "Unknown Blackfin MMU option %d\n", opt);

#define MMU_STATE(cpu) DV_STATE_CACHED (cpu, mmu)
_mmu_log_ifault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 pc, bool supv)

  mmu->icplb_fault_addr = pc;
  mmu->icplb_fault_status = supv << 17;
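  /* Bit 17 of ICPLB_FAULT_STATUS records whether the faulting fetch was made
     in supervisor mode; the rest of the status is left at zero here.  */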
mmu_log_ifault (SIM_CPU *cpu)

  _mmu_log_ifault (cpu, MMU_STATE (cpu), PCREG, cec_get_ivg (cpu) >= 0);

_mmu_log_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
                bool inst, bool miss, bool supv, bool dag1, bu32 faults)

  bu32 *fault_status, *fault_addr;

  /* No logging in non-OS mode.  */

  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;

  /* ICPLB regs always get updated.  */
    _mmu_log_ifault (cpu, mmu, PCREG, supv);
_mmu_process_fault (SIM_CPU *cpu, struct bfin_mmu *mmu, bu32 addr, bool write,
                    bool inst, bool unaligned, bool miss, bool supv, bool dag1)

  /* See order in mmu_check_addr().  */
    excp = inst ? VEC_MISALI_I : VEC_MISALI_D;
  else if (addr >= BFIN_SYSTEM_MMR_BASE)
      excp = inst ? VEC_CPLB_I_M : VEC_CPLB_M;

      /* Misses are hardware errors.  */
      cec_hwerr (cpu, HWERR_EXTERN_ADDR);

  _mmu_log_fault (cpu, mmu, addr, write, inst, miss, supv, dag1, 0);
  cec_exception (cpu, excp);

mmu_process_fault (SIM_CPU *cpu, bu32 addr, bool write, bool inst,
                   bool unaligned, bool miss)

  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;

  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT)

  mmu = MMU_STATE (cpu);

  _mmu_process_fault (cpu, mmu, addr, write, inst, unaligned, miss,
                      cec_is_supervisor_mode (cpu),
                      BFIN_CPU_STATE.multi_pc == PCREG + 6);
    -2: no known problems
     1: protection violation
mmu_check_implicit_addr (SIM_CPU *cpu, bu32 addr, bool inst, int size,
                         bool supv, bool dag1)

  bool l1 = ((addr & 0xFF000000) == 0xFF000000);
  bu32 amask = (addr & 0xFFF00000);

  if (addr & (size - 1))

  /* MMRs may never be executable or accessed from usermode.  */
  if (addr >= BFIN_SYSTEM_MMR_BASE)

  else if (!supv || dag1)

      /* Some regions are not executable.  */
      /* XXX: Should this be in the model data ?  Core B 561 ?  */
      return (amask == 0xFFA00000) ? -1 : 1;
      /* Some regions are not readable.  */
      /* XXX: Should this be in the model data ?  Core B 561 ?  */
      return (amask != 0xFFA00000) ? -1 : 4;
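      /* 0xFFA00000 is the on-chip L1 instruction SRAM: the execute check
         above accepts only that region, while the data-read check rejects
         it, since L1 instruction memory cannot be read as plain data.  */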
/* Exception order per the PRM (first has highest):
     Inst Multiple CPLB Hits
     Inst Misaligned Access
     Inst Protection Violation
   Only the alignment matters in non-OS mode though.  */
_mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)

  SIM_DESC sd = CPU_STATE (cpu);
  struct bfin_mmu *mmu;
  bu32 *fault_status, *fault_addr, *mem_control, *cplb_addr, *cplb_data;
  bool supv, do_excp, dag1;

  supv = cec_is_supervisor_mode (cpu);
  dag1 = (BFIN_CPU_STATE.multi_pc == PCREG + 6);
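  /* dag1 appears to be a heuristic: during a parallel (multi-issue)
     instruction, a multi_pc of PCREG + 6 corresponds to the second 16-bit
     slot of the 8-byte bundle, which this model attributes to DAG1.  */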
  if (STATE_ENVIRONMENT (sd) != OPERATING_ENVIRONMENT || bfin_mmu_skip_cplbs)

      int ret = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);
      /* Valid hits and misses are OK in non-OS envs.  */

      _mmu_process_fault (cpu, NULL, addr, write, inst, (ret == 3), false, supv, dag1);

  mmu = MMU_STATE (cpu);
  fault_status = inst ? &mmu->icplb_fault_status : &mmu->dcplb_fault_status;
  fault_addr = inst ? &mmu->icplb_fault_addr : &mmu->dcplb_fault_addr;
  mem_control = inst ? &mmu->imem_control : &mmu->dmem_control;
  cplb_addr = inst ? &mmu->icplb_addr[0] : &mmu->dcplb_addr[0];
  cplb_data = inst ? &mmu->icplb_data[0] : &mmu->dcplb_data[0];

  /* CPLBs disabled -> little to do.  */
  if (!(*mem_control & ENCPLB))

  /* Check all the CPLBs first.  */
  for (i = 0; i < 16; ++i)

      const bu32 pages[4] = { 0x400, 0x1000, 0x100000, 0x400000 };
      bu32 addr_lo, addr_hi;
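      /* The two-bit PAGE_SIZE field of CPLB_DATAx (bits 17:16, per the
         ">> 16" below) indexes pages[]: 1KiB, 4KiB, 1MiB, or 4MiB.  For
         example a field value of 3 selects pages[3] = 0x400000, a 4MiB
         page.  */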
      /* Skip invalid entries.  */
      if (!(cplb_data[i] & CPLB_VALID))

      /* See if this entry covers this address.  */
      addr_lo = cplb_addr[i];
      addr_hi = cplb_addr[i] + pages[(cplb_data[i] & PAGE_SIZE) >> 16];
      if (addr < addr_lo || addr >= addr_hi)

          if (!supv && !(cplb_data[i] & CPLB_USER_WR))

          if (supv && !(cplb_data[i] & CPLB_SUPV_WR))

          if ((cplb_data[i] & (CPLB_WT | CPLB_L1_CHBL | CPLB_DIRTY)) == CPLB_L1_CHBL)
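          /* A write that hits a cacheable write-back page (CPLB_L1_CHBL set,
             CPLB_WT and CPLB_DIRTY clear) is flagged as a protection fault
             here, presumably so an OS can catch the first write to a clean
             page and set the DIRTY bit itself.  */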
          if (!supv && !(cplb_data[i] & CPLB_USER_RD))

  /* Handle default/implicit CPLBs.  */
  if (!do_excp && hits < 2)

      ihits = mmu_check_implicit_addr (cpu, addr, inst, size, supv, dag1);

      /* No faults and one match -> good to go.  */

          cec_hwerr (cpu, HWERR_EXTERN_ADDR);

  /* Normalize hit count so hits==2 is always multiple hit exception.  */
  hits = min (2, hits);

  _mmu_log_fault (cpu, mmu, addr, write, inst, hits == 0, supv, dag1, faults);

      int iexcps[] = { VEC_CPLB_I_M, VEC_CPLB_I_VL, VEC_CPLB_I_MHIT, VEC_MISALI_I };

      int dexcps[] = { VEC_CPLB_M, VEC_CPLB_VL, VEC_CPLB_MHIT, VEC_MISALI_D };
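      /* Both tables list the vectors in the same order -- miss, protection
         violation, multiple hit, misaligned -- in their instruction and data
         flavors respectively.  */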
mmu_check_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst, int size)

  int excp = _mmu_check_addr (cpu, addr, write, inst, size);

    cec_exception (cpu, excp);

mmu_check_cache_addr (SIM_CPU *cpu, bu32 addr, bool write, bool inst)

  cacheaddr = addr & ~(BFIN_L1_CACHE_BYTES - 1);
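  /* Cache maintenance instructions operate on whole lines, so the check
     below uses the line-aligned address and a full line's worth of bytes
     rather than the exact address that was supplied.  */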
  excp = _mmu_check_addr (cpu, cacheaddr, write, inst, BFIN_L1_CACHE_BYTES);

  /* Most exceptions are ignored with cache funcs.  */
  /* XXX: Not sure if we should be ignoring CPLB misses.  */
  if (excp == VEC_CPLB_I_VL)

  if (excp == VEC_CPLB_VL)

  cec_exception (cpu, excp);