/* binutils-gdb blob fb214448ee02509ac37678abf4617001b74a0a90 — sim/frv/memory.c  */
/* frv memory model.
   Copyright (C) 1999-2019 Free Software Foundation, Inc.
   Contributed by Red Hat

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20 #define WANT_CPU frvbf
21 #define WANT_CPU_FRVBF
23 #include "sim-main.h"
24 #include "cgen-mem.h"
25 #include "bfd.h"
27 /* Check for alignment and access restrictions. Return the corrected address.
29 static SI
30 fr400_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
32 /* Check access restrictions for double word loads only. */
33 if (align_mask == 7)
35 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
36 frv_queue_data_access_error_interrupt (current_cpu, address);
38 return address;
41 static SI
42 fr500_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
44 if (address & align_mask)
46 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
47 address &= ~align_mask;
50 if ((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff
51 || (USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
52 frv_queue_data_access_error_interrupt (current_cpu, address);
54 return address;
57 static SI
58 fr550_check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
60 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff
61 || (align_mask > 0x3
62 && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
63 frv_queue_data_access_error_interrupt (current_cpu, address);
65 return address;
68 static SI
69 check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
71 SIM_DESC sd = CPU_STATE (current_cpu);
72 switch (STATE_ARCHITECTURE (sd)->mach)
74 case bfd_mach_fr400:
75 case bfd_mach_fr450:
76 address = fr400_check_data_read_address (current_cpu, address,
77 align_mask);
78 break;
79 case bfd_mach_frvtomcat:
80 case bfd_mach_fr500:
81 case bfd_mach_frv:
82 address = fr500_check_data_read_address (current_cpu, address,
83 align_mask);
84 break;
85 case bfd_mach_fr550:
86 address = fr550_check_data_read_address (current_cpu, address,
87 align_mask);
88 break;
89 default:
90 break;
93 return address;
96 static SI
97 fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
99 if (address & align_mask)
101 /* Make sure that this exception is not masked. */
102 USI isr = GET_ISR ();
103 if (! GET_ISR_EMAM (isr))
105 /* Bad alignment causes a data_access_error on fr400. */
106 frv_queue_data_access_error_interrupt (current_cpu, address);
108 address &= ~align_mask;
110 /* Nothing to check. */
111 return address;
114 static SI
115 fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
117 if ((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff
118 || (USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff
119 || (USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff
120 || (USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff)
121 frv_queue_data_access_exception_interrupt (current_cpu);
123 return address;
126 static SI
127 fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
129 /* No alignment restrictions on fr550 */
131 if ((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff
132 || (USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff)
133 frv_queue_data_access_exception_interrupt (current_cpu);
134 else
136 USI hsr0 = GET_HSR0 ();
137 if (! GET_HSR0_RME (hsr0)
138 && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
139 frv_queue_data_access_exception_interrupt (current_cpu);
142 return address;
145 static SI
146 check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
148 SIM_DESC sd = CPU_STATE (current_cpu);
149 switch (STATE_ARCHITECTURE (sd)->mach)
151 case bfd_mach_fr400:
152 case bfd_mach_fr450:
153 address = fr400_check_readwrite_address (current_cpu, address,
154 align_mask);
155 break;
156 case bfd_mach_frvtomcat:
157 case bfd_mach_fr500:
158 case bfd_mach_frv:
159 address = fr500_check_readwrite_address (current_cpu, address,
160 align_mask);
161 break;
162 case bfd_mach_fr550:
163 address = fr550_check_readwrite_address (current_cpu, address,
164 align_mask);
165 break;
166 default:
167 break;
170 return address;
173 static PCADDR
174 fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
175 int align_mask)
177 if (address & align_mask)
179 frv_queue_instruction_access_error_interrupt (current_cpu);
180 address &= ~align_mask;
182 else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
183 frv_queue_instruction_access_error_interrupt (current_cpu);
185 return address;
188 static PCADDR
189 fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
190 int align_mask)
192 if (address & align_mask)
194 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
195 address &= ~align_mask;
198 if ((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff
199 || (USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
200 frv_queue_instruction_access_error_interrupt (current_cpu);
201 else if ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff
202 || (USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff
203 || (USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff)
204 frv_queue_instruction_access_exception_interrupt (current_cpu);
205 else
207 USI hsr0 = GET_HSR0 ();
208 if (! GET_HSR0_RME (hsr0)
209 && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
210 frv_queue_instruction_access_exception_interrupt (current_cpu);
213 return address;
216 static PCADDR
217 fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
218 int align_mask)
220 address &= ~align_mask;
222 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
223 frv_queue_instruction_access_error_interrupt (current_cpu);
224 else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
225 frv_queue_instruction_access_exception_interrupt (current_cpu);
226 else
228 USI hsr0 = GET_HSR0 ();
229 if (! GET_HSR0_RME (hsr0)
230 && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
231 frv_queue_instruction_access_exception_interrupt (current_cpu);
234 return address;
237 static PCADDR
238 check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
240 SIM_DESC sd = CPU_STATE (current_cpu);
241 switch (STATE_ARCHITECTURE (sd)->mach)
243 case bfd_mach_fr400:
244 case bfd_mach_fr450:
245 address = fr400_check_insn_read_address (current_cpu, address,
246 align_mask);
247 break;
248 case bfd_mach_frvtomcat:
249 case bfd_mach_fr500:
250 case bfd_mach_frv:
251 address = fr500_check_insn_read_address (current_cpu, address,
252 align_mask);
253 break;
254 case bfd_mach_fr550:
255 address = fr550_check_insn_read_address (current_cpu, address,
256 align_mask);
257 break;
258 default:
259 break;
262 return address;
265 /* Memory reads. */
267 frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
269 USI hsr0 = GET_HSR0 ();
270 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
272 /* Check for access exceptions. */
273 address = check_data_read_address (current_cpu, address, 0);
274 address = check_readwrite_address (current_cpu, address, 0);
276 /* If we need to count cycles, then the cache operation will be
277 initiated from the model profiling functions.
278 See frvbf_model_.... */
279 if (model_insn)
281 CPU_LOAD_ADDRESS (current_cpu) = address;
282 CPU_LOAD_LENGTH (current_cpu) = 1;
283 CPU_LOAD_SIGNED (current_cpu) = 1;
284 return 0xb7; /* any random value */
287 if (GET_HSR0_DCE (hsr0))
289 int cycles;
290 cycles = frv_cache_read (cache, 0, address);
291 if (cycles != 0)
292 return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
295 return GETMEMQI (current_cpu, pc, address);
299 frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
301 USI hsr0 = GET_HSR0 ();
302 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
304 /* Check for access exceptions. */
305 address = check_data_read_address (current_cpu, address, 0);
306 address = check_readwrite_address (current_cpu, address, 0);
308 /* If we need to count cycles, then the cache operation will be
309 initiated from the model profiling functions.
310 See frvbf_model_.... */
311 if (model_insn)
313 CPU_LOAD_ADDRESS (current_cpu) = address;
314 CPU_LOAD_LENGTH (current_cpu) = 1;
315 CPU_LOAD_SIGNED (current_cpu) = 0;
316 return 0xb7; /* any random value */
319 if (GET_HSR0_DCE (hsr0))
321 int cycles;
322 cycles = frv_cache_read (cache, 0, address);
323 if (cycles != 0)
324 return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
327 return GETMEMUQI (current_cpu, pc, address);
330 /* Read a HI which spans two cache lines */
331 static HI
332 read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
334 HI value = frvbf_read_mem_QI (current_cpu, pc, address);
335 value <<= 8;
336 value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
337 return T2H_2 (value);
341 frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
343 USI hsr0;
344 FRV_CACHE *cache;
346 /* Check for access exceptions. */
347 address = check_data_read_address (current_cpu, address, 1);
348 address = check_readwrite_address (current_cpu, address, 1);
350 /* If we need to count cycles, then the cache operation will be
351 initiated from the model profiling functions.
352 See frvbf_model_.... */
353 hsr0 = GET_HSR0 ();
354 cache = CPU_DATA_CACHE (current_cpu);
355 if (model_insn)
357 CPU_LOAD_ADDRESS (current_cpu) = address;
358 CPU_LOAD_LENGTH (current_cpu) = 2;
359 CPU_LOAD_SIGNED (current_cpu) = 1;
360 return 0xb711; /* any random value */
363 if (GET_HSR0_DCE (hsr0))
365 int cycles;
366 /* Handle access which crosses cache line boundary */
367 SIM_DESC sd = CPU_STATE (current_cpu);
368 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
370 if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
371 return read_mem_unaligned_HI (current_cpu, pc, address);
373 cycles = frv_cache_read (cache, 0, address);
374 if (cycles != 0)
375 return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
378 return GETMEMHI (current_cpu, pc, address);
382 frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
384 USI hsr0;
385 FRV_CACHE *cache;
387 /* Check for access exceptions. */
388 address = check_data_read_address (current_cpu, address, 1);
389 address = check_readwrite_address (current_cpu, address, 1);
391 /* If we need to count cycles, then the cache operation will be
392 initiated from the model profiling functions.
393 See frvbf_model_.... */
394 hsr0 = GET_HSR0 ();
395 cache = CPU_DATA_CACHE (current_cpu);
396 if (model_insn)
398 CPU_LOAD_ADDRESS (current_cpu) = address;
399 CPU_LOAD_LENGTH (current_cpu) = 2;
400 CPU_LOAD_SIGNED (current_cpu) = 0;
401 return 0xb711; /* any random value */
404 if (GET_HSR0_DCE (hsr0))
406 int cycles;
407 /* Handle access which crosses cache line boundary */
408 SIM_DESC sd = CPU_STATE (current_cpu);
409 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
411 if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
412 return read_mem_unaligned_HI (current_cpu, pc, address);
414 cycles = frv_cache_read (cache, 0, address);
415 if (cycles != 0)
416 return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
419 return GETMEMUHI (current_cpu, pc, address);
422 /* Read a SI which spans two cache lines */
423 static SI
424 read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
426 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
427 unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
428 char valarray[4];
429 SI SIvalue;
430 HI HIvalue;
432 switch (hi_len)
434 case 1:
435 valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
436 SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
437 SIvalue = H2T_4 (SIvalue);
438 memcpy (valarray + 1, (char*)&SIvalue, 3);
439 break;
440 case 2:
441 HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
442 HIvalue = H2T_2 (HIvalue);
443 memcpy (valarray, (char*)&HIvalue, 2);
444 HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
445 HIvalue = H2T_2 (HIvalue);
446 memcpy (valarray + 2, (char*)&HIvalue, 2);
447 break;
448 case 3:
449 SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
450 SIvalue = H2T_4 (SIvalue);
451 memcpy (valarray, (char*)&SIvalue, 3);
452 valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
453 break;
454 default:
455 abort (); /* can't happen */
457 return T2H_4 (*(SI*)valarray);
461 frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
463 FRV_CACHE *cache;
464 USI hsr0;
466 /* Check for access exceptions. */
467 address = check_data_read_address (current_cpu, address, 3);
468 address = check_readwrite_address (current_cpu, address, 3);
470 hsr0 = GET_HSR0 ();
471 cache = CPU_DATA_CACHE (current_cpu);
472 /* If we need to count cycles, then the cache operation will be
473 initiated from the model profiling functions.
474 See frvbf_model_.... */
475 if (model_insn)
477 CPU_LOAD_ADDRESS (current_cpu) = address;
478 CPU_LOAD_LENGTH (current_cpu) = 4;
479 return 0x37111319; /* any random value */
482 if (GET_HSR0_DCE (hsr0))
484 int cycles;
485 /* Handle access which crosses cache line boundary */
486 SIM_DESC sd = CPU_STATE (current_cpu);
487 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
489 if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
490 return read_mem_unaligned_SI (current_cpu, pc, address);
492 cycles = frv_cache_read (cache, 0, address);
493 if (cycles != 0)
494 return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
497 return GETMEMSI (current_cpu, pc, address);
501 frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
503 return frvbf_read_mem_SI (current_cpu, pc, address);
506 /* Read a SI which spans two cache lines */
507 static DI
508 read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
510 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
511 unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
512 DI value, value1;
514 switch (hi_len)
516 case 1:
517 value = frvbf_read_mem_QI (current_cpu, pc, address);
518 value <<= 56;
519 value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
520 value1 = H2T_8 (value1);
521 value |= value1 & ((DI)0x00ffffff << 32);
522 value |= value1 & 0xffffffffu;
523 break;
524 case 2:
525 value = frvbf_read_mem_HI (current_cpu, pc, address);
526 value = H2T_2 (value);
527 value <<= 48;
528 value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
529 value1 = H2T_8 (value1);
530 value |= value1 & ((DI)0x0000ffff << 32);
531 value |= value1 & 0xffffffffu;
532 break;
533 case 3:
534 value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
535 value = H2T_4 (value);
536 value <<= 40;
537 value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
538 value1 = H2T_8 (value1);
539 value |= value1 & ((DI)0x000000ff << 32);
540 value |= value1 & 0xffffffffu;
541 break;
542 case 4:
543 value = frvbf_read_mem_SI (current_cpu, pc, address);
544 value = H2T_4 (value);
545 value <<= 32;
546 value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
547 value1 = H2T_4 (value1);
548 value |= value1 & 0xffffffffu;
549 break;
550 case 5:
551 value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
552 value = H2T_8 (value);
553 value <<= 24;
554 value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
555 value1 = H2T_4 (value1);
556 value |= value1 & 0x00ffffff;
557 break;
558 case 6:
559 value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
560 value = H2T_8 (value);
561 value <<= 16;
562 value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
563 value1 = H2T_2 (value1);
564 value |= value1 & 0x0000ffff;
565 break;
566 case 7:
567 value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
568 value = H2T_8 (value);
569 value <<= 8;
570 value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
571 value |= value1 & 0x000000ff;
572 break;
573 default:
574 abort (); /* can't happen */
576 return T2H_8 (value);
580 frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
582 USI hsr0;
583 FRV_CACHE *cache;
585 /* Check for access exceptions. */
586 address = check_data_read_address (current_cpu, address, 7);
587 address = check_readwrite_address (current_cpu, address, 7);
589 /* If we need to count cycles, then the cache operation will be
590 initiated from the model profiling functions.
591 See frvbf_model_.... */
592 hsr0 = GET_HSR0 ();
593 cache = CPU_DATA_CACHE (current_cpu);
594 if (model_insn)
596 CPU_LOAD_ADDRESS (current_cpu) = address;
597 CPU_LOAD_LENGTH (current_cpu) = 8;
598 return 0x37111319; /* any random value */
601 if (GET_HSR0_DCE (hsr0))
603 int cycles;
604 /* Handle access which crosses cache line boundary */
605 SIM_DESC sd = CPU_STATE (current_cpu);
606 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
608 if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
609 return read_mem_unaligned_DI (current_cpu, pc, address);
611 cycles = frv_cache_read (cache, 0, address);
612 if (cycles != 0)
613 return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
616 return GETMEMDI (current_cpu, pc, address);
620 frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
622 USI hsr0;
623 FRV_CACHE *cache;
625 /* Check for access exceptions. */
626 address = check_data_read_address (current_cpu, address, 7);
627 address = check_readwrite_address (current_cpu, address, 7);
629 /* If we need to count cycles, then the cache operation will be
630 initiated from the model profiling functions.
631 See frvbf_model_.... */
632 hsr0 = GET_HSR0 ();
633 cache = CPU_DATA_CACHE (current_cpu);
634 if (model_insn)
636 CPU_LOAD_ADDRESS (current_cpu) = address;
637 CPU_LOAD_LENGTH (current_cpu) = 8;
638 return 0x37111319; /* any random value */
641 if (GET_HSR0_DCE (hsr0))
643 int cycles;
644 /* Handle access which crosses cache line boundary */
645 SIM_DESC sd = CPU_STATE (current_cpu);
646 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
648 if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
649 return read_mem_unaligned_DI (current_cpu, pc, address);
651 cycles = frv_cache_read (cache, 0, address);
652 if (cycles != 0)
653 return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
656 return GETMEMDF (current_cpu, pc, address);
660 frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
662 USI hsr0;
663 vpc = check_insn_read_address (current_cpu, vpc, 3);
665 hsr0 = GET_HSR0 ();
666 if (GET_HSR0_ICE (hsr0))
668 FRV_CACHE *cache;
669 USI value;
671 /* We don't want this to show up in the cache statistics. That read
672 is done in frvbf_simulate_insn_prefetch. So read the cache or memory
673 passively here. */
674 cache = CPU_INSN_CACHE (current_cpu);
675 if (frv_cache_read_passive_SI (cache, vpc, &value))
676 return value;
678 return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
681 static SI
682 fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
684 if (align_mask == 7
685 && address >= 0xfe800000 && address <= 0xfeffffff)
686 frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
688 return address;
691 static SI
692 fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
694 if (address & align_mask)
696 struct frv_interrupt_queue_element *item =
697 frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
698 /* Record the correct vliw slot with the interrupt. */
699 if (item != NULL)
700 item->slot = frv_interrupt_state.slot;
701 address &= ~align_mask;
703 if (address >= 0xfeff0600 && address <= 0xfeff7fff
704 || address >= 0xfe800000 && address <= 0xfefeffff)
705 frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
707 return address;
710 static SI
711 fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
713 if ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff
714 || (align_mask > 0x3
715 && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
716 frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);
718 return address;
721 static SI
722 check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
724 SIM_DESC sd = CPU_STATE (current_cpu);
725 switch (STATE_ARCHITECTURE (sd)->mach)
727 case bfd_mach_fr400:
728 case bfd_mach_fr450:
729 address = fr400_check_write_address (current_cpu, address, align_mask);
730 break;
731 case bfd_mach_frvtomcat:
732 case bfd_mach_fr500:
733 case bfd_mach_frv:
734 address = fr500_check_write_address (current_cpu, address, align_mask);
735 break;
736 case bfd_mach_fr550:
737 address = fr550_check_write_address (current_cpu, address, align_mask);
738 break;
739 default:
740 break;
742 return address;
745 void
746 frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
748 USI hsr0;
749 hsr0 = GET_HSR0 ();
750 if (GET_HSR0_DCE (hsr0))
751 sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
752 else
753 sim_queue_mem_qi_write (current_cpu, address, value);
754 frv_set_write_queue_slot (current_cpu);
757 void
758 frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
760 frvbf_write_mem_QI (current_cpu, pc, address, value);
763 void
764 frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
766 USI hsr0;
767 hsr0 = GET_HSR0 ();
768 if (GET_HSR0_DCE (hsr0))
769 sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
770 else
771 sim_queue_mem_hi_write (current_cpu, address, value);
772 frv_set_write_queue_slot (current_cpu);
775 void
776 frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
778 frvbf_write_mem_HI (current_cpu, pc, address, value);
781 void
782 frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
784 USI hsr0;
785 hsr0 = GET_HSR0 ();
786 if (GET_HSR0_DCE (hsr0))
787 sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
788 else
789 sim_queue_mem_si_write (current_cpu, address, value);
790 frv_set_write_queue_slot (current_cpu);
793 void
794 frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
796 frvbf_write_mem_SI (current_cpu, pc, address, value);
799 void
800 frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
802 USI hsr0;
803 hsr0 = GET_HSR0 ();
804 if (GET_HSR0_DCE (hsr0))
805 sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
806 else
807 sim_queue_mem_di_write (current_cpu, address, value);
808 frv_set_write_queue_slot (current_cpu);
811 void
812 frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
814 USI hsr0;
815 hsr0 = GET_HSR0 ();
816 if (GET_HSR0_DCE (hsr0))
817 sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
818 else
819 sim_queue_mem_df_write (current_cpu, address, value);
820 frv_set_write_queue_slot (current_cpu);
823 /* Memory writes. These do the actual writing through the cache. */
824 void
825 frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
827 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
829 /* Check for access errors. */
830 address = check_write_address (current_cpu, address, 0);
831 address = check_readwrite_address (current_cpu, address, 0);
833 /* If we need to count cycles, then submit the write request to the cache
834 and let it prioritize the request. Otherwise perform the write now. */
835 if (model_insn)
837 int slot = UNIT_I0;
838 frv_cache_request_store (cache, address, slot, (char *)&value,
839 sizeof (value));
841 else
842 frv_cache_write (cache, address, (char *)&value, sizeof (value));
845 /* Write a HI which spans two cache lines */
846 static void
847 mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
849 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
850 /* value is already in target byte order */
851 frv_cache_write (cache, address, (char *)&value, 1);
852 frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
855 void
856 frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
858 FRV_CACHE *cache;
860 /* Check for access errors. */
861 address = check_write_address (current_cpu, address, 1);
862 address = check_readwrite_address (current_cpu, address, 1);
864 /* If we need to count cycles, then submit the write request to the cache
865 and let it prioritize the request. Otherwise perform the write now. */
866 value = H2T_2 (value);
867 cache = CPU_DATA_CACHE (current_cpu);
868 if (model_insn)
870 int slot = UNIT_I0;
871 frv_cache_request_store (cache, address, slot,
872 (char *)&value, sizeof (value));
874 else
876 /* Handle access which crosses cache line boundary */
877 SIM_DESC sd = CPU_STATE (current_cpu);
878 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
880 if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
882 mem_set_unaligned_HI (current_cpu, pc, address, value);
883 return;
886 frv_cache_write (cache, address, (char *)&value, sizeof (value));
890 /* Write a SI which spans two cache lines */
891 static void
892 mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
894 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
895 unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
896 /* value is already in target byte order */
897 frv_cache_write (cache, address, (char *)&value, hi_len);
898 frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 4 - hi_len);
901 void
902 frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
904 FRV_CACHE *cache;
906 /* Check for access errors. */
907 address = check_write_address (current_cpu, address, 3);
908 address = check_readwrite_address (current_cpu, address, 3);
910 /* If we need to count cycles, then submit the write request to the cache
911 and let it prioritize the request. Otherwise perform the write now. */
912 cache = CPU_DATA_CACHE (current_cpu);
913 value = H2T_4 (value);
914 if (model_insn)
916 int slot = UNIT_I0;
917 frv_cache_request_store (cache, address, slot,
918 (char *)&value, sizeof (value));
920 else
922 /* Handle access which crosses cache line boundary */
923 SIM_DESC sd = CPU_STATE (current_cpu);
924 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
926 if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
928 mem_set_unaligned_SI (current_cpu, pc, address, value);
929 return;
932 frv_cache_write (cache, address, (char *)&value, sizeof (value));
936 /* Write a DI which spans two cache lines */
937 static void
938 mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
940 FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
941 unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
942 /* value is already in target byte order */
943 frv_cache_write (cache, address, (char *)&value, hi_len);
944 frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 8 - hi_len);
947 void
948 frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
950 FRV_CACHE *cache;
952 /* Check for access errors. */
953 address = check_write_address (current_cpu, address, 7);
954 address = check_readwrite_address (current_cpu, address, 7);
956 /* If we need to count cycles, then submit the write request to the cache
957 and let it prioritize the request. Otherwise perform the write now. */
958 value = H2T_8 (value);
959 cache = CPU_DATA_CACHE (current_cpu);
960 if (model_insn)
962 int slot = UNIT_I0;
963 frv_cache_request_store (cache, address, slot,
964 (char *)&value, sizeof (value));
966 else
968 /* Handle access which crosses cache line boundary */
969 SIM_DESC sd = CPU_STATE (current_cpu);
970 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
972 if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
974 mem_set_unaligned_DI (current_cpu, pc, address, value);
975 return;
978 frv_cache_write (cache, address, (char *)&value, sizeof (value));
982 void
983 frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
985 FRV_CACHE *cache;
987 /* Check for access errors. */
988 address = check_write_address (current_cpu, address, 7);
989 address = check_readwrite_address (current_cpu, address, 7);
991 /* If we need to count cycles, then submit the write request to the cache
992 and let it prioritize the request. Otherwise perform the write now. */
993 value = H2T_8 (value);
994 cache = CPU_DATA_CACHE (current_cpu);
995 if (model_insn)
997 int slot = UNIT_I0;
998 frv_cache_request_store (cache, address, slot,
999 (char *)&value, sizeof (value));
1001 else
1003 /* Handle access which crosses cache line boundary */
1004 SIM_DESC sd = CPU_STATE (current_cpu);
1005 if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
1007 if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
1009 mem_set_unaligned_DI (current_cpu, pc, address, value);
1010 return;
1013 frv_cache_write (cache, address, (char *)&value, sizeof (value));
1017 void
1018 frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
1020 int i;
1021 FRV_CACHE *cache;
1023 /* Check for access errors. */
1024 address = check_write_address (current_cpu, address, 0xf);
1025 address = check_readwrite_address (current_cpu, address, 0xf);
1027 /* TODO -- reverse word order as well? */
1028 for (i = 0; i < 4; ++i)
1029 value[i] = H2T_4 (value[i]);
1031 /* If we need to count cycles, then submit the write request to the cache
1032 and let it prioritize the request. Otherwise perform the write now. */
1033 cache = CPU_DATA_CACHE (current_cpu);
1034 if (model_insn)
1036 int slot = UNIT_I0;
1037 frv_cache_request_store (cache, address, slot, (char*)value, 16);
1039 else
1040 frv_cache_write (cache, address, (char*)value, 16);
1043 /* Record the current VLIW slot on the element at the top of the write queue.
1045 void
1046 frv_set_write_queue_slot (SIM_CPU *current_cpu)
1048 FRV_VLIW *vliw = CPU_VLIW (current_cpu);
1049 int slot = vliw->next_slot - 1;
1050 CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
1051 int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
1052 CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
1053 CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];