/* frv memory model.
   Copyright (C) 1999, 2000, 2001, 2003 Free Software Foundation, Inc.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#define WANT_CPU frvbf
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "cgen-mem.h"
#include "bfd.h"
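/* This file implements the memory interface of the frvbf simulator:
   per-machine address checking helpers, the frvbf_read_mem_* and
   frvbf_write_mem_* handlers called from the generated semantic code, and
   the frvbf_mem_set_* functions that perform the queued writes through the
   data cache.  */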
/* Check for alignment and access restrictions.  Return the corrected address.
 */
static SI
fr400_check_data_read_address (SIM_CPU *current_cpu, SI address,
                               int align_mask)
{
  /* Check access restrictions for double word loads only.  */
  if (align_mask == 7)
    {
      if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
        frv_queue_data_access_error_interrupt (current_cpu, address);
    }

  return address;
}
static SI
fr500_check_data_read_address (SIM_CPU *current_cpu, SI address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}
static SI
fr550_check_data_read_address (SIM_CPU *current_cpu, SI address,
                               int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_data_access_error_interrupt (current_cpu, address);

  return address;
}
static SI
check_data_read_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_data_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}
static SI
fr400_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      /* Make sure that this exception is not masked.  */
      USI isr = GET_ISR ();
      if (! GET_ISR_EMAM (isr))
        {
          /* Bad alignment causes a data_access_error on fr400.  */
          frv_queue_data_access_error_interrupt (current_cpu, address);
        }
      address &= ~align_mask;
    }
  /* Nothing to check.  */
  return address;
}
static SI
fr500_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
      || ((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
      || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);

  return address;
}
static SI
fr550_check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* No alignment restrictions on fr550 */

  if (((USI)address >= 0xfe000000 && (USI)address <= 0xfe3fffff)
      || ((USI)address >= 0xfe408000 && (USI)address <= 0xfe7fffff))
    frv_queue_data_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe400000 && (USI)address <= 0xfe407fff)
        frv_queue_data_access_exception_interrupt (current_cpu);
    }

  return address;
}
static SI
check_readwrite_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_readwrite_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}
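/* Instruction fetch addresses are checked separately from data addresses:
   bad fetches queue instruction_access_error / instruction_access_exception
   interrupts instead of the data_access ones used above.  */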
static PCADDR
fr400_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_instruction_access_error_interrupt (current_cpu);
      address &= ~align_mask;
    }
  else if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);

  return address;
}
static PCADDR
fr500_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  if (address & align_mask)
    {
      frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      address &= ~align_mask;
    }

  if (((USI)address >= 0xfeff0600 && (USI)address <= 0xfeff7fff)
      || ((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff))
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if (((USI)address >= 0xfe004000 && (USI)address <= 0xfe3fffff)
           || ((USI)address >= 0xfe400000 && (USI)address <= 0xfe403fff)
           || ((USI)address >= 0xfe404000 && (USI)address <= 0xfe7fffff))
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe003fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}
static PCADDR
fr550_check_insn_read_address (SIM_CPU *current_cpu, PCADDR address,
                               int align_mask)
{
  address &= ~align_mask;

  if ((USI)address >= 0xfe800000 && (USI)address <= 0xfeffffff)
    frv_queue_instruction_access_error_interrupt (current_cpu);
  else if ((USI)address >= 0xfe008000 && (USI)address <= 0xfe7fffff)
    frv_queue_instruction_access_exception_interrupt (current_cpu);
  else
    {
      USI hsr0 = GET_HSR0 ();
      if (! GET_HSR0_RME (hsr0)
          && (USI)address >= 0xfe000000 && (USI)address <= 0xfe007fff)
        frv_queue_instruction_access_exception_interrupt (current_cpu);
    }

  return address;
}
static PCADDR
check_insn_read_address (SIM_CPU *current_cpu, PCADDR address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_insn_read_address (current_cpu, address,
                                               align_mask);
      break;
    default:
      break;
    }

  return address;
}
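/* Memory reads.  Each frvbf_read_mem_* handler follows the same pattern:
   check the address; if insn modeling is active, just record the load so
   the model profiling code can initiate the cache operation; otherwise
   read through the data cache when HSR0.DCE is set, or straight from
   simulated memory when it is not.  */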
QI
frvbf_read_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, QI, 1);
    }

  return GETMEMQI (current_cpu, pc, address);
}
UQI
frvbf_read_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0 = GET_HSR0 ();
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 1;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb7; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UQI, 1);
    }

  return GETMEMUQI (current_cpu, pc, address);
}
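/* On the fr550 a load may straddle a data cache line.  The helpers below
   split such an access into smaller reads, reassemble the bytes in target
   byte order, and convert the result back to host order.  */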
/* Read a HI which spans two cache lines */
static HI
read_mem_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  HI value = frvbf_read_mem_QI (current_cpu, pc, address);
  value <<= 8;
  value |= frvbf_read_mem_UQI (current_cpu, pc, address + 1);
  return T2H_2 (value);
}
HI
frvbf_read_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 1;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, HI, 2);
    }

  return GETMEMHI (current_cpu, pc, address);
}
UHI
frvbf_read_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 2;
      CPU_LOAD_SIGNED (current_cpu) = 0;
      return 0xb711; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            return read_mem_unaligned_HI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, UHI, 2);
    }

  return GETMEMUHI (current_cpu, pc, address);
}
/* Read a SI which spans two cache lines */
static SI
read_mem_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  char valarray[4];
  SI SIvalue;
  HI HIvalue;

  switch (hi_len)
    {
    case 1:
      valarray[0] = frvbf_read_mem_QI (current_cpu, pc, address);
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address + 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray + 1, (char*)&SIvalue, 3);
      break;
    case 2:
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray, (char*)&HIvalue, 2);
      HIvalue = frvbf_read_mem_HI (current_cpu, pc, address + 2);
      HIvalue = H2T_2 (HIvalue);
      memcpy (valarray + 2, (char*)&HIvalue, 2);
      break;
    case 3:
      SIvalue = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      SIvalue = H2T_4 (SIvalue);
      memcpy (valarray, (char*)&SIvalue + 1, 3);
      valarray[3] = frvbf_read_mem_QI (current_cpu, pc, address + 3);
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_4 (*(SI*)valarray);
}
SI
frvbf_read_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache;
  USI hsr0;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 4;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            return read_mem_unaligned_SI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, SI, 4);
    }

  return GETMEMSI (current_cpu, pc, address);
}
SI
frvbf_read_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  return frvbf_read_mem_SI (current_cpu, pc, address);
}
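/* Worked example of the hi_len computation used below (illustrative sizes
   only, not necessarily the configured fr550 line size): with a 32-byte
   cache line and an address at offset 29 within the line,
   hi_len = 32 - 29 = 3, so three bytes of a DI come from the current line
   and the remaining five from the next one (the "case 3" arm).  */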
/* Read a DI which spans two cache lines */
static DI
read_mem_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  DI value, value1;

  switch (hi_len)
    {
    case 1:
      value = frvbf_read_mem_QI (current_cpu, pc, address);
      value <<= 56;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 1);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x00ffffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 2:
      value = frvbf_read_mem_HI (current_cpu, pc, address);
      value = H2T_2 (value);
      value <<= 48;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 2);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x0000ffff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 3:
      value = frvbf_read_mem_SI (current_cpu, pc, address - 1);
      value = H2T_4 (value);
      value <<= 40;
      value1 = frvbf_read_mem_DI (current_cpu, pc, address + 3);
      value1 = H2T_8 (value1);
      value |= value1 & ((DI)0x000000ff << 32);
      value |= value1 & 0xffffffffu;
      break;
    case 4:
      value = frvbf_read_mem_SI (current_cpu, pc, address);
      value = H2T_4 (value);
      value <<= 32;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 4);
      value1 = H2T_4 (value1);
      value |= value1 & 0xffffffffu;
      break;
    case 5:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 3);
      value = H2T_8 (value);
      value <<= 24;
      value1 = frvbf_read_mem_SI (current_cpu, pc, address + 5);
      value1 = H2T_4 (value1);
      value |= value1 & 0x00ffffff;
      break;
    case 6:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 2);
      value = H2T_8 (value);
      value <<= 16;
      value1 = frvbf_read_mem_HI (current_cpu, pc, address + 6);
      value1 = H2T_2 (value1);
      value |= value1 & 0x0000ffff;
      break;
    case 7:
      value = frvbf_read_mem_DI (current_cpu, pc, address - 1);
      value = H2T_8 (value);
      value <<= 8;
      value1 = frvbf_read_mem_QI (current_cpu, pc, address + 7);
      value |= value1 & 0x000000ff;
      break;
    default:
      abort (); /* can't happen */
    }
  return T2H_8 (value);
}
DI
frvbf_read_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DI, 8);
    }

  return GETMEMDI (current_cpu, pc, address);
}
DF
frvbf_read_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address)
{
  USI hsr0;
  FRV_CACHE *cache;

  /* Check for access exceptions.  */
  address = check_data_read_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then the cache operation will be
     initiated from the model profiling functions.
     See frvbf_model_....  */
  hsr0 = GET_HSR0 ();
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      CPU_LOAD_ADDRESS (current_cpu) = address;
      CPU_LOAD_LENGTH (current_cpu) = 8;
      return 0x37111319; /* any random value */
    }

  if (GET_HSR0_DCE (hsr0))
    {
      int cycles;
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            return read_mem_unaligned_DI (current_cpu, pc, address);
        }
      cycles = frv_cache_read (cache, 0, address);
      if (cycles != 0)
        return CACHE_RETURN_DATA (cache, 0, address, DF, 8);
    }

  return GETMEMDF (current_cpu, pc, address);
}
USI
frvbf_read_imem_USI (SIM_CPU *current_cpu, PCADDR vpc)
{
  USI hsr0;
  vpc = check_insn_read_address (current_cpu, vpc, 3);

  hsr0 = GET_HSR0 ();
  if (GET_HSR0_ICE (hsr0))
    {
      FRV_CACHE *cache;
      USI value;

      /* We don't want this to show up in the cache statistics.  That read
         is done in frvbf_simulate_insn_prefetch.  So read the cache or memory
         passively here.  */
      cache = CPU_INSN_CACHE (current_cpu);
      if (frv_cache_read_passive_SI (cache, vpc, &value))
        return value;
    }
  return sim_core_read_unaligned_4 (current_cpu, vpc, read_map, vpc);
}
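/* Write address checks.  Stores to a restricted region queue a
   FRV_DATA_STORE_ERROR program interrupt; misaligned stores queue a
   mem_address_not_aligned interrupt, as with loads.  */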
static SI
fr400_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  /* Check access restrictions for double word stores only.  */
  if (align_mask == 7
      && address >= 0xfe800000 && address <= 0xfeffffff)
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}
static SI
fr500_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (address & align_mask)
    {
      struct frv_interrupt_queue_element *item =
        frv_queue_mem_address_not_aligned_interrupt (current_cpu, address);
      /* Record the correct vliw slot with the interrupt.  */
      if (item != NULL)
        item->slot = frv_interrupt_state.slot;
      address &= ~align_mask;
    }
  if ((address >= 0xfeff0600 && address <= 0xfeff7fff)
      || (address >= 0xfe800000 && address <= 0xfefeffff))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}
static SI
fr550_check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  if (((USI)address >= 0xfe800000 && (USI)address <= 0xfefeffff)
      || (align_mask > 3
          && ((USI)address >= 0xfeff0000 && (USI)address <= 0xfeffffff)))
    frv_queue_program_interrupt (current_cpu, FRV_DATA_STORE_ERROR);

  return address;
}
static SI
check_write_address (SIM_CPU *current_cpu, SI address, int align_mask)
{
  SIM_DESC sd = CPU_STATE (current_cpu);
  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
      address = fr400_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_frvtomcat:
    case bfd_mach_fr500:
    case bfd_mach_frv:
      address = fr500_check_write_address (current_cpu, address, align_mask);
      break;
    case bfd_mach_fr550:
      address = fr550_check_write_address (current_cpu, address, align_mask);
      break;
    default:
      break;
    }

  return address;
}
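/* The frvbf_write_mem_* handlers below do not store anything themselves:
   they queue the write on the CGEN write queue (as a function write via
   frvbf_mem_set_* when the data cache is enabled, as a plain memory write
   otherwise) and tag the queue entry with the issuing VLIW slot.  The
   queued writes are performed later, when the write queue is flushed.  */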
void
frvbf_write_mem_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  USI hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_qi_write (current_cpu, frvbf_mem_set_QI, address, value);
  else
    sim_queue_mem_qi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}
void
frvbf_write_mem_UQI (SIM_CPU *current_cpu, IADDR pc, SI address, UQI value)
{
  frvbf_write_mem_QI (current_cpu, pc, address, value);
}
void
frvbf_write_mem_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  USI hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_hi_write (current_cpu, frvbf_mem_set_HI, address, value);
  else
    sim_queue_mem_hi_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}
void
frvbf_write_mem_UHI (SIM_CPU *current_cpu, IADDR pc, SI address, UHI value)
{
  frvbf_write_mem_HI (current_cpu, pc, address, value);
}
void
frvbf_write_mem_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  USI hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_si_write (current_cpu, frvbf_mem_set_SI, address, value);
  else
    sim_queue_mem_si_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}
void
frvbf_write_mem_WI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  frvbf_write_mem_SI (current_cpu, pc, address, value);
}
void
frvbf_write_mem_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  USI hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_di_write (current_cpu, frvbf_mem_set_DI, address, value);
  else
    sim_queue_mem_di_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}
void
frvbf_write_mem_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  USI hsr0 = GET_HSR0 ();
  if (GET_HSR0_DCE (hsr0))
    sim_queue_fn_mem_df_write (current_cpu, frvbf_mem_set_DF, address, value);
  else
    sim_queue_mem_df_write (current_cpu, address, value);
  frv_set_write_queue_slot (current_cpu);
}
/* Memory writes.  These do the actual writing through the cache.  */
void
frvbf_mem_set_QI (SIM_CPU *current_cpu, IADDR pc, SI address, QI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0);
  address = check_readwrite_address (current_cpu, address, 0);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char *)&value,
                               sizeof (value));
    }
  else
    frv_cache_write (cache, address, (char *)&value, sizeof (value));
}
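/* The mem_set_unaligned_* helpers below are only reached on the fr550 when
   a store crosses a data cache line.  The value passed in is already in
   target byte order, so the store is simply split into two cache writes.  */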
/* Write a HI which spans two cache lines */
static void
mem_set_unaligned_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  /* value is already in target byte order */
  frv_cache_write (cache, address, (char *)&value, 1);
  frv_cache_write (cache, address + 1, ((char *)&value + 1), 1);
}
void
frvbf_mem_set_HI (SIM_CPU *current_cpu, IADDR pc, SI address, HI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 1);
  address = check_readwrite_address (current_cpu, address, 1);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_2 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 2))
            {
              mem_set_unaligned_HI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
/* Write a SI which spans two cache lines */
static void
mem_set_unaligned_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 4 - hi_len);
}
void
frvbf_mem_set_SI (SIM_CPU *current_cpu, IADDR pc, SI address, SI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 3);
  address = check_readwrite_address (current_cpu, address, 3);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  value = H2T_4 (value);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 4))
            {
              mem_set_unaligned_SI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
/* Write a DI which spans two cache lines */
static void
mem_set_unaligned_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache = CPU_DATA_CACHE (current_cpu);
  unsigned hi_len = cache->line_size - (address & (cache->line_size - 1));
  /* value is already in target byte order */
  frv_cache_write (cache, address, (char *)&value, hi_len);
  frv_cache_write (cache, address + hi_len, (char *)&value + hi_len, 8 - hi_len);
}
void
frvbf_mem_set_DI (SIM_CPU *current_cpu, IADDR pc, SI address, DI value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
void
frvbf_mem_set_DF (SIM_CPU *current_cpu, IADDR pc, SI address, DF value)
{
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 7);
  address = check_readwrite_address (current_cpu, address, 7);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  value = H2T_8 (value);
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot,
                               (char *)&value, sizeof (value));
    }
  else
    {
      /* Handle access which crosses cache line boundary */
      SIM_DESC sd = CPU_STATE (current_cpu);
      if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr550)
        {
          if (DATA_CROSSES_CACHE_LINE (cache, address, 8))
            {
              mem_set_unaligned_DI (current_cpu, pc, address, value);
              return;
            }
        }
      frv_cache_write (cache, address, (char *)&value, sizeof (value));
    }
}
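/* Quad-word (XI) stores write four SI words from VALUE; each word is
   converted to target byte order individually (see the TODO below about
   word order).  */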
void
frvbf_mem_set_XI (SIM_CPU *current_cpu, IADDR pc, SI address, SI *value)
{
  int i;
  FRV_CACHE *cache;

  /* Check for access errors.  */
  address = check_write_address (current_cpu, address, 0xf);
  address = check_readwrite_address (current_cpu, address, 0xf);

  /* TODO -- reverse word order as well?  */
  for (i = 0; i < 4; ++i)
    value[i] = H2T_4 (value[i]);

  /* If we need to count cycles, then submit the write request to the cache
     and let it prioritize the request.  Otherwise perform the write now.  */
  cache = CPU_DATA_CACHE (current_cpu);
  if (model_insn)
    {
      int slot = UNIT_I0;
      frv_cache_request_store (cache, address, slot, (char*)value, 16);
    }
  else
    frv_cache_write (cache, address, (char*)value, 16);
}
/* Record the current VLIW slot on the element at the top of the write queue.
 */
void
frv_set_write_queue_slot (SIM_CPU *current_cpu)
{
  FRV_VLIW *vliw = CPU_VLIW (current_cpu);
  int slot = vliw->next_slot - 1;
  CGEN_WRITE_QUEUE *q = CPU_WRITE_QUEUE (current_cpu);
  int ix = CGEN_WRITE_QUEUE_INDEX (q) - 1;
  CGEN_WRITE_QUEUE_ELEMENT *item = CGEN_WRITE_QUEUE_ELEMENT (q, ix);
  CGEN_WRITE_QUEUE_ELEMENT_PIPE (item) = (*vliw->current_vliw)[slot];
}