/* frv simulator machine independent profiling code.

   Copyright (C) 1998-2020 Free Software Foundation, Inc.

   This file is part of the GNU simulators.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "bfd.h"

#if WITH_PROFILE_MODEL_P
#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
#include "profile-fr550.h"
static void
reset_gr_flags (SIM_CPU *cpu, INT gr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_gr_flags (cpu, gr);
  /* Other machines have no gr flags right now.  */
}

static void
reset_fr_flags (SIM_CPU *cpu, INT fr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_fr_flags (cpu, fr);
  else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_fr_flags (cpu, fr);
}

static void
reset_acc_flags (SIM_CPU *cpu, INT acc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_acc_flags (cpu, acc);
  /* Other machines have no acc flags right now.  */
}

static void
reset_cc_flags (SIM_CPU *cpu, INT cc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_cc_flags (cpu, cc);
  /* Other machines have no cc flags.  */
}
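/* The profile state records which GRs currently hold the result of a
   "complex" (multi-cycle) operation in cur_gr_complex, a DI (64-bit) mask
   with one bit per GR.  The three helpers below set, clear and test the
   bit for a given register.  */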
void
set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      reset_gr_flags (cpu, gr);
      ps->cur_gr_complex |= (((DI) 1) << gr);
    }
}

void
set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      ps->cur_gr_complex &= ~(((DI) 1) << gr);
    }
}

int
use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      return ps->cur_gr_complex & (((DI) 1) << gr);
    }
  return 0;
}
/* Global flag indicating whether this insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* Static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Print information about the wait applied to an entire VLIW insn.  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[] =
{
  {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address.  */
};

enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};

/* A queue of load requests from the data cache.  Used to keep track of loads
   which are still pending.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;
  unsigned reqno;
  SI address;
  int length;
  int is_signed;
  int regnum;
  int cycles;
  int regtype;
  int lock;
  int all;
  int slot;
  int active;
  enum cache_request request;
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;
  int ix;
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};
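/* Lifecycle of a queued cache request:
   - request_cache_load/flush/invalidate/preload/unlock fill in an inactive
     CACHE_QUEUE_ELEMENT describing the operation and the VLIW slot which
     issued it.
   - activate_cache_requests marks the queued requests active once the whole
     VLIW insn has been modeled; requests whose cycle count is already zero
     are submitted to the cache immediately.
   - run_caches counts down the remaining cycles of each active request,
     submits those which reach zero, and retires requests once
     request_complete reports that the cache has satisfied them.  */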
/* Queue a request for a load from the cache. The load will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the load is activated.  */
void
request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
     zero.  */
  if (CPU_LOAD_LENGTH (cpu) == 0)
    return;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_load;
  q->cache = CPU_DATA_CACHE (cpu);
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->is_signed = CPU_LOAD_SIGNED (cpu);
  q->regnum = regnum;
  q->regtype = regtype;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to flush the cache. The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_flush;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}
/* Queue a request to invalidate the cache. The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_invalidate;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to preload the cache. The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_preload;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->lock = CPU_LOAD_LOCK (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to unlock the cache. The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_unlock;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}
static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
                                 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      abort ();
    }
}

/* Activate all inactive load requests.  */
static void
activate_cache_requests (SIM_CPU *cpu)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
      if (! q->active)
        {
          q->active = 1;
          /* Submit the request now if the cycle count is zero.  */
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}
/* Check to see if a load is pending which affects the given register(s).  */
static int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
        continue;

      /* If the register numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
        return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
        {
          if (regnum + words > q->regnum)
            return 1;
        }
      /* Check for overlap of a multi-word load with the register.  */
      else
        {
          int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
          if (q->regnum + data_words > regnum)
            return 1;
        }
    }

  return 0; /* no load pending */
}

/* Check to see if a cache flush is pending which affects the given
   address.  */
static int
flush_pending_for_address (SIM_CPU *cpu, SI address)
{
  int i;
  int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of request and active.  */
      if (! q->active || q->request != cache_flush)
        continue;

      /* If the addresses are equal, then we have a match.  */
      if ((q->address & line_mask) == (address & line_mask))
        return 1; /* flush pending */
    }

  return 0; /* no flush pending */
}
static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* If we are removing the load of a FR register, then remember which
     one(s).  */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of a FR register, check to see if any other loads
     of that register are still queued. If not, then apply the queued post
     processing time of that register to its latency.  Also apply
     1 extra cycle of latency to the register since it was a floating point
     load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
        {
          int regnum = q.regnum + j;
          if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
            {
              if (q.regtype == REGTYPE_FR)
                {
                  int *fr = ps->fr_busy;
                  fr[regnum] += 1 + ps->fr_ptime[regnum];
                  ps->fr_ptime[regnum] = 0;
                }
            }
        }
    }
}
455 copy_load_data (SIM_CPU
*current_cpu
, FRV_CACHE
*cache
, int slot
,
456 CACHE_QUEUE_ELEMENT
*q
)
461 if (q
->regtype
== REGTYPE_FR
)
465 QI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, QI
, 1);
466 SET_H_FR (q
->regnum
, value
);
470 UQI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UQI
, 1);
471 SET_H_FR (q
->regnum
, value
);
478 QI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, QI
, 1);
479 SET_H_GR (q
->regnum
, value
);
483 UQI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UQI
, 1);
484 SET_H_GR (q
->regnum
, value
);
489 if (q
->regtype
== REGTYPE_FR
)
493 HI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, HI
, 2);
494 SET_H_FR (q
->regnum
, value
);
498 UHI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UHI
, 2);
499 SET_H_FR (q
->regnum
, value
);
506 HI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, HI
, 2);
507 SET_H_GR (q
->regnum
, value
);
511 UHI value
= CACHE_RETURN_DATA (cache
, slot
, q
->address
, UHI
, 2);
512 SET_H_GR (q
->regnum
, value
);
517 if (q
->regtype
== REGTYPE_FR
)
520 CACHE_RETURN_DATA (cache
, slot
, q
->address
, SF
, 4));
525 CACHE_RETURN_DATA (cache
, slot
, q
->address
, SI
, 4));
529 if (q
->regtype
== REGTYPE_FR
)
531 SET_H_FR_DOUBLE (q
->regnum
,
532 CACHE_RETURN_DATA (cache
, slot
, q
->address
, DF
, 8));
536 SET_H_GR_DOUBLE (q
->regnum
,
537 CACHE_RETURN_DATA (cache
, slot
, q
->address
, DI
, 8));
541 if (q
->regtype
== REGTYPE_FR
)
542 frvbf_h_fr_quad_set_handler (current_cpu
, q
->regnum
,
543 CACHE_RETURN_DATA_ADDRESS (cache
, slot
,
547 frvbf_h_gr_quad_set_handler (current_cpu
, q
->regnum
,
548 CACHE_RETURN_DATA_ADDRESS (cache
, slot
,
/* Return non-zero if the given cache request has been completed.  */
static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE *cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 0, q);
          return 1;
        }
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 1, q);
          return 1;
        }
      break;

    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
        return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
        return 1;
      break;

    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }

  return 0;
}

/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE *data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE *insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
        {
          if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
              && frv_cache_data_in_buffer (insn_cache, j,
                                           frv_insn_fetch_buffer[j].address,
                                           frv_insn_fetch_buffer[j].reqno))
            frv_insn_fetch_buffer[j].reqno = NO_REQNO;
        }

      /* Check to see which requests have been satisfied and which should
         be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
        {
          CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
          if (! q->active)
            continue;

          /* If a load has been satisfied, complete the operation and remove it
             from the queue.  */
          if (request_complete (cpu, q))
            {
              remove_cache_queue_element (cpu, j);
              --j;
              continue;
            }

          /* Decrease the cycle count of each queued request.
             Submit a request for each queued request whose cycle count has
             reached zero.  */
          --q->cycles;
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}
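/* Latency bookkeeping: decrease_FR_busy and decrease_ACC_busy record the
   largest requested decrease for each register in fr_busy_adjust and
   acc_busy_adjust, while enforce_full_fr_latency and enforce_full_acc_latency
   store -1 there to veto any adjustment.  apply_latency_adjustments below
   applies the recorded (positive) adjustments to the busy counts once per
   VLIW insn.  */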
static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
        *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
        *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}

/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *idiv;
  int *fdiv;
  int *fsqrt;
  int *flt;
  int *media;
  int *ccr;
  int *spr;
  int *gr  = ps->gr_busy;
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;

  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
        {
          *gr = 0;
          reset_gr_flags (cpu, i);
        }
      else
        *gr -= cycles;
      /* If the busy count drops to 0, then mark the register as
         ready.  */
      if (*fr <= cycles)
        {
          int *fr_lat = ps->fr_latency + i;
          *fr = 0;
          ps->fr_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*fr_lat == 0)
            reset_fr_flags (cpu, i);
        }
      else
        *fr -= cycles;
      /* If the busy count drops to 0, then mark the register as
         ready.  */
      if (*acc <= cycles)
        {
          int *acc_lat = ps->acc_latency + i;
          *acc = 0;
          ps->acc_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*acc_lat == 0)
            reset_acc_flags (cpu, i);
        }
      else
        *acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }

  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
        {
          *ccr = 0;
          reset_cc_flags (cpu, i);
        }
      else
        *ccr -= cycles;
      ++ccr;
    }

  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
        *spr = 0;
      else
        *spr -= cycles;
      ++spr;
    }

  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }
  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}
/* Print information about the wait for the given number of cycles.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
                    hazard_name, cycles);
    }
}

void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}

/* Wait for the given number of cycles.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}
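/* vliw_wait holds the number of cycles the current VLIW insn must stall to
   satisfy its hazards; vliw_load_stall tracks how much of that stall was
   caused by pending loads so that it can be accumulated separately into the
   load stall cycle count.  */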
void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}

/* Account for the number of cycles until these resources will be available
   again.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *ccr_lat;
  int *spr_lat;
  int *gr_lat  = ps->gr_latency;
  int *fr_lat  = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *ccr;
  int *spr;
  int *gr  = ps->gr_busy;
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;

  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
        {
          *gr = *gr_lat;
          *gr_lat = 0;
        }
      if (*fr_lat)
        {
          *fr = *fr_lat;
          *fr_lat = 0;
        }
      if (*acc_lat)
        {
          *acc = *acc_lat;
          *acc_lat = 0;
        }
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }

  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
        {
          *ccr = *ccr_lat;
          *ccr_lat = 0;
        }
      ++ccr; ++ccr_lat;
    }

  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
        {
          *spr = *spr_lat;
          *spr_lat = 0;
        }
      ++spr; ++spr_lat;
    }
}
/* Run the caches until all pending cache flushes are complete.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      sprintf (hazard_name, "Data cache flush address %x:", (unsigned) address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}

/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);

  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));

  if (first_p)
    {
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }

  wait_for_flush (cpu);
}
/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */
void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);

  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;

  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;

  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;

      activate_cache_requests (cpu); /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu); /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);

      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;

      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
        {
          cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
          if (timer->current % timer->value
              + (cycles - timer->current) >= timer->value)
            frv_queue_external_interrupt (cpu, timer->interrupt);
          timer->current = cycles;
        }

      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}

void
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
}
/* Top up the latency of the given GR by the given number of cycles.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
    }
}

void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;
    }
}

/* Top up the latency of the given double GR by the number of cycles.  */
void
update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;
    }
}

void
update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

void
update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
{
  update_GR_latency_for_load (cpu, out_GR, cycles);
}
/* Top up the latency of the given FR by the given number of cycles.  */
void
update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
    }
}

/* Top up the latency of the given double FR by the number of cycles.  */
void
update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;
    }
}

void
update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

void
update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}
/* Top up the post-processing time of the given FR by the given number of
   cycles.  */
void
update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register. Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
        }
      else
        ps->fr_ptime[out_FR] += cycles;
    }
}

void
update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register. Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
          if (out_FR < 63)
            fr[out_FR + 1] += cycles;
        }
      else
        {
          ps->fr_ptime[out_FR] += cycles;
          if (out_FR < 63)
            ps->fr_ptime[out_FR + 1] += cycles;
        }
    }
}

/* Top up the post-processing time of the given ACC by the given number of
   cycles.  */
void
update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register. Apply the cycles
         directly to the latency of the register.  */
      int *acc = ps->acc_latency;
      acc[out_ACC] += cycles;
    }
}

/* Top up the post-processing time of the given SPR by the given number of
   cycles.  */
void
update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register. Apply the cycles
         directly to the latency of the register.  */
      int *spr = ps->spr_latency;
      spr[out_SPR] += cycles;
    }
}
void
decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] -= cycles;
      if (ps->acc_busy_adjust[out_ACC] >= 0
          && cycles > ps->acc_busy_adjust[out_ACC])
        ps->acc_busy_adjust[out_ACC] = cycles;
    }
}

void
increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] += cycles;
    }
}

void
enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->acc_busy_adjust[in_ACC] = -1;
}

void
decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] -= cycles;
      if (ps->fr_busy_adjust[out_FR] >= 0
          && cycles > ps->fr_busy_adjust[out_FR])
        ps->fr_busy_adjust[out_FR] = cycles;
    }
}

void
increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] += cycles;
    }
}
/* Top up the latency of the given ACC by the given number of cycles.  */
void
update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_latency;
      if (acc[out_ACC] < cycles)
        acc[out_ACC] = cycles;
    }
}

/* Top up the latency of the given CCR by the given number of cycles.  */
void
update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
{
  if (out_CCR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *ccr = ps->ccr_latency;
      if (ccr[out_CCR] < cycles)
        ccr[out_CCR] = cycles;
    }
}

/* Top up the latency of the given SPR by the given number of cycles.  */
void
update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *spr = ps->spr_latency;
      if (spr[out_SPR] < cycles)
        spr[out_SPR] = cycles;
    }
}
/* Top up the latency of the given integer division resource by the given
   number of cycles.  */
void
update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  r[in_resource] = cycles;
}

void
update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  r[in_resource] = cycles;
}

/* Set the branch penalty to the given number of cycles.  */
void
update_branch_penalty (SIM_CPU *cpu, int cycles)
{
  /* operate directly on the busy cycles since only one branch can occur
     in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->branch_penalty = cycles;
}
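/* Each vliw_wait_for_* function below compares the busy count of one source
   register or resource with the wait already required for the current VLIW
   insn, raising ps->vliw_wait (and recording hazard_name for tracing) when
   that operand imposes a longer stall.  */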
/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
      ps->vliw_wait = gr[in_GR];
    }
}

/* Check the availability of the given GR register pair and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0)
    {
      if (gr[in_GR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
          ps->vliw_wait = gr[in_GR];
        }
      if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
          ps->vliw_wait = gr[in_GR + 1];
        }
    }
}
/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
      ps->vliw_wait = fr[in_FR];
    }
}

/* Check the availability of the given FR register pair and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
          ps->vliw_wait = fr[in_FR];
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
          ps->vliw_wait = fr[in_FR + 1];
        }
    }
}
/* Check the availability of the given CCR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
      ps->vliw_wait = ccr[in_CCR];
    }
}

/* Check the availability of the given ACC register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
      ps->vliw_wait = acc[in_ACC];
    }
}

/* Check the availability of the given SPR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
      ps->vliw_wait = spr[in_SPR];
    }
}
/* Check the availability of the given integer division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for integer division in slot I%d:",
                 in_resource);
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for floating point division in slot F%d:",
                 in_resource);
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float square root resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for square root in slot F%d:",
                 in_resource);
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for floating point unit in slot F%d:",
                 in_resource);
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given media unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for media unit in slot M%d:",
                 in_resource);
      ps->vliw_wait = r[in_resource];
    }
}
/* Run the caches until all requests for the given register(s) are
   satisfied.  */
void
load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the register's
         latency after the loads were processed. Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the registers'
         latencies after the loads were processed. Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      if (in_FR < 63 && fr[in_FR + 1])
        {
          wait += fr[in_FR + 1];
          frv_model_advance_cycles (cpu, fr[in_FR + 1]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}
void
enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->fr_busy_adjust[in_FR] = -1;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
    {
      ps->post_wait = fr[in_FR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
    }
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR + 1];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
        }
    }
}
void
post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;

  if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
    {
      ps->post_wait = acc[in_ACC];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
    }
}

void
post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;

  if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
    {
      ps->post_wait = ccr[in_CCR];
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
    }
}

void
post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;

  if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
    {
      ps->post_wait = spr[in_SPR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
    }
}
void
post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fdiv = ps->fdiv_busy;

  /* Multiple floating point divisions in the same slot need only wait 1
     extra cycle.  */
  if (fdiv[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for floating point division in slot F%d:",
                 slot);
    }
}

void
post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fsqrt = ps->fsqrt_busy;

  /* Multiple floating point square roots in the same slot need only wait 1
     extra cycle.  */
  if (fsqrt[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for square root in slot F%d:",
                 slot);
    }
}

void
post_wait_for_float (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *flt = ps->float_busy;

  /* Wait for whatever is executing in the floating point unit of this slot
     to complete.  */
  if (flt[slot] > ps->post_wait)
    {
      ps->post_wait = flt[slot];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for floating point unit in slot F%d:",
                 slot);
    }
}

void
post_wait_for_media (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *media = ps->media_busy;

  /* Wait for whatever is executing in the media unit of this slot to
     complete.  */
  if (media[slot] > ps->post_wait)
    {
      ps->post_wait = media[slot];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name,
                 "Resource hazard for media unit in slot M%d:",
                 slot);
    }
}
/* Print cpu-specific profile information.  */
#define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))

static void
print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
{
  SIM_DESC sd = CPU_STATE (cpu);

  if (cache != NULL)
    {
      char comma_buf[20];
      unsigned accesses;

      sim_io_printf (sd, "  %s Cache\n\n", cache_name);
      accesses = cache->statistics.accesses;
      sim_io_printf (sd, "    Total accesses:  %s\n", COMMAS (accesses));
      if (accesses != 0)
        {
          float rate;
          unsigned hits = cache->statistics.hits;
          sim_io_printf (sd, "    Hits:            %s\n", COMMAS (hits));
          rate = (float) hits / accesses;
          sim_io_printf (sd, "    Hit rate:        %.2f%%\n", rate * 100);
        }
    }
  else
    sim_io_printf (sd, "  Model %s has no %s cache\n",
                   MODEL_NAME (CPU_MODEL (cpu)), cache_name);

  sim_io_printf (sd, "\n");
}
/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h. Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static char *slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};
static void
print_parallel (SIM_CPU *cpu, int verbose)
{
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  unsigned total, vliw;
  char comma_buf[20];
  float average;

  sim_io_printf (sd, "Model %s Parallelization\n\n",
                 MODEL_NAME (CPU_MODEL (cpu)));

  total = PROFILE_TOTAL_INSN_COUNT (p);
  sim_io_printf (sd, "  Total instructions:           %s\n", COMMAS (total));
  vliw = ps->vliw_insns;
  sim_io_printf (sd, "  VLIW instructions:            %s\n", COMMAS (vliw));
  average = (float) total / vliw;
  sim_io_printf (sd, "  Average VLIW length:          %.2f\n", average);
  average = (float) PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  sim_io_printf (sd, "  Cycles per VLIW instruction:  %.2f\n", average);
  average = (float) total / PROFILE_MODEL_TOTAL_CYCLES (p);
  sim_io_printf (sd, "  Instructions per cycle:       %.2f\n", average);

  if (verbose)
    {
      int i;
      int max_val = 0;
      int max_name_len = 0;
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
        {
          if (INSNS_IN_SLOT (i))
            {
              int len;
              if (INSNS_IN_SLOT (i) > max_val)
                max_val = INSNS_IN_SLOT (i);
              len = strlen (slot_names[i]);
              if (len > max_name_len)
                max_name_len = len;
            }
        }

      sim_io_printf (sd, "\n");
      sim_io_printf (sd, "  Instructions per slot:\n");
      sim_io_printf (sd, "\n");
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
        {
          if (INSNS_IN_SLOT (i) != 0)
            {
              sim_io_printf (sd, "  %*s: %*s: ",
                             max_name_len, slot_names[i],
                             max_val < 10000 ? 5 : 10,
                             COMMAS (INSNS_IN_SLOT (i)));
              sim_profile_print_bar (sd, cpu, PROFILE_HISTOGRAM_WIDTH,
                                     INSNS_IN_SLOT (i),
                                     max_val);
              sim_io_printf (sd, "\n");
            }
        }
    } /* details to print */

  sim_io_printf (sd, "\n");
}
void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support.  */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
                     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}

/* A hack to get registers referenced for profiling.  */
SI frv_ref_SI (SI ref) {return ref;}

#endif /* WITH_PROFILE_MODEL_P */