/* frv simulator machine independent profiling code.

   Copyright (C) 1998, 1999, 2000, 2001, 2003 Free Software Foundation, Inc.

This file is part of the GNU simulators.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.  */
#define WANT_CPU
#define WANT_CPU_FRVBF

#include "sim-main.h"
#include "bfd.h"

#if WITH_PROFILE_MODEL_P

#include "profile.h"
#include "profile-fr400.h"
#include "profile-fr500.h"
#include "profile-fr550.h"
static void
reset_gr_flags (SIM_CPU *cpu, INT gr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_gr_flags (cpu, gr);
  /* Other machines have no gr flags right now.  */
}
static void
reset_fr_flags (SIM_CPU *cpu, INT fr)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_fr_flags (cpu, fr);
  else if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_fr_flags (cpu, fr);
}
static void
reset_acc_flags (SIM_CPU *cpu, INT acc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr400
      || STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr450)
    fr400_reset_acc_flags (cpu, acc);
  /* Other machines have no acc flags right now.  */
}
static void
reset_cc_flags (SIM_CPU *cpu, INT cc)
{
  SIM_DESC sd = CPU_STATE (cpu);
  if (STATE_ARCHITECTURE (sd)->mach == bfd_mach_fr500)
    fr500_reset_cc_flags (cpu, cc);
  /* Other machines have no cc flags.  */
}
/* Mark the given GR as the operand of a complex insn within the current
   VLIW insn.  */
void
set_use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      reset_gr_flags (cpu, gr);
      ps->cur_gr_complex |= (((DI)1) << gr);
    }
}

/* Clear the complex-operand flag for the given GR.  */
void
set_use_not_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      ps->cur_gr_complex &= ~(((DI)1) << gr);
    }
}

/* Return non-zero if the given GR is flagged as the operand of a complex
   insn within the current VLIW insn.  */
int
use_is_gr_complex (SIM_CPU *cpu, INT gr)
{
  if (gr != -1)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      return ps->cur_gr_complex & (((DI)1) << gr);
    }
  return 0;
}
/* Globals flag indicates whether this insn is being modeled.  */
enum FRV_INSN_MODELING model_insn = FRV_INSN_NO_MODELING;

/* static buffer for the name of the currently most restrictive hazard.  */
static char hazard_name[100] = "";

/* Buffer of pending insn prefetch requests, one per cache pipeline.  */
FRV_INSN_FETCH_BUFFER frv_insn_fetch_buffer[] =
{
  {1, NO_REQNO}, {1, NO_REQNO} /* init with impossible address.  */
};
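
/* The kinds of requests that can sit in the cache request queue below.
   This declaration is not part of the extracted text; the values are
   inferred from their use in submit_cache_request and the original
   declaration may differ in detail.  */
enum cache_request
{
  cache_load,
  cache_invalidate,
  cache_flush,
  cache_preload,
  cache_unlock
};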
/* A queue of load requests from the data cache.  Used to keep track of loads
   which are still pending.  */
/* TODO -- some of these are mutually exclusive and can use a union.  */
typedef struct
{
  FRV_CACHE *cache;
  unsigned reqno;
  SI address;
  int length;
  int is_signed;
  int regnum;
  int cycles;
  int regtype;
  int lock;
  int all;
  int slot;
  int active;
  enum cache_request request;
} CACHE_QUEUE_ELEMENT;

#define CACHE_QUEUE_SIZE 64 /* TODO -- make queue dynamic */
struct
{
  unsigned reqno;
  int ix;
  CACHE_QUEUE_ELEMENT q[CACHE_QUEUE_SIZE];
} cache_queue = {0, 0};
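
#if 0
/* Illustrative sketch only (not from the original sources): how a data-cache
   load is intended to travel through the queue above.  It is queued while
   its insn is modeled, activated at the end of the VLIW insn, and completed
   as the caches are run forward.  The register number and cycle counts here
   are arbitrary examples.  */
static void
cache_queue_example (SIM_CPU *cpu)
{
  /* Queue a load of gr8 to be issued 2 cycles after activation.  */
  request_cache_load (cpu, 8, REGTYPE_NONE, 2);
  /* At the end of the VLIW insn, mark all queued requests active.  */
  activate_cache_requests (cpu);
  /* Advancing the model clock runs the caches and retires the load.  */
  frv_model_advance_cycles (cpu, 4);
}
#endif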
/* Queue a request for a load from the cache.  The load will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the load is activated.  */
void
request_cache_load (SIM_CPU *cpu, INT regnum, int regtype, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  /* For a conditional load which was not executed, CPU_LOAD_LENGTH will be
     zero.  */
  if (CPU_LOAD_LENGTH (cpu) == 0)
    return;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_load;
  q->cache = CPU_DATA_CACHE (cpu);
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->is_signed = CPU_LOAD_SIGNED (cpu);
  q->regnum = regnum;
  q->regtype = regtype;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}
/* Queue a request to flush the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_flush (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_flush;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}

/* Queue a request to invalidate the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_invalidate (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_invalidate;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->all = CPU_PROFILE_STATE (cpu)->all_cache_entries;
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}
/* Queue a request to preload the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_preload (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_preload;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->length = CPU_LOAD_LENGTH (cpu);
  q->lock = CPU_LOAD_LOCK (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];

  CPU_LOAD_LENGTH (cpu) = 0;
}

/* Queue a request to unlock the cache.  The request will be queued as
   'inactive' and will be requested after the given number
   of cycles have passed from the point the request is activated.  */
void
request_cache_unlock (SIM_CPU *cpu, FRV_CACHE *cache, int cycles)
{
  CACHE_QUEUE_ELEMENT *q;
  FRV_VLIW *vliw;
  int slot;

  if (cache_queue.ix >= CACHE_QUEUE_SIZE)
    abort (); /* TODO: Make the queue dynamic */

  q = & cache_queue.q[cache_queue.ix];
  ++cache_queue.ix;

  q->reqno = cache_queue.reqno++;
  q->request = cache_unlock;
  q->cache = cache;
  q->address = CPU_LOAD_ADDRESS (cpu);
  q->cycles = cycles;
  q->active = 0;

  vliw = CPU_VLIW (cpu);
  slot = vliw->next_slot - 1;
  q->slot = (*vliw->current_vliw)[slot];
}
/* Submit the given queued request to the cache now.  */
static void
submit_cache_request (CACHE_QUEUE_ELEMENT *q)
{
  switch (q->request)
    {
    case cache_load:
      frv_cache_request_load (q->cache, q->reqno, q->address, q->slot);
      break;
    case cache_flush:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 1/*flush*/);
      break;
    case cache_invalidate:
      frv_cache_request_invalidate (q->cache, q->reqno, q->address, q->slot,
                                    q->all, 0/*flush*/);
      break;
    case cache_preload:
      frv_cache_request_preload (q->cache, q->address, q->slot,
                                 q->length, q->lock);
      break;
    case cache_unlock:
      frv_cache_request_unlock (q->cache, q->address, q->slot);
      break;
    default:
      abort ();
    }
}
/* Activate all inactive load requests.  */
static void
activate_cache_requests (SIM_CPU *cpu)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];
      if (! q->active)
        {
          q->active = 1;
          /* Submit the request now if the cycle count is zero.  */
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}
/* Check to see if a load is pending which affects the given register(s).  */
static int
load_pending_for_register (SIM_CPU *cpu, int regnum, int words, int regtype)
{
  int i;
  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of register.  */
      if (! q->active || q->request != cache_load || q->regtype != regtype)
        continue;

      /* If the register numbers are equal, then we have a match.  */
      if (q->regnum == regnum)
        return 1; /* load pending */

      /* Check for overlap of a load with a multi-word register.  */
      if (regnum < q->regnum)
        {
          if (regnum + words > q->regnum)
            return 1; /* load pending */
        }
      /* Check for overlap of a multi-word load with the register.  */
      else
        {
          int data_words = (q->length + sizeof (SI) - 1) / sizeof (SI);
          if (q->regnum + data_words > regnum)
            return 1; /* load pending */
        }
    }

  return 0; /* no load pending */
}
/* Check to see if a cache flush is pending which affects the given address.  */
static int
flush_pending_for_address (SIM_CPU *cpu, SI address)
{
  int i;
  int line_mask = ~(CPU_DATA_CACHE (cpu)->line_size - 1);

  for (i = 0; i < cache_queue.ix; ++i)
    {
      CACHE_QUEUE_ELEMENT *q = & cache_queue.q[i];

      /* Must be the same kind of request and active.  */
      if (! q->active || q->request != cache_flush)
        continue;

      /* If the addresses are equal, then we have a match.  */
      if ((q->address & line_mask) == (address & line_mask))
        return 1; /* flush pending */
    }

  return 0; /* no flush pending */
}
static void
remove_cache_queue_element (SIM_CPU *cpu, int i)
{
  /* If we are removing the load of a FR register, then remember which one(s).
   */
  CACHE_QUEUE_ELEMENT q = cache_queue.q[i];

  for (--cache_queue.ix; i < cache_queue.ix; ++i)
    cache_queue.q[i] = cache_queue.q[i + 1];

  /* If we removed a load of a FR register, check to see if any other loads
     of that register are still queued.  If not, then apply the queued post
     processing time of that register to its latency.  Also apply
     1 extra cycle of latency to the register since it was a floating point
     load.  */
  if (q.request == cache_load && q.regtype != REGTYPE_NONE)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int data_words = (q.length + sizeof (SI) - 1) / sizeof (SI);
      int j;
      for (j = 0; j < data_words; ++j)
        {
          int regnum = q.regnum + j;
          if (! load_pending_for_register (cpu, regnum, 1, q.regtype))
            {
              if (q.regtype == REGTYPE_FR)
                {
                  int *fr = ps->fr_busy;
                  fr[regnum] += 1 + ps->fr_ptime[regnum];
                  ps->fr_ptime[regnum] = 0;
                }
            }
        }
    }
}
/* Copy data from the cache buffer to the target register(s).  */
static void
copy_load_data (SIM_CPU *current_cpu, FRV_CACHE *cache, int slot,
                CACHE_QUEUE_ELEMENT *q)
{
  switch (q->length)
    {
    case 1:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              QI value = CACHE_RETURN_DATA (cache, slot, q->address, QI, 1);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UQI value = CACHE_RETURN_DATA (cache, slot, q->address, UQI, 1);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 2:
      if (q->regtype == REGTYPE_FR)
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_FR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_FR (q->regnum, value);
            }
        }
      else
        {
          if (q->is_signed)
            {
              HI value = CACHE_RETURN_DATA (cache, slot, q->address, HI, 2);
              SET_H_GR (q->regnum, value);
            }
          else
            {
              UHI value = CACHE_RETURN_DATA (cache, slot, q->address, UHI, 2);
              SET_H_GR (q->regnum, value);
            }
        }
      break;
    case 4:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SF, 4));
        }
      else
        {
          SET_H_GR (q->regnum,
                    CACHE_RETURN_DATA (cache, slot, q->address, SI, 4));
        }
      break;
    case 8:
      if (q->regtype == REGTYPE_FR)
        {
          SET_H_FR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DF, 8));
        }
      else
        {
          SET_H_GR_DOUBLE (q->regnum,
                           CACHE_RETURN_DATA (cache, slot, q->address, DI, 8));
        }
      break;
    case 16:
      if (q->regtype == REGTYPE_FR)
        frvbf_h_fr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      else
        frvbf_h_gr_quad_set_handler (current_cpu, q->regnum,
                                     CACHE_RETURN_DATA_ADDRESS (cache, slot,
                                                                q->address,
                                                                16));
      break;
    default:
      abort ();
    }
}
/* Return non-zero if the given queued request has completed, performing any
   associated data transfer.  */
static int
request_complete (SIM_CPU *cpu, CACHE_QUEUE_ELEMENT *q)
{
  FRV_CACHE *cache;
  if (! q->active || q->cycles > 0)
    return 0;

  cache = CPU_DATA_CACHE (cpu);
  switch (q->request)
    {
    case cache_load:
      /* For loads, we must wait until the data is returned from the cache.  */
      if (frv_cache_data_in_buffer (cache, 0, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 0, q);
          return 1;
        }
      if (frv_cache_data_in_buffer (cache, 1, q->address, q->reqno))
        {
          copy_load_data (cpu, cache, 1, q);
          return 1;
        }
      break;

    case cache_flush:
      /* We must wait until the data is flushed.  */
      if (frv_cache_data_flushed (cache, 0, q->address, q->reqno))
        return 1;
      if (frv_cache_data_flushed (cache, 1, q->address, q->reqno))
        return 1;
      break;

    default:
      /* All other requests are complete once they've been made.  */
      return 1;
    }

  return 0;
}
/* Run the insn and data caches through the given number of cycles, taking
   note of load requests which are fulfilled as a result.  */
static void
run_caches (SIM_CPU *cpu, int cycles)
{
  FRV_CACHE *data_cache = CPU_DATA_CACHE (cpu);
  FRV_CACHE *insn_cache = CPU_INSN_CACHE (cpu);
  int i;
  /* For each cycle, run the caches, noting which requests have been fulfilled
     and submitting new requests on their designated cycles.  */
  for (i = 0; i < cycles; ++i)
    {
      int j;
      /* Run the caches through 1 cycle.  */
      frv_cache_run (data_cache, 1);
      frv_cache_run (insn_cache, 1);

      /* Note whether prefetched insn data has been loaded yet.  */
      for (j = LS; j < FRV_CACHE_PIPELINES; ++j)
        {
          if (frv_insn_fetch_buffer[j].reqno != NO_REQNO
              && frv_cache_data_in_buffer (insn_cache, j,
                                           frv_insn_fetch_buffer[j].address,
                                           frv_insn_fetch_buffer[j].reqno))
            frv_insn_fetch_buffer[j].reqno = NO_REQNO;
        }

      /* Check to see which requests have been satisfied and which should
         be submitted now.  */
      for (j = 0; j < cache_queue.ix; ++j)
        {
          CACHE_QUEUE_ELEMENT *q = & cache_queue.q[j];
          if (! q->active)
            continue;

          /* If a load has been satisfied, complete the operation and remove it
             from the queue.  */
          if (request_complete (cpu, q))
            {
              remove_cache_queue_element (cpu, j);
              --j;
              continue;
            }

          /* Decrease the cycle count of each queued request.
             Submit a request for each queued request whose cycle count has
             reached 0.  */
          --q->cycles;
          if (q->cycles == 0)
            submit_cache_request (q);
        }
    }
}
/* Apply the pending busy-count adjustments recorded for the FR and ACC
   registers.  */
static void
apply_latency_adjustments (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;
  for (i = 0; i < 64; ++i)
    {
      if (ps->fr_busy_adjust[i] > 0)
        *fr -= ps->fr_busy_adjust[i]; /* OK if it goes negative.  */
      if (ps->acc_busy_adjust[i] > 0)
        *acc -= ps->acc_busy_adjust[i]; /* OK if it goes negative.  */
      ++fr;
      ++acc;
    }
}
/* Account for the number of cycles which have just passed in the latency of
   various system elements.  Works for negative cycles too so that latency
   can be extended in the case of insn fetch latency.
   If negative or zero, then no adjustment is necessary.  */
static void
update_latencies (SIM_CPU *cpu, int cycles)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *idiv;
  int *fdiv;
  int *fsqrt;
  int *flt;
  int *media;
  int *ccr;
  int *spr;
  int *gr  = ps->gr_busy;
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;

  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr <= cycles)
        {
          *gr = 0;
          reset_gr_flags (cpu, i);
        }
      else
        *gr -= cycles;
      /* If the busy drops to 0, then mark the register as
         "not in use".  */
      if (*fr <= cycles)
        {
          int *fr_lat = ps->fr_latency + i;
          *fr = 0;
          ps->fr_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*fr_lat == 0)
            reset_fr_flags (cpu, i);
        }
      else
        *fr -= cycles;
      /* If the busy drops to 0, then mark the register as
         "not in use".  */
      if (*acc <= cycles)
        {
          int *acc_lat = ps->acc_latency + i;
          *acc = 0;
          ps->acc_busy_adjust[i] = 0;
          /* Only clear flags if this register has no target latency.  */
          if (*acc_lat == 0)
            reset_acc_flags (cpu, i);
        }
      else
        *acc -= cycles;
      ++gr;
      ++fr;
      ++acc;
    }

  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr <= cycles)
        {
          *ccr = 0;
          reset_cc_flags (cpu, i);
        }
      else
        *ccr -= cycles;
      ++ccr;
    }

  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr <= cycles)
        *spr = 0;
      else
        *spr -= cycles;
      ++spr;
    }

  /* This loop handles resources.  */
  idiv = ps->idiv_busy;
  fdiv = ps->fdiv_busy;
  fsqrt = ps->fsqrt_busy;
  for (i = 0; i < 2; ++i)
    {
      *idiv = (*idiv <= cycles) ? 0 : (*idiv - cycles);
      *fdiv = (*fdiv <= cycles) ? 0 : (*fdiv - cycles);
      *fsqrt = (*fsqrt <= cycles) ? 0 : (*fsqrt - cycles);
      ++idiv;
      ++fdiv;
      ++fsqrt;
    }

  /* Float and media units can occur in 4 slots on some machines.  */
  flt = ps->float_busy;
  media = ps->media_busy;
  for (i = 0; i < 4; ++i)
    {
      *flt = (*flt <= cycles) ? 0 : (*flt - cycles);
      *media = (*media <= cycles) ? 0 : (*media - cycles);
      ++flt;
      ++media;
    }
}
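
#if 0
/* Illustrative sketch only (not from the original sources): the decay rule
   applied throughout update_latencies.  A busy count at or below the number
   of elapsed cycles drops to zero (and the register's flags may be reset);
   otherwise it is simply reduced by the elapsed cycle count.  */
static int
decay_busy_count (int busy, int cycles)
{
  return (busy <= cycles) ? 0 : (busy - cycles);
}
#endif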
/* Print information about the wait for the given number of cycles.  */
void
frv_model_trace_wait_cycles (SIM_CPU *cpu, int cycles, const char *hazard_name)
{
  if (TRACE_INSN_P (cpu) && cycles > 0)
    {
      SIM_DESC sd = CPU_STATE (cpu);
      trace_printf (sd, cpu, "**** %s wait %d cycles ***\n",
                    hazard_name, cycles);
    }
}

void
trace_vliw_wait_cycles (SIM_CPU *cpu)
{
  if (TRACE_INSN_P (cpu))
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      frv_model_trace_wait_cycles (cpu, ps->vliw_wait, hazard_name);
    }
}
/* Wait for the given number of cycles.  */
void
frv_model_advance_cycles (SIM_CPU *cpu, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

  update_latencies (cpu, cycles);
  run_caches (cpu, cycles);
  PROFILE_MODEL_TOTAL_CYCLES (p) += cycles;
}
void
handle_resource_wait (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  if (ps->vliw_wait != 0)
    frv_model_advance_cycles (cpu, ps->vliw_wait);
  if (ps->vliw_load_stall > ps->vliw_wait)
    ps->vliw_load_stall -= ps->vliw_wait;
  else
    ps->vliw_load_stall = 0;
}
/* Account for the number of cycles until these resources will be available
   again.  */
static void
update_target_latencies (SIM_CPU *cpu)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int i;
  /* update the latencies of the registers.  */
  int *ccr_lat;
  int *spr_lat;
  int *ccr;
  int *spr;
  int *gr_lat  = ps->gr_latency;
  int *fr_lat  = ps->fr_latency;
  int *acc_lat = ps->acc_latency;
  int *gr  = ps->gr_busy;
  int *fr  = ps->fr_busy;
  int *acc = ps->acc_busy;

  /* This loop handles GR, FR and ACC registers.  */
  for (i = 0; i < 64; ++i)
    {
      if (*gr_lat)
        {
          *gr = *gr_lat;
          *gr_lat = 0;
        }
      if (*fr_lat)
        {
          *fr = *fr_lat;
          *fr_lat = 0;
        }
      if (*acc_lat)
        {
          *acc = *acc_lat;
          *acc_lat = 0;
        }
      ++gr; ++gr_lat;
      ++fr; ++fr_lat;
      ++acc; ++acc_lat;
    }

  /* This loop handles CCR registers.  */
  ccr = ps->ccr_busy;
  ccr_lat = ps->ccr_latency;
  for (i = 0; i < 8; ++i)
    {
      if (*ccr_lat)
        {
          *ccr = *ccr_lat;
          *ccr_lat = 0;
        }
      ++ccr; ++ccr_lat;
    }

  /* This loop handles SPR registers.  */
  spr = ps->spr_busy;
  spr_lat = ps->spr_latency;
  for (i = 0; i < 4096; ++i)
    {
      if (*spr_lat)
        {
          *spr = *spr_lat;
          *spr_lat = 0;
        }
      ++spr; ++spr_lat;
    }
}
/* Run the caches until all pending cache flushes are complete.  */
static void
wait_for_flush (SIM_CPU *cpu)
{
  SI address = CPU_LOAD_ADDRESS (cpu);
  int wait = 0;
  while (flush_pending_for_address (cpu, address))
    {
      frv_model_advance_cycles (cpu, 1);
      ++wait;
    }
  if (TRACE_INSN_P (cpu) && wait)
    {
      sprintf (hazard_name, "Data cache flush address %p:", address);
      frv_model_trace_wait_cycles (cpu, wait, hazard_name);
    }
}
/* Initialize cycle counting for an insn.
   FIRST_P is non-zero if this is the first insn in a set of parallel
   insns.  */
void
frvbf_model_insn_before (SIM_CPU *cpu, int first_p)
{
  SIM_DESC sd = CPU_STATE (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);

  ps->vliw_wait = 0;
  ps->post_wait = 0;
  memset (ps->fr_busy_adjust, 0, sizeof (ps->fr_busy_adjust));
  memset (ps->acc_busy_adjust, 0, sizeof (ps->acc_busy_adjust));

  if (first_p)
    {
      ps->vliw_insns++;
      ps->vliw_cycles = 0;
      ps->vliw_branch_taken = 0;
      ps->vliw_load_stall = 0;
    }

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_before (cpu, first_p);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_before (cpu, first_p);
      break;
    default:
      break;
    }

  if (first_p)
    wait_for_flush (cpu);
}
/* Record the cycles computed for an insn.
   LAST_P is non-zero if this is the last insn in a set of parallel insns,
   and we update the total cycle count.
   CYCLES is the cycle count of the insn.  */
void
frvbf_model_insn_after (SIM_CPU *cpu, int last_p, int cycles)
{
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  SIM_DESC sd = CPU_STATE (cpu);

  PROFILE_MODEL_CUR_INSN_CYCLES (p) = cycles;

  /* The number of cycles for a VLIW insn is the maximum number of cycles
     used by any individual insn within it.  */
  if (cycles > ps->vliw_cycles)
    ps->vliw_cycles = cycles;

  if (last_p)
    {
      /* This is the last insn in a VLIW insn.  */
      struct frv_interrupt_timer *timer = & frv_interrupt_state.timer;

      activate_cache_requests (cpu);   /* before advancing cycles.  */
      apply_latency_adjustments (cpu); /* must go first.  */
      update_target_latencies (cpu);   /* must go next.  */
      frv_model_advance_cycles (cpu, ps->vliw_cycles);

      PROFILE_MODEL_LOAD_STALL_CYCLES (p) += ps->vliw_load_stall;

      /* Check the interrupt timer.  cycles contains the total cycle count.  */
      if (timer->enabled)
        {
          cycles = PROFILE_MODEL_TOTAL_CYCLES (p);
          if (timer->current % timer->value
              + (cycles - timer->current) >= timer->value)
            frv_queue_external_interrupt (cpu, timer->interrupt);
          timer->current = cycles;
        }

      ps->past_first_p = 0; /* Next one will be the first in a new VLIW.  */
      ps->branch_address = -1;
    }
  else
    ps->past_first_p = 1;

  switch (STATE_ARCHITECTURE (sd)->mach)
    {
    case bfd_mach_fr400:
    case bfd_mach_fr450:
      fr400_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr500:
      fr500_model_insn_after (cpu, last_p, cycles);
      break;
    case bfd_mach_fr550:
      fr550_model_insn_after (cpu, last_p, cycles);
      break;
    default:
      break;
    }
}
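
#if 0
/* Illustrative sketch only (not from the original sources): the order in
   which the bookkeeping above is intended to run for one VLIW insn.  The
   comments in frvbf_model_insn_after require the latency adjustments to be
   applied before the target latencies are transferred, and both before the
   model clock is advanced.  */
static void
model_one_vliw_insn (SIM_CPU *cpu, int cycles)
{
  frvbf_model_insn_before (cpu, 1/*first_p*/);
  /* ... per-insn modeling runs here, updating latencies and busy counts ... */
  frvbf_model_insn_after (cpu, 1/*last_p*/, cycles);
}
#endif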
void
frvbf_model_branch (SIM_CPU *current_cpu, PCADDR target, int hint)
{
  /* Record the hint and branch address for use in profiling.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (current_cpu);
  ps->branch_hint = hint;
  ps->branch_address = target;
}
/* Top up the latency of the given GR by the given number of cycles.  */
void
update_GR_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
    }
}

/* Decrease the busy count of the given GR by the given number of cycles.  */
void
decrease_GR_busy (SIM_CPU *cpu, INT in_GR, int cycles)
{
  if (in_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_busy;
      gr[in_GR] -= cycles;
    }
}

/* Top up the latency of the given double GR by the number of cycles.  */
void
update_GRdouble_latency (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;
    }
}

/* Top up the latency of the given GR for a load.  */
void
update_GR_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

/* Top up the latency of the given double GR for a load.  */
void
update_GRdouble_latency_for_load (SIM_CPU *cpu, INT out_GR, int cycles)
{
  if (out_GR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *gr = ps->gr_latency;

      /* The latency of the GR will be at least the number of cycles used
         by the insn.  */
      if (gr[out_GR] < cycles)
        gr[out_GR] = cycles;
      if (out_GR < 63 && gr[out_GR + 1] < cycles)
        gr[out_GR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_GR, REGTYPE_NONE, cycles);
    }
}

/* Top up the latency of the given GR for a swap, which is modeled like a
   load.  */
void
update_GR_latency_for_swap (SIM_CPU *cpu, INT out_GR, int cycles)
{
  update_GR_latency_for_load (cpu, out_GR, cycles);
}
/* Top up the latency of the given FR by the given number of cycles.  */
void
update_FR_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
    }
}

/* Top up the latency of the given double FR by the number of cycles.  */
void
update_FRdouble_latency (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;
    }
}

/* Top up the latency of the given FR for a load.  */
void
update_FR_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}

/* Top up the latency of the given double FR for a load.  */
void
update_FRdouble_latency_for_load (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_latency;

      /* The latency of the FR will be at least the number of cycles used
         by the insn.  */
      if (fr[out_FR] < cycles)
        fr[out_FR] = cycles;
      if (out_FR < 63 && fr[out_FR + 1] < cycles)
        fr[out_FR + 1] = cycles;

      /* The latency will also depend on how long it takes to retrieve the
         data from the cache or memory.  Assume that the load is issued
         after the last cycle of the insn.  */
      request_cache_load (cpu, out_FR, REGTYPE_FR, cycles);
    }
}
/* Top up the post-processing time of the given FR by the given number of
   cycles.  */
void
update_FR_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 1, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
        }
      else
        ps->fr_ptime[out_FR] += cycles;
    }
}

void
update_FRdouble_ptime (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* If a load is pending on this register, then add the cycles to
         the post processing time for this register.  Otherwise apply it
         directly to the latency of the register.  */
      if (! load_pending_for_register (cpu, out_FR, 2, REGTYPE_FR))
        {
          int *fr = ps->fr_latency;
          fr[out_FR] += cycles;
          if (out_FR < 63)
            fr[out_FR + 1] += cycles;
        }
      else
        {
          ps->fr_ptime[out_FR] += cycles;
          if (out_FR < 63)
            ps->fr_ptime[out_FR + 1] += cycles;
        }
    }
}

/* Top up the post-processing time of the given ACC by the given number of
   cycles.  */
void
update_ACC_ptime (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *acc = ps->acc_latency;
      acc[out_ACC] += cycles;
    }
}

/* Top up the post-processing time of the given SPR by the given number of
   cycles.  */
void
update_SPR_ptime (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      /* No load can be pending on this register.  Apply the cycles
         directly to the latency of the register.  */
      int *spr = ps->spr_latency;
      spr[out_SPR] += cycles;
    }
}
void
decrease_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] -= cycles;
      if (ps->acc_busy_adjust[out_ACC] >= 0
          && cycles > ps->acc_busy_adjust[out_ACC])
        ps->acc_busy_adjust[out_ACC] = cycles;
    }
}

void
increase_ACC_busy (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_busy;
      acc[out_ACC] += cycles;
    }
}

/* Mark the given ACC as one whose busy count must not be adjusted
   downward.  */
void
enforce_full_acc_latency (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->acc_busy_adjust[in_ACC] = -1;
}

void
decrease_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] -= cycles;
      if (ps->fr_busy_adjust[out_FR] >= 0
          && cycles > ps->fr_busy_adjust[out_FR])
        ps->fr_busy_adjust[out_FR] = cycles;
    }
}

void
increase_FR_busy (SIM_CPU *cpu, INT out_FR, int cycles)
{
  if (out_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr = ps->fr_busy;
      fr[out_FR] += cycles;
    }
}
/* Top up the latency of the given ACC by the given number of cycles.  */
void
update_ACC_latency (SIM_CPU *cpu, INT out_ACC, int cycles)
{
  if (out_ACC >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *acc = ps->acc_latency;
      if (acc[out_ACC] < cycles)
        acc[out_ACC] = cycles;
    }
}

/* Top up the latency of the given CCR by the given number of cycles.  */
void
update_CCR_latency (SIM_CPU *cpu, INT out_CCR, int cycles)
{
  if (out_CCR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *ccr = ps->ccr_latency;
      if (ccr[out_CCR] < cycles)
        ccr[out_CCR] = cycles;
    }
}

/* Top up the latency of the given SPR by the given number of cycles.  */
void
update_SPR_latency (SIM_CPU *cpu, INT out_SPR, int cycles)
{
  if (out_SPR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *spr = ps->spr_latency;
      if (spr[out_SPR] < cycles)
        spr[out_SPR] = cycles;
    }
}
/* Top up the latency of the given integer division resource by the given
   number of cycles.  */
void
update_idiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fdiv_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_fsqrt_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  r[in_resource] = cycles;
}

/* Set the latency of the given resource to the given number of cycles.  */
void
update_float_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  r[in_resource] = cycles;
}

void
update_media_resource_latency (SIM_CPU *cpu, INT in_resource, int cycles)
{
  /* operate directly on the busy cycles since each resource can only
     be used once in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  r[in_resource] = cycles;
}

/* Set the branch penalty to the given number of cycles.  */
void
update_branch_penalty (SIM_CPU *cpu, int cycles)
{
  /* operate directly on the busy cycles since only one branch can occur
     in a VLIW insn.  */
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->branch_penalty = cycles;
}
/* Check the availability of the given GR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0 && gr[in_GR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
      ps->vliw_wait = gr[in_GR];
    }
}

/* Check the availability of the given double GR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *gr = ps->gr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_GR >= 0)
    {
      if (gr[in_GR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
          ps->vliw_wait = gr[in_GR];
        }
      if (in_GR < 63 && gr[in_GR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR + 1);
          ps->vliw_wait = gr[in_GR + 1];
        }
    }
}

/* Check the availability of the given FR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0 && fr[in_FR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
      ps->vliw_wait = fr[in_FR];
    }
}

/* Check the availability of the given double FR register and update the
   number of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
          ps->vliw_wait = fr[in_FR];
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->vliw_wait)
        {
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
          ps->vliw_wait = fr[in_FR + 1];
        }
    }
}
/* Check the availability of the given CCR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_CCR >= 0 && ccr[in_CCR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
      ps->vliw_wait = ccr[in_CCR];
    }
}

/* Check the availability of the given ACC register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_ACC >= 0 && acc[in_ACC] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
      ps->vliw_wait = acc[in_ACC];
    }
}

/* Check the availability of the given SPR register and update the number
   of cycles the current VLIW insn must wait until it is available.  */
void
vliw_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;
  /* If the latency of the register is greater than the current wait
     then update the current wait.  */
  if (in_SPR >= 0 && spr[in_SPR] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr %d:", in_SPR);
      ps->vliw_wait = spr[in_SPR];
    }
}
/* Check the availability of the given integer division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_idiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->idiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for integer division in slot I%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float division resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fdiv_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fdiv_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float square root resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_fsqrt_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->fsqrt_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given float unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_float_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->float_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}

/* Check the availability of the given media unit resource and update
   the number of cycles the current VLIW insn must wait until it is
   available.  */
void
vliw_wait_for_media_resource (SIM_CPU *cpu, INT in_resource)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *r = ps->media_busy;
  /* If the latency of the resource is greater than the current wait
     then update the current wait.  */
  if (r[in_resource] > ps->vliw_wait)
    {
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", in_resource);
        }
      ps->vliw_wait = r[in_resource];
    }
}
/* Run the caches until all requests for the given register(s) are satisfied. */
void
load_wait_for_GR (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 1/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 1/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the register's
         latency after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}

void
load_wait_for_GRdouble (SIM_CPU *cpu, INT in_GR)
{
  if (in_GR >= 0)
    {
      int wait = 0;
      while (load_pending_for_register (cpu, in_GR, 2/*words*/, REGTYPE_NONE))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      if (wait)
        {
          FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for gr%d:", in_GR);
        }
    }
}

void
load_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  if (in_FR >= 0)
    {
      FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
      int *fr;
      int wait = 0;
      while (load_pending_for_register (cpu, in_FR, 2/*words*/, REGTYPE_FR))
        {
          frv_model_advance_cycles (cpu, 1);
          ++wait;
        }
      /* Post processing time may have been added to the registers'
         latencies after the loads were processed.  Account for that too.  */
      fr = ps->fr_busy;
      if (fr[in_FR])
        {
          wait += fr[in_FR];
          frv_model_advance_cycles (cpu, fr[in_FR]);
        }
      if (in_FR < 63 && fr[in_FR + 1])
        {
          wait += fr[in_FR + 1];
          frv_model_advance_cycles (cpu, fr[in_FR + 1]);
        }
      /* Update the vliw_wait with the number of cycles we waited for the
         load and any post-processing.  */
      if (wait)
        {
          ps->vliw_wait += wait;
          ps->vliw_load_stall += wait;
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
    }
}
/* Mark the given FR as one whose busy count must not be adjusted
   downward.  */
void
enforce_full_fr_latency (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  ps->fr_busy_adjust[in_FR] = -1;
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FR (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0 && fr[in_FR] > ps->post_wait)
    {
      ps->post_wait = fr[in_FR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
    }
}

/* Calculate how long the post processing for a floating point insn must
   wait for resources to become available.  */
void
post_wait_for_FRdouble (SIM_CPU *cpu, INT in_FR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fr = ps->fr_busy;

  if (in_FR >= 0)
    {
      if (fr[in_FR] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR);
        }
      if (in_FR < 63 && fr[in_FR + 1] > ps->post_wait)
        {
          ps->post_wait = fr[in_FR + 1];
          if (TRACE_INSN_P (cpu))
            sprintf (hazard_name, "Data hazard for fr%d:", in_FR + 1);
        }
    }
}
void
post_wait_for_ACC (SIM_CPU *cpu, INT in_ACC)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *acc = ps->acc_busy;

  if (in_ACC >= 0 && acc[in_ACC] > ps->post_wait)
    {
      ps->post_wait = acc[in_ACC];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for acc%d:", in_ACC);
    }
}

void
post_wait_for_CCR (SIM_CPU *cpu, INT in_CCR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *ccr = ps->ccr_busy;

  if (in_CCR >= 0 && ccr[in_CCR] > ps->post_wait)
    {
      ps->post_wait = ccr[in_CCR];
      if (TRACE_INSN_P (cpu))
        {
          if (in_CCR > 3)
            sprintf (hazard_name, "Data hazard for icc%d:", in_CCR - 4);
          else
            sprintf (hazard_name, "Data hazard for fcc%d:", in_CCR);
        }
    }
}

void
post_wait_for_SPR (SIM_CPU *cpu, INT in_SPR)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *spr = ps->spr_busy;

  if (in_SPR >= 0 && spr[in_SPR] > ps->post_wait)
    {
      ps->post_wait = spr[in_SPR];
      if (TRACE_INSN_P (cpu))
        sprintf (hazard_name, "Data hazard for spr[%d]:", in_SPR);
    }
}
void
post_wait_for_fdiv (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fdiv = ps->fdiv_busy;

  /* Multiple floating point divisions in the same slot need only wait 1
     extra cycle.  */
  if (fdiv[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point division in slot F%d:", slot);
        }
    }
}

void
post_wait_for_fsqrt (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *fsqrt = ps->fsqrt_busy;

  /* Multiple floating point square roots in the same slot need only wait 1
     extra cycle.  */
  if (fsqrt[slot] > 0 && 1 > ps->post_wait)
    {
      ps->post_wait = 1;
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for square root in slot F%d:", slot);
        }
    }
}

void
post_wait_for_float (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *flt = ps->float_busy;

  /* Wait until the floating point unit for this slot is free.  */
  if (flt[slot] > ps->post_wait)
    {
      ps->post_wait = flt[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for floating point unit in slot F%d:", slot);
        }
    }
}

void
post_wait_for_media (SIM_CPU *cpu, INT slot)
{
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  int *media = ps->media_busy;

  /* Wait until the media unit for this slot is free.  */
  if (media[slot] > ps->post_wait)
    {
      ps->post_wait = media[slot];
      if (TRACE_INSN_P (cpu))
        {
          sprintf (hazard_name, "Resource hazard for media unit in slot M%d:", slot);
        }
    }
}
/* Print cpu-specific profile information.  */
#define COMMAS(n) sim_add_commas (comma_buf, sizeof (comma_buf), (n))

static void
print_cache (SIM_CPU *cpu, FRV_CACHE *cache, const char *cache_name)
{
  SIM_DESC sd = CPU_STATE (cpu);

  if (cache != NULL)
    {
      char comma_buf[20];
      unsigned accesses;

      sim_io_printf (sd, " %s Cache\n\n", cache_name);
      accesses = cache->statistics.accesses;
      sim_io_printf (sd, " Total accesses: %s\n", COMMAS (accesses));
      if (accesses != 0)
        {
          float rate;
          unsigned hits = cache->statistics.hits;
          sim_io_printf (sd, " Hits: %s\n", COMMAS (hits));
          rate = (float)hits / accesses;
          sim_io_printf (sd, " Hit rate: %.2f%%\n", rate * 100);
        }
    }
  else
    sim_io_printf (sd, " Model %s has no %s cache\n",
                   MODEL_NAME (CPU_MODEL (cpu)), cache_name);

  sim_io_printf (sd, "\n");
}
/* This table must correspond to the UNIT_ATTR table in
   opcodes/frv-desc.h.  Only the units up to UNIT_C need be
   listed since the others cannot occur after mapping.  */
static char *
slot_names[] =
{
  "none",
  "I0", "I1", "I01", "I2", "I3", "IALL",
  "FM0", "FM1", "FM01", "FM2", "FM3", "FMALL", "FMLOW",
  "B0", "B1", "B01",
  "C"
};
static void
print_parallel (SIM_CPU *cpu, int verbose)
{
  SIM_DESC sd = CPU_STATE (cpu);
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);
  FRV_PROFILE_STATE *ps = CPU_PROFILE_STATE (cpu);
  unsigned total, vliw;
  char comma_buf[20];
  float average;

  sim_io_printf (sd, "Model %s Parallelization\n\n",
                 MODEL_NAME (CPU_MODEL (cpu)));

  total = PROFILE_TOTAL_INSN_COUNT (p);
  sim_io_printf (sd, " Total instructions: %s\n", COMMAS (total));
  vliw = ps->vliw_insns;
  sim_io_printf (sd, " VLIW instructions: %s\n", COMMAS (vliw));
  average = (float)total / vliw;
  sim_io_printf (sd, " Average VLIW length: %.2f\n", average);
  average = (float)PROFILE_MODEL_TOTAL_CYCLES (p) / vliw;
  sim_io_printf (sd, " Cycles per VLIW instruction: %.2f\n", average);
  average = (float)total / PROFILE_MODEL_TOTAL_CYCLES (p);
  sim_io_printf (sd, " Instructions per cycle: %.2f\n", average);

  if (verbose)
    {
      int i;
      int max_val = 0;
      int max_name_len = 0;
      for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
        {
          if (INSNS_IN_SLOT (i))
            {
              int len;
              if (INSNS_IN_SLOT (i) > max_val)
                max_val = INSNS_IN_SLOT (i);
              len = strlen (slot_names[i]);
              if (len > max_name_len)
                max_name_len = len;
            }
        }
      if (max_val > 0)
        {
          sim_io_printf (sd, "\n");
          sim_io_printf (sd, " Instructions per slot:\n");
          sim_io_printf (sd, "\n");
          for (i = UNIT_NIL + 1; i < UNIT_NUM_UNITS; ++i)
            {
              if (INSNS_IN_SLOT (i) != 0)
                {
                  sim_io_printf (sd, " %*s: %*s: ",
                                 max_name_len, slot_names[i],
                                 max_val < 10000 ? 5 : 10,
                                 COMMAS (INSNS_IN_SLOT (i)));
                  sim_profile_print_bar (sd, PROFILE_HISTOGRAM_WIDTH,
                                         INSNS_IN_SLOT (i), max_val);
                  sim_io_printf (sd, "\n");
                }
            }
        } /* details to print */
    } /* verbose */

  sim_io_printf (sd, "\n");
}
void
frv_profile_info (SIM_CPU *cpu, int verbose)
{
  /* FIXME: Need to add smp support.  */
  PROFILE_DATA *p = CPU_PROFILE_DATA (cpu);

#if WITH_PROFILE_PARALLEL_P
  if (PROFILE_FLAGS (p) [PROFILE_PARALLEL_IDX])
    print_parallel (cpu, verbose);
#endif

#if WITH_PROFILE_CACHE_P
  if (PROFILE_FLAGS (p) [PROFILE_CACHE_IDX])
    {
      SIM_DESC sd = CPU_STATE (cpu);
      sim_io_printf (sd, "Model %s Cache Statistics\n\n",
                     MODEL_NAME (CPU_MODEL (cpu)));
      print_cache (cpu, CPU_INSN_CACHE (cpu), "Instruction");
      print_cache (cpu, CPU_DATA_CACHE (cpu), "Data");
    }
#endif /* WITH_PROFILE_CACHE_P */
}
/* A hack to get registers referenced for profiling.  */
SI frv_ref_SI (SI ref) {return ref;}
#endif /* WITH_PROFILE_MODEL_P */