/* This file is generated by the genmloop script.  DO NOT EDIT! */
/* Enable switch() support in cgen headers.  */
#define SEM_IN_SWITCH

#include "sim-assert.h"
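/* This is the SHmedia half of the SH64 engine: it fetches, "compiles" and
   executes SHmedia instructions using cgen's scache/pbb scheme.  A matching
   SHcompact loop is generated separately; switching between the two ISAs is
   handled by the cti-chain handler below.  */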
/* Fill in the administrative ARGBUF fields required by all insns,
   virtual and real.  */

static INLINE void
sh64_media_fill_argbuf (const SIM_CPU *cpu, ARGBUF *abuf, const IDESC *idesc,
                        PCADDR pc, int fast_p)
{
#if WITH_SCACHE
  SEM_SET_CODE (abuf, idesc, fast_p);
  ARGBUF_ADDR (abuf) = pc;
#endif
  ARGBUF_IDESC (abuf) = idesc;
}
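/* Depending on how the engine is configured (WITH_SEM_SWITCH_*),
   SEM_SET_CODE records either the switch/computed-goto tag or the
   sem_fast/sem_full function pointer that is later used to invoke the
   insn's semantic code (compare SEM_SET_FULL_CODE and SEM_SET_FAST_CODE
   in the engine entry points below).  */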
/* Fill in tracing/profiling fields of an ARGBUF.  */

static INLINE void
sh64_media_fill_argbuf_tp (const SIM_CPU *cpu, ARGBUF *abuf,
                           int trace_p, int profile_p)
{
  ARGBUF_TRACE_P (abuf) = trace_p;
  ARGBUF_PROFILE_P (abuf) = profile_p;
}
41 /* Emit the "x-before" handler.
42 x-before is emitted before each insn (serial or parallel).
43 This is as opposed to x-after which is only emitted at the end of a group
47 sh64_media_emit_before (SIM_CPU
*current_cpu
, SCACHE
*sc
, PCADDR pc
, int first_p
)
49 ARGBUF
*abuf
= &sc
[0].argbuf
;
50 const IDESC
*id
= & CPU_IDESC (current_cpu
) [SH64_MEDIA_INSN_X_BEFORE
];
52 abuf
->fields
.before
.first_p
= first_p
;
53 sh64_media_fill_argbuf (current_cpu
, abuf
, id
, pc
, 0);
54 /* no need to set trace_p,profile_p */
57 /* Emit the "x-after" handler.
58 x-after is emitted after a serial insn or at the end of a group of
62 sh64_media_emit_after (SIM_CPU
*current_cpu
, SCACHE
*sc
, PCADDR pc
)
64 ARGBUF
*abuf
= &sc
[0].argbuf
;
65 const IDESC
*id
= & CPU_IDESC (current_cpu
) [SH64_MEDIA_INSN_X_AFTER
];
67 sh64_media_fill_argbuf (current_cpu
, abuf
, id
, pc
, 0);
68 /* no need to set trace_p,profile_p */
71 #endif /* WITH_SCACHE_PBB */
static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = sh64_media_decode (current_cpu, pc, insn, insn, abuf);

  sh64_media_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      sh64_media_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}
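/* Execute the scache entry SC: dispatch to its fast or full semantic
   handler as selected by FAST_P, doing the per-insn tracing, profiling and
   model bookkeeping in the full case, and return the vpc of the next entry
   to execute.  */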
static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  if (fast_p)
    {
#if ! WITH_SEM_SWITCH_FAST
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, &sc->argbuf);
#endif
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FAST */
    }
  else
    {
#if ! WITH_SEM_SWITCH_FULL
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            sh64_media_model_insn_before (current_cpu, 1 /*first_p*/);
          CGEN_TRACE_INSN_INIT (current_cpu, abuf, 1);
          CGEN_TRACE_INSN (current_cpu, idesc->idata,
                           (const struct argbuf *) abuf, abuf->addr);
        }
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            {
              int cycles;

              cycles = (*idesc->timing->model_fn) (current_cpu, sc);
              sh64_media_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
            }
          CGEN_TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FULL */
    }

  return vpc;
}
/* Record address of cti terminating a pbb.  */
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)
/* Record number of [real] insns in pbb.  */
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)
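/* With the pbb ("pseudo-basic-block") engine, straight-line code between
   cti's is decoded once into consecutive scache entries and terminated with
   a chain (or cti-chain) virtual insn; the macros above communicate the
   terminating cti and the real-insn count from the port-specific extract
   loop to the chain insn emitted at the end of sh64_media_pbb_begin.  */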
/* Fetch and extract a pseudo-basic-block.
   FAST_P is non-zero if no tracing/profiling/etc. is wanted.  */

INLINE SEM_PC
sh64_media_pbb_begin (SIM_CPU *current_cpu, int FAST_P)
{
  SEM_PC new_vpc;
  PCADDR pc;
  SCACHE *sc;
  int max_insns = CPU_SCACHE_MAX_CHAIN_LENGTH (current_cpu);

  pc = GET_H_PC ();

  new_vpc = scache_lookup_or_alloc (current_cpu, pc, max_insns, &sc);
  if (! new_vpc)
    {
      /* Leading '_' to avoid collision with mainloop.in.  */
      int _insn_count;
      SCACHE *orig_sc = sc;
      SCACHE *_cti_sc = NULL;
      int slice_insns = CPU_MAX_SLICE_INSNS (current_cpu);
      /* First figure out how many instructions to compile.
         MAX_INSNS is the size of the allocated buffer, which includes space
         for before/after handlers if they're being used.
         SLICE_INSNS is the maximum number of real insns that can be
         executed.  Zero means "as many as we want".  */

      /* ??? max_insns is serving two incompatible roles.
         1) Number of slots available in scache buffer.
         2) Number of real insns to execute.
         They're incompatible because there are virtual insns emitted too
         (chain,cti-chain,before,after handlers).  */

      if (slice_insns == 1)
        {
          /* No need to worry about extra slots required for virtual insns
             and parallel exec support because MAX_CHAIN_LENGTH is
             guaranteed to be big enough to execute at least 1 insn!  */
          max_insns = 1;
        }
      else
        {
          /* Allow enough slop so that while compiling insns, if max_insns > 0
             then there's guaranteed to be enough space to emit one real insn.
             MAX_CHAIN_LENGTH is typically much longer than
             the normal number of insns between cti's anyway.  */
          max_insns -= (1 /* one for the trailing chain insn */
                        + (FAST_P
                           ? 0
                           : (1 + MAX_PARALLEL_INSNS) /* before+after */)
                        + (MAX_PARALLEL_INSNS > 1
                           ? (MAX_PARALLEL_INSNS * 2)
                           : 0));

          /* Account for before/after handlers.  */
          if (! FAST_P)
            slice_insns *= 3;

          if (slice_insns > 0
              && slice_insns < max_insns)
            max_insns = slice_insns;
        }
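      /* For example, with MAX_PARALLEL_INSNS == 1 the subtraction above
         reserves one slot for the trailing chain insn when FAST_P, and
         roughly 1 + 2 = 3 slots (chain plus before/after handlers) when
         tracing/profiling; "slice_insns *= 3" likewise allows room for one
         before and one after handler per real insn.  */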
      new_vpc = sc;

      /* SC,PC must be updated to point past the last entry used.
         SET_CTI_VPC must be called if pbb is terminated by a cti.
         SET_INSN_COUNT must be called to record number of real insns in
         pbb [could be computed by us of course, extra cpu but perhaps
         negligible enough].  */

/* begin extract-pbb */
{
  const IDESC *idesc;
  int icount = 0;

  while (max_insns > 0)
    {
      USI insn = GETIMEMUSI (current_cpu, pc);

      idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
      SEM_SKIP_COMPILE (current_cpu, sc, 1);
      ++sc;
      --max_insns;
      ++icount;
      pc += 4;

      if (IDESC_CTI_P (idesc))
        {
          SET_CTI_VPC (sc - 1);

          if (CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_DELAY_SLOT))
            {
              USI insn = GETIMEMUSI (current_cpu, pc);

              idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
              ++sc;
              --max_insns;
              ++icount;
              pc += 4;
            }
          break;
        }
    }

  SET_INSN_COUNT (icount);
}
/* end extract-pbb */
      /* The last one is a pseudo-insn to link to the next chain.
         It is also used to record the insn count for this chain.  */
      {
        const IDESC *id;

        /* Was pbb terminated by a cti?  */
        if (_cti_sc)
          id = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_CTI_CHAIN];
        else
          id = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_CHAIN];

        SEM_SET_CODE (&sc->argbuf, id, FAST_P);
        sc->argbuf.idesc = id;
        sc->argbuf.addr = pc;
        sc->argbuf.fields.chain.insn_count = _insn_count;
        sc->argbuf.fields.chain.next = 0;
        sc->argbuf.fields.chain.branch_target = 0;
        ++sc;
      }

      /* Update the pointer to the next free entry, may not have used as
         many entries as was asked for.  */
      CPU_SCACHE_NEXT_FREE (current_cpu) = sc;

      /* Record length of chain if profiling.
         This includes virtual insns since they count against
         max_insns too.  */
      if (! FAST_P)
        PROFILE_COUNT_SCACHE_CHAIN_LENGTH (current_cpu, sc - orig_sc);
    }

  return new_vpc;
}
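/* The chain argbuf fields are filled in lazily: "next" caches the vpc of the
   fall-through successor block and "branch_target" caches the vpc of the
   taken-branch successor.  Both start out 0 and are looked up (or compiled)
   on first use by the chain handlers below.  */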
/* Chain to the next block from a non-cti terminated previous block.  */

INLINE SEM_PC
sh64_media_pbb_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg)
{
  ARGBUF *abuf = SEM_ARGBUF (sem_arg);

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* Set bit 0 to stay in SHmedia mode.  */
  SET_H_PC (abuf->addr | 1);

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* If chained to next block, go straight to it.  */
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;

  /* See if next block has already been compiled.  */
  abuf->fields.chain.next = scache_lookup (current_cpu, abuf->addr);
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;

  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
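/* On SH-5, bit 0 of a branch-target address selects the ISA: 1 means SHmedia,
   0 means SHcompact.  That is why the PC is OR'd with 1 above to stay in
   SHmedia mode, and why a target address with bit 0 clear below forces a
   switch to the SHcompact engine.  */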
/* Chain to the next block from a cti terminated previous block.
   BR_TYPE indicates whether the branch was taken and whether we can cache
   the vpc of the branch target.
   NEW_PC is the target's branch address, and is only valid if
   BR_TYPE != SEM_BRANCH_UNTAKEN.  */

INLINE SEM_PC
sh64_media_pbb_cti_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg,
                          SEM_BRANCH_TYPE br_type, PCADDR new_pc)
{
  SEM_PC *new_vpc_ptr;

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* If we have switched ISAs, exit back to main loop.
     Set idesc to 0 to cause the engine to point to the right insn table.  */
  if ((new_pc & 1) == 0)
    {
      /* Switch to SHcompact.  */
      CPU_IDESC_SEM_INIT_P (current_cpu) = 0;
      CPU_RUNNING_P (current_cpu) = 0;
    }

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* Restart compiler if we branched to an uncacheable address
     (e.g. "j reg").  */
  if (br_type == SEM_BRANCH_UNCACHEABLE)
    {
      SET_H_PC (new_pc);
      return CPU_SCACHE_PBB_BEGIN (current_cpu);
    }

  /* If branch wasn't taken, update the pc and set BR_ADDR_PTR to our
     next chain ptr.  */
  if (br_type == SEM_BRANCH_UNTAKEN)
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      new_pc = abuf->addr;
      /* Set bit 0 to stay in SHmedia mode.  */
      SET_H_PC (new_pc | 1);
      new_vpc_ptr = &abuf->fields.chain.next;
    }
  else
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.branch_target;
    }

  /* If chained to next block, go straight to it.  */
  if (*new_vpc_ptr)
    return *new_vpc_ptr;

  /* See if next block has already been compiled.  */
  *new_vpc_ptr = scache_lookup (current_cpu, new_pc);
  if (*new_vpc_ptr)
    return *new_vpc_ptr;

  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
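/* In the non-fast case each real insn in a pbb is bracketed by x-before and
   x-after virtual insns occupying adjacent scache entries, which is why the
   handlers below reach the current insn's argbuf at sc + 1 and the previous
   insn's at sc - 1.  */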
/* x-before handler.
   This is called before each insn.  */

void
sh64_media_pbb_before (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  int first_p = abuf->fields.before.first_p;
  const ARGBUF *cur_abuf = SEM_ARGBUF (sc + 1);
  const IDESC *cur_idesc = cur_abuf->idesc;
  PCADDR pc = cur_abuf->addr;

  if (ARGBUF_PROFILE_P (cur_abuf))
    PROFILE_COUNT_INSN (current_cpu, pc, cur_idesc->num);

  /* If this isn't the first insn, finish up the previous one.  */

  if (! first_p)
    {
      if (PROFILE_MODEL_P (current_cpu))
        {
          const SEM_ARG prev_sem_arg = sc - 1;
          const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
          const IDESC *prev_idesc = prev_abuf->idesc;
          int cycles;

          /* ??? May want to measure all insns if doing insn tracing.  */
          if (ARGBUF_PROFILE_P (prev_abuf))
            {
              cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
              sh64_media_model_insn_after (current_cpu, 0 /*last_p*/, cycles);
            }
        }

      CGEN_TRACE_INSN_FINI (current_cpu, cur_abuf, 0 /*last_p*/);
    }

  /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (cur_abuf))
    sh64_media_model_insn_before (current_cpu, first_p);

  CGEN_TRACE_INSN_INIT (current_cpu, cur_abuf, first_p);
  CGEN_TRACE_INSN (current_cpu, cur_idesc->idata, cur_abuf, pc);
}
/* x-after handler.
   This is called after a serial insn or at the end of a group of parallel
   insns.  */

void
sh64_media_pbb_after (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  const SEM_ARG prev_sem_arg = sc - 1;
  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);

  /* ??? May want to measure all insns if doing insn tracing.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (prev_abuf))
    {
      const IDESC *prev_idesc = prev_abuf->idesc;
      int cycles;

      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
      sh64_media_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
    }
  CGEN_TRACE_INSN_FINI (current_cpu, prev_abuf, 1 /*last_p*/);
}
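/* Two engine entry points follow: the "full" engine does tracing, profiling
   and model timing on every insn, while the "fast" engine runs the bare
   semantics.  The main loop outside this file is expected to select one or
   the other (and the matching SHcompact engine) based on the run-time
   options and the current ISA.  */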
#define FAST_P 0

void
sh64_media_engine_run_full (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FULL
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FULL
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-media-switch.c"
#endif
#else
      sh64_media_sem_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FULL_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_media_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin full-exec-pbb */
{
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#define WITH_ISA_COMPACT
#include "sem-media-switch.c"
#else
  vpc = execute (current_cpu, vpc, FAST_P);
#endif
}
/* end full-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P
#define FAST_P 1

void
sh64_media_engine_run_fast (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FAST
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FAST
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-media-switch.c"
#endif
#else
      sh64_media_semf_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FAST_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_MEDIA_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_media_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin fast-exec-pbb */
{
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#define WITH_ISA_COMPACT
#include "sem-media-switch.c"
#else
  vpc = execute (current_cpu, vpc, FAST_P);
#endif
}
/* end fast-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P