/* This file is generated by the genmloop script.  DO NOT EDIT! */

/* Enable switch() support in cgen headers.  */
#define SEM_IN_SWITCH

#define WANT_CPU sh64
#define WANT_CPU_SH64

#include "sim-main.h"
#include "bfd.h"
#include "cgen-mem.h"
#include "cgen-ops.h"
#include "sim-assert.h"
/* Fill in the administrative ARGBUF fields required by all insns,
   virtual and real.  */

static INLINE void
sh64_compact_fill_argbuf (const SIM_CPU *cpu, ARGBUF *abuf, const IDESC *idesc,
                          PCADDR pc, int fast_p)
{
#if WITH_SCACHE
  SEM_SET_CODE (abuf, idesc, fast_p);
  ARGBUF_ADDR (abuf) = pc;
#endif
  ARGBUF_IDESC (abuf) = idesc;
}

/* Fill in tracing/profiling fields of an ARGBUF.  */

static INLINE void
sh64_compact_fill_argbuf_tp (const SIM_CPU *cpu, ARGBUF *abuf,
                             int trace_p, int profile_p)
{
  ARGBUF_TRACE_P (abuf) = trace_p;
  ARGBUF_PROFILE_P (abuf) = profile_p;
}
#if WITH_SCACHE_PBB

/* Emit the "x-before" handler.
   x-before is emitted before each insn (serial or parallel).
   This is as opposed to x-after which is only emitted at the end of a group
   of parallel insns.  */

static INLINE void
sh64_compact_emit_before (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc, int first_p)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEFORE];

  abuf->fields.before.first_p = first_p;
  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}

/* Emit the "x-after" handler.
   x-after is emitted after a serial insn or at the end of a group of
   parallel insns.  */

static INLINE void
sh64_compact_emit_after (SIM_CPU *current_cpu, SCACHE *sc, PCADDR pc)
{
  ARGBUF *abuf = &sc[0].argbuf;
  const IDESC *id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_AFTER];

  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, 0);
  /* no need to set trace_p,profile_p */
}

#endif /* WITH_SCACHE_PBB */
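/* Decode one insn and fill in its ARGBUF.
   Returns the IDESC of the decoded insn.  When !FAST_P, also records
   whether PC falls within the trace and profile address ranges so the
   full-featured engine can test ARGBUF_TRACE_P/ARGBUF_PROFILE_P later
   without recomputing them.  */
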
static INLINE const IDESC *
extract (SIM_CPU *current_cpu, PCADDR pc, CGEN_INSN_INT insn, ARGBUF *abuf,
         int fast_p)
{
  const IDESC *id = sh64_compact_decode (current_cpu, pc, insn, insn, abuf);

  sh64_compact_fill_argbuf (current_cpu, abuf, id, pc, fast_p);
  if (! fast_p)
    {
      int trace_p = PC_IN_TRACE_RANGE_P (current_cpu, pc);
      int profile_p = PC_IN_PROFILE_RANGE_P (current_cpu, pc);
      sh64_compact_fill_argbuf_tp (current_cpu, abuf, trace_p, profile_p);
    }
  return id;
}
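/* Execute the insn at SC and return the next vpc.
   FAST_P selects the sem_fast vs. sem_full semantic handler.  In
   configurations where semantics run via the generated switch instead
   (WITH_SEM_SWITCH_*), this path is unused and simply aborts.  */
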
static INLINE SEM_PC
execute (SIM_CPU *current_cpu, SCACHE *sc, int fast_p)
{
  SEM_PC vpc;

  if (fast_p)
    {
#if ! WITH_SEM_SWITCH_FAST
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_fast) (current_cpu, &sc->argbuf);
#endif
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FAST */
    }
  else
    {
#if ! WITH_SEM_SWITCH_FULL
      ARGBUF *abuf = &sc->argbuf;
      const IDESC *idesc = abuf->idesc;
#if WITH_SCACHE_PBB
      int virtual_p = CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_VIRTUAL);
#else
      int virtual_p = 0;
#endif

      if (! virtual_p)
        {
          /* FIXME: call x-before */
          if (ARGBUF_PROFILE_P (abuf))
            PROFILE_COUNT_INSN (current_cpu, abuf->addr, idesc->num);
          /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            sh64_compact_model_insn_before (current_cpu, 1 /*first_p*/);
          CGEN_TRACE_INSN_INIT (current_cpu, abuf, 1);
          CGEN_TRACE_INSN (current_cpu, idesc->idata,
                           (const struct argbuf *) abuf, abuf->addr);
        }
#if WITH_SCACHE
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, sc);
#else
      vpc = (*sc->argbuf.semantic.sem_full) (current_cpu, abuf);
#endif
      if (! virtual_p)
        {
          /* FIXME: call x-after */
          if (PROFILE_MODEL_P (current_cpu)
              && ARGBUF_PROFILE_P (abuf))
            {
              int cycles;

              cycles = (*idesc->timing->model_fn) (current_cpu, sc);
              sh64_compact_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
            }
          CGEN_TRACE_INSN_FINI (current_cpu, abuf, 1);
        }
#else
      abort ();
#endif /* WITH_SEM_SWITCH_FULL */
    }

  return vpc;
}
/* Record address of cti terminating a pbb.  */
#define SET_CTI_VPC(sc) do { _cti_sc = (sc); } while (0)

/* Record number of [real] insns in pbb.  */
#define SET_INSN_COUNT(n) do { _insn_count = (n); } while (0)
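/* Both macros write to locals of sh64_compact_pbb_begin (hence the
   leading '_'); they let the extract-pbb fragment spliced in below
   report back to the enclosing function.  */
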
/* Fetch and extract a pseudo-basic-block.
   FAST_P is non-zero if no tracing/profiling/etc. is wanted.  */

INLINE SEM_PC
sh64_compact_pbb_begin (SIM_CPU *current_cpu, int FAST_P)
{
  SEM_PC new_vpc;
  PCADDR pc;
  SCACHE *sc;
  int max_insns = CPU_SCACHE_MAX_CHAIN_LENGTH (current_cpu);

  pc = GET_H_PC ();

  new_vpc = scache_lookup_or_alloc (current_cpu, pc, max_insns, &sc);
  if (! new_vpc)
    {
      /* Leading '_' to avoid collision with mainloop.in.  */
      int _insn_count = 0;
      SCACHE *orig_sc = sc;
      SCACHE *_cti_sc = NULL;
      int slice_insns = CPU_MAX_SLICE_INSNS (current_cpu);
      /* First figure out how many instructions to compile.
         MAX_INSNS is the size of the allocated buffer, which includes space
         for before/after handlers if they're being used.
         SLICE_INSNS is the maximum number of real insns that can be
         executed.  Zero means "as many as we want".  */
      /* ??? max_insns is serving two incompatible roles.
         1) Number of slots available in scache buffer.
         2) Number of real insns to execute.
         They're incompatible because there are virtual insns emitted too
         (chain, cti-chain, before, after handlers).  */
      if (slice_insns == 1)
        {
          /* No need to worry about extra slots required for virtual insns
             and parallel exec support because MAX_CHAIN_LENGTH is
             guaranteed to be big enough to execute at least 1 insn!  */
          max_insns = 1;
        }
      else
        {
          /* Allow enough slop so that while compiling insns, if max_insns > 0
             then there's guaranteed to be enough space to emit one real insn.
             MAX_CHAIN_LENGTH is typically much longer than
             the normal number of insns between cti's anyway.  */
          max_insns -= (1 /* one for the trailing chain insn */
                        + (FAST_P
                           ? 0
                           : (1 + MAX_PARALLEL_INSNS) /* before+after */)
                        + (MAX_PARALLEL_INSNS > 1
                           ? (MAX_PARALLEL_INSNS * 2)
                           : 0));
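          /* For example, with MAX_PARALLEL_INSNS == 1 (purely serial
             issue), this reserves three slots in full mode (the chain
             insn plus before/after handlers) and only one (the chain
             insn) in fast mode.  */
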
          /* Account for before/after handlers.  */
          if (! FAST_P)
            slice_insns *= 3;
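          /* Each real insn can be bracketed by x-before and x-after
             virtual insns, so one real insn may occupy up to three
             scache slots in full mode; hence the factor of 3.  */
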
          if (slice_insns > 0
              && slice_insns < max_insns)
            max_insns = slice_insns;
        }

      new_vpc = sc;
      /* SC,PC must be updated to point past the last entry used.
         SET_CTI_VPC must be called if the pbb is terminated by a cti.
         SET_INSN_COUNT must be called to record the number of real insns in
         the pbb [it could be computed by us of course; extra cpu, but perhaps
         negligible enough].  */
/* begin extract-pbb */
      {
        const IDESC *idesc;
        int icount = 0;

        while (max_insns > 0)
          {
            UHI insn = GETIMEMUHI (current_cpu, pc);

            idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);
            SEM_SKIP_COMPILE (current_cpu, sc, 1);
            ++sc;
            --max_insns;
            ++icount;
            pc += idesc->length;

            if (IDESC_CTI_P (idesc))
              {
                SET_CTI_VPC (sc - 1);

                if (CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_DELAY_SLOT))
                  {
                    USI insn = GETIMEMUHI (current_cpu, pc);
                    idesc = extract (current_cpu, pc, insn, &sc->argbuf, FAST_P);

                    if (IDESC_CTI_P (idesc) ||
                        CGEN_ATTR_VALUE (NULL, idesc->attrs, CGEN_INSN_ILLSLOT))
                      {
                        SIM_DESC sd = CPU_STATE (current_cpu);
                        sim_io_eprintf (sd,
                                        "malformed program, `%s' insn in delay slot\n",
                                        CGEN_INSN_NAME (idesc->idata));
                        sim_engine_halt (sd, current_cpu, NULL, pc,
                                         sim_stopped, SIM_SIGILL);
                      }
                    else
                      {
                        ++sc;
                        --max_insns;
                        ++icount;
                        pc += idesc->length;
                      }
                  }
                break;
              }
          }

      Finish:
        SET_INSN_COUNT (icount);
      }
/* end extract-pbb */
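      /* At this point SC points one past the last entry used and PC one
         past the last insn's address; _insn_count holds the number of
         real insns compiled, and _cti_sc is non-NULL if a cti ended the
         pbb.  */
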
      /* The last one is a pseudo-insn to link to the next chain.
         It is also used to record the insn count for this chain.  */
      {
        const IDESC *id;

        /* Was pbb terminated by a cti?  */
        if (_cti_sc)
          id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CTI_CHAIN];
        else
          id = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_CHAIN];

        SEM_SET_CODE (&sc->argbuf, id, FAST_P);
        sc->argbuf.idesc = id;
        sc->argbuf.addr = pc;
        sc->argbuf.fields.chain.insn_count = _insn_count;
        sc->argbuf.fields.chain.next = 0;
        sc->argbuf.fields.chain.branch_target = 0;
        ++sc;
      }
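      /* The compiled pbb now occupies orig_sc .. sc-1: the real insns
         followed by the trailing chain/cti-chain virtual insn.  */
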
      /* Update the pointer to the next free entry; we may not have used
         as many entries as were asked for.  */
      CPU_SCACHE_NEXT_FREE (current_cpu) = sc;

      /* Record length of chain if profiling.
         This includes virtual insns since they count against
         max_insns too.  */
      if (! FAST_P)
        PROFILE_COUNT_SCACHE_CHAIN_LENGTH (current_cpu, sc - orig_sc);
    }

  return new_vpc;
}
/* Chain to the next block from a non-cti terminated previous block.  */

INLINE SEM_PC
sh64_compact_pbb_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg)
{
  ARGBUF *abuf = SEM_ARGBUF (sem_arg);

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  SET_H_PC (abuf->addr);

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* If chained to next block, go straight to it.  */
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;

  /* See if next block has already been compiled.  */
  abuf->fields.chain.next = scache_lookup (current_cpu, abuf->addr);
  if (abuf->fields.chain.next)
    return abuf->fields.chain.next;

  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
/* Chain to the next block from a cti terminated previous block.
   BR_TYPE indicates whether the branch was taken and whether we can cache
   the vpc of the branch target.
   NEW_PC is the target's branch address, and is only valid if
   BR_TYPE != SEM_BRANCH_UNTAKEN.  */
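/* Three cases are handled below: an untaken branch falls through and
   caches the fall-through block in fields.chain.next; a taken,
   cacheable branch caches its target in fields.chain.branch_target;
   and a SEM_BRANCH_UNCACHEABLE target (e.g. a branch through a
   register) always re-enters the pbb compiler.  */
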
INLINE SEM_PC
sh64_compact_pbb_cti_chain (SIM_CPU *current_cpu, SEM_ARG sem_arg,
                            SEM_BRANCH_TYPE br_type, PCADDR new_pc)
{
  SEM_PC *new_vpc_ptr;

  PBB_UPDATE_INSN_COUNT (current_cpu, sem_arg);

  /* If we have switched ISAs, exit back to main loop.
     Set idesc to 0 to cause the engine to point to the right insn table.  */
  if (new_pc & 1)
    {
      /* Switch to SHmedia.  */
      CPU_IDESC_SEM_INIT_P (current_cpu) = 0;
      CPU_RUNNING_P (current_cpu) = 0;
    }

  /* If not running forever, exit back to main loop.  */
  if (CPU_MAX_SLICE_INSNS (current_cpu) != 0
      /* Also exit back to main loop if there's an event.
         Note that if CPU_MAX_SLICE_INSNS != 1, events won't get processed
         at the "right" time, but then that was what was asked for.
         There is no silver bullet for simulator engines.
         ??? Clearly this needs a cleaner interface.
         At present it's just so Ctrl-C works.  */
      || STATE_EVENTS (CPU_STATE (current_cpu))->work_pending)
    CPU_RUNNING_P (current_cpu) = 0;

  /* Restart compiler if we branched to an uncacheable address
     (e.g. "j reg").  */
  if (br_type == SEM_BRANCH_UNCACHEABLE)
    {
      SET_H_PC (new_pc);
      return CPU_SCACHE_PBB_BEGIN (current_cpu);
    }

  /* If branch wasn't taken, update the pc and set BR_ADDR_PTR to our
     next chain ptr.  */
  if (br_type == SEM_BRANCH_UNTAKEN)
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      new_pc = abuf->addr;
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.next;
    }
  else
    {
      ARGBUF *abuf = SEM_ARGBUF (sem_arg);
      SET_H_PC (new_pc);
      new_vpc_ptr = &abuf->fields.chain.branch_target;
    }

  /* If chained to next block, go straight to it.  */
  if (*new_vpc_ptr)
    return *new_vpc_ptr;

  /* See if next block has already been compiled.  */
  *new_vpc_ptr = scache_lookup (current_cpu, new_pc);
  if (*new_vpc_ptr)
    return *new_vpc_ptr;

  /* Nope, so next insn is a virtual insn to invoke the compiler
     (begin a pbb).  */
  return CPU_SCACHE_PBB_BEGIN (current_cpu);
}
/* x-before handler.
   This is called before each insn.  */

void
sh64_compact_pbb_before (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  int first_p = abuf->fields.before.first_p;
  const ARGBUF *cur_abuf = SEM_ARGBUF (sc + 1);
  const IDESC *cur_idesc = cur_abuf->idesc;
  PCADDR pc = cur_abuf->addr;

  if (ARGBUF_PROFILE_P (cur_abuf))
    PROFILE_COUNT_INSN (current_cpu, pc, cur_idesc->num);

  /* If this isn't the first insn, finish up the previous one.  */

  if (! first_p)
    {
      if (PROFILE_MODEL_P (current_cpu))
        {
          const SEM_ARG prev_sem_arg = sc - 1;
          const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);
          const IDESC *prev_idesc = prev_abuf->idesc;
          int cycles;

          /* ??? May want to measure all insns if doing insn tracing.  */
          if (ARGBUF_PROFILE_P (prev_abuf))
            {
              cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
              sh64_compact_model_insn_after (current_cpu, 0 /*last_p*/, cycles);
            }
        }

      CGEN_TRACE_INSN_FINI (current_cpu, cur_abuf, 0 /*last_p*/);
    }

  /* FIXME: Later make cover macros: PROFILE_INSN_{INIT,FINI}.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (cur_abuf))
    sh64_compact_model_insn_before (current_cpu, first_p);

  CGEN_TRACE_INSN_INIT (current_cpu, cur_abuf, first_p);
  CGEN_TRACE_INSN (current_cpu, cur_idesc->idata, cur_abuf, pc);
}
/* x-after handler.
   This is called after a serial insn or at the end of a group of parallel
   insns.  */

void
sh64_compact_pbb_after (SIM_CPU *current_cpu, SCACHE *sc)
{
  SEM_ARG sem_arg = sc;
  const ARGBUF *abuf = SEM_ARGBUF (sem_arg);
  const SEM_ARG prev_sem_arg = sc - 1;
  const ARGBUF *prev_abuf = SEM_ARGBUF (prev_sem_arg);

  /* ??? May want to measure all insns if doing insn tracing.  */
  if (PROFILE_MODEL_P (current_cpu)
      && ARGBUF_PROFILE_P (prev_abuf))
    {
      const IDESC *prev_idesc = prev_abuf->idesc;
      int cycles;

      cycles = (*prev_idesc->timing->model_fn) (current_cpu, prev_sem_arg);
      sh64_compact_model_insn_after (current_cpu, 1 /*last_p*/, cycles);
    }
  CGEN_TRACE_INSN_FINI (current_cpu, prev_abuf, 1 /*last_p*/);
}
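/* Note the division of labor: pbb_before finishes the previous insn
   with last_p = 0 (more insns follow in the group), while pbb_after
   finishes it with last_p = 1, closing a serial insn or a group of
   parallel insns.  */
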
#define FAST_P 0

void
sh64_compact_engine_run_full (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FULL
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FULL
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_sem_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FULL_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin full-exec-pbb */
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
      vpc = execute (current_cpu, vpc, FAST_P);
#endif
/* end full-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P
#define FAST_P 1

void
sh64_compact_engine_run_fast (SIM_CPU *current_cpu)
{
  SIM_DESC current_state = CPU_STATE (current_cpu);
  SCACHE *scache = CPU_SCACHE_CACHE (current_cpu);
  /* virtual program counter */
  SEM_PC vpc;
#if WITH_SEM_SWITCH_FAST
  /* For communication between cti's and cti-chain.  */
  SEM_BRANCH_TYPE pbb_br_type;
  PCADDR pbb_br_npc;
#endif

  if (! CPU_IDESC_SEM_INIT_P (current_cpu))
    {
      /* ??? 'twould be nice to move this up a level and only call it once.
         On the other hand, in the "let's go fast" case the test is only done
         once per pbb (since we only return to the main loop at the end of
         a pbb).  And in the "let's run until we're done" case we don't return
         until the program exits.  */

#if WITH_SEM_SWITCH_FAST
#if defined (__GNUC__)
/* ??? Later maybe paste sem-switch.c in when building mainloop.c.  */
#define DEFINE_LABELS
#include "sem-compact-switch.c"
#endif
#else
      sh64_compact_semf_init_idesc_table (current_cpu);
#endif

      /* Initialize the "begin (compile) a pbb" virtual insn.  */
      vpc = CPU_SCACHE_PBB_BEGIN (current_cpu);
      SEM_SET_FAST_CODE (SEM_ARGBUF (vpc),
                         & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN]);
      vpc->argbuf.idesc = & CPU_IDESC (current_cpu) [SH64_COMPACT_INSN_X_BEGIN];

      CPU_IDESC_SEM_INIT_P (current_cpu) = 1;
    }

  CPU_RUNNING_P (current_cpu) = 1;
  /* ??? In the case where we're returning to the main loop after every
     pbb we don't want to call pbb_begin each time (which hashes on the pc
     and does a table lookup).  A way to speed this up is to save vpc
     between calls.  */
  vpc = sh64_compact_pbb_begin (current_cpu, FAST_P);

  do
    {
/* begin fast-exec-pbb */
#if (! FAST_P && WITH_SEM_SWITCH_FULL) || (FAST_P && WITH_SEM_SWITCH_FAST)
#define DEFINE_SWITCH
#include "sem-compact-switch.c"
#else
      vpc = execute (current_cpu, vpc, FAST_P);
#endif
/* end fast-exec-pbb */
    }
  while (CPU_RUNNING_P (current_cpu));
}

#undef FAST_P