/*
 * Copyright (c) 2005-2007 Stephen Williams <steve@icarus.com>
 *
 * This source code is free software; you can redistribute it
 * and/or modify it in source code form under the terms of the GNU
 * General Public License as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */
26 void vvp_delay_t::calculate_min_delay_()
29 if (fall_
< min_delay_
)
31 if (decay_
< min_delay_
)
35 vvp_delay_t::vvp_delay_t(vvp_time64_t rise
, vvp_time64_t fall
)
39 decay_
= fall
< rise
? fall
: rise
;
43 vvp_delay_t::vvp_delay_t(vvp_time64_t rise
, vvp_time64_t fall
, vvp_time64_t decay
)
49 calculate_min_delay_();
52 vvp_delay_t::~vvp_delay_t()
56 vvp_time64_t
vvp_delay_t::get_delay(vvp_bit4_t from
, vvp_bit4_t to
)
61 case BIT4_0
: return 0;
62 case BIT4_1
: return rise_
;
63 case BIT4_X
: return min_delay_
;
64 case BIT4_Z
: return decay_
;
69 case BIT4_0
: return fall_
;
70 case BIT4_1
: return 0;
71 case BIT4_X
: return min_delay_
;
72 case BIT4_Z
: return decay_
;
77 case BIT4_0
: return fall_
;
78 case BIT4_1
: return rise_
;
79 case BIT4_X
: return 0;
80 case BIT4_Z
: return decay_
;
85 case BIT4_0
: return fall_
;
86 case BIT4_1
: return rise_
;
87 case BIT4_X
: return min_delay_
;
88 case BIT4_Z
: return 0;
97 vvp_time64_t
vvp_delay_t::get_min_delay() const
102 void vvp_delay_t::set_rise(vvp_time64_t val
)
105 if (val
< min_delay_
)
108 calculate_min_delay_();
111 void vvp_delay_t::set_fall(vvp_time64_t val
)
114 if (val
< min_delay_
)
117 calculate_min_delay_();
120 void vvp_delay_t::set_decay(vvp_time64_t val
)
123 if (val
< min_delay_
)
126 calculate_min_delay_();
129 vvp_fun_delay::vvp_fun_delay(vvp_net_t
*n
, vvp_bit4_t init
, const vvp_delay_t
&d
)
130 : net_(n
), delay_(d
), cur_vec4_(1)
132 cur_vec4_
.set_bit(0, init
);
136 vvp_fun_delay::~vvp_fun_delay()
138 while (struct event_
*cur
= dequeue_())
142 void vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay
)
148 struct event_
*cur
= list_
->next
;
149 /* If this event is far enough from the event I'm about
150 to create, then that scheduled event is not a pulse
151 to be eliminated, so we're done. */
152 if (cur
->sim_time
+use_delay
<= use_delay
+schedule_simtime())
158 list_
->next
= cur
->next
;
164 * FIXME: this implementation currently only uses the maximum delay
165 * from all the bit changes in the vectors. If there are multiple
166 * changes with different delays, then the results would be
167 * wrong. What should happen is that if there are multiple changes,
168 * multiple vectors approaching the result should be scheduled.
170 void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port
, const vvp_vector4_t
&bit
)
172 if (port
.port() > 0) {
173 // Get the integer value of the bit vector, or 0 if
174 // there are X or Z bits.
175 unsigned long val
= 0;
176 vector4_to_value(bit
, val
);
178 switch (port
.port()) {
180 delay_
.set_rise(val
);
183 delay_
.set_fall(val
);
186 delay_
.set_decay(val
);
192 /* How many bits to compare? */
193 unsigned use_wid
= cur_vec4_
.size();
194 if (bit
.size() < use_wid
)
195 use_wid
= bit
.size();
197 /* Scan the vectors looking for delays. Select the maximim
198 delay encountered. */
199 vvp_time64_t use_delay
;
200 use_delay
= delay_
.get_delay(cur_vec4_
.value(0), bit
.value(0));
202 for (unsigned idx
= 1 ; idx
< use_wid
; idx
+= 1) {
204 tmp
= delay_
.get_delay(cur_vec4_
.value(idx
), bit
.value(idx
));
209 /* what *should* happen here is we check to see if there is a
210 transaction in the queue. This would be a pulse that needs to be
212 clean_pulse_events_(use_delay
);
214 vvp_time64_t use_simtime
= schedule_simtime() + use_delay
;
216 /* And propagate it. */
217 if (use_delay
== 0) {
219 vvp_send_vec4(net_
->out
, cur_vec4_
);
221 struct event_
*cur
= new struct event_(use_simtime
);
222 cur
->run_run_ptr
= &vvp_fun_delay::run_run_vec4_
;
225 schedule_generic(this, use_delay
, false);
229 void vvp_fun_delay::recv_vec8(vvp_net_ptr_t port
, vvp_vector8_t bit
)
231 assert(port
.port() == 0);
233 if (cur_vec8_
.eeq(bit
))
236 /* XXXX FIXME: For now, just use the minimum delay. */
237 vvp_time64_t use_delay
;
238 use_delay
= delay_
.get_min_delay();
240 vvp_time64_t use_simtime
= schedule_simtime() + use_delay
;
241 if (use_delay
== 0) {
243 vvp_send_vec8(net_
->out
, cur_vec8_
);
245 struct event_
*cur
= new struct event_(use_simtime
);
247 cur
->run_run_ptr
= &vvp_fun_delay::run_run_vec8_
;
249 schedule_generic(this, use_delay
, false);
253 void vvp_fun_delay::recv_real(vvp_net_ptr_t port
, double bit
)
255 if (port
.port() > 0) {
256 /* If the port is not 0, then this is a delay value that
257 should be rounded and converted to an integer delay. */
258 unsigned long long val
= 0;
260 val
= (unsigned long long) (bit
+0.5);
262 switch (port
.port()) {
264 delay_
.set_rise(val
);
267 delay_
.set_fall(val
);
270 delay_
.set_decay(val
);
276 if (cur_real_
== bit
)
279 vvp_time64_t use_delay
;
280 use_delay
= delay_
.get_min_delay();
282 vvp_time64_t use_simtime
= schedule_simtime() + use_delay
;
284 if (use_delay
== 0) {
286 vvp_send_real(net_
->out
, cur_real_
);
288 struct event_
*cur
= new struct event_(use_simtime
);
289 cur
->run_run_ptr
= &vvp_fun_delay::run_run_real_
;
293 schedule_generic(this, use_delay
, false);
297 void vvp_fun_delay::run_run()
299 vvp_time64_t sim_time
= schedule_simtime();
300 if (list_
== 0 || list_
->next
->sim_time
> sim_time
)
303 struct event_
*cur
= dequeue_();
307 (this->*(cur
->run_run_ptr
))(cur
);
311 void vvp_fun_delay::run_run_vec4_(struct event_
*cur
)
313 cur_vec4_
= cur
->ptr_vec4
;
314 vvp_send_vec4(net_
->out
, cur_vec4_
);
317 void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_
*cur
)
319 cur_vec8_
= cur
->ptr_vec8
;
320 vvp_send_vec8(net_
->out
, cur_vec8_
);
323 void vvp_fun_delay::run_run_real_(struct vvp_fun_delay::event_
*cur
)
325 cur_real_
= cur
->ptr_real
;
326 vvp_send_real(net_
->out
, cur_real_
);
329 vvp_fun_modpath::vvp_fun_modpath(vvp_net_t
*net
)
330 : net_(net
), src_list_(0)
334 vvp_fun_modpath::~vvp_fun_modpath()
336 // Delete the source probes.
338 vvp_fun_modpath_src
*tmp
= src_list_
;
339 src_list_
= tmp
->next_
;
344 void vvp_fun_modpath::add_modpath_src(vvp_fun_modpath_src
*that
)
346 assert(that
->next_
== 0);
347 that
->next_
= src_list_
;
351 static vvp_time64_t
delay_from_edge(vvp_bit4_t a
, vvp_bit4_t b
, vvp_time64_t array
[12])
353 typedef delay_edge_t bit4_table4
[4];
354 const static bit4_table4 edge_table
[4] = {
355 { DELAY_EDGE_01
, DELAY_EDGE_01
, DELAY_EDGE_0x
, DELAY_EDGE_0z
},
356 { DELAY_EDGE_10
, DELAY_EDGE_10
, DELAY_EDGE_1x
, DELAY_EDGE_1z
},
357 { DELAY_EDGE_x0
, DELAY_EDGE_x1
, DELAY_EDGE_x0
, DELAY_EDGE_xz
},
358 { DELAY_EDGE_z0
, DELAY_EDGE_z1
, DELAY_EDGE_zx
, DELAY_EDGE_z0
}
361 return array
[ edge_table
[a
][b
] ];
364 void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port
, const vvp_vector4_t
&bit
)
366 /* Only the first port is used. */
370 if (cur_vec4_
.eeq(bit
))
373 /* Select a time delay source that applies. */
374 vvp_fun_modpath_src
*src
= 0;
375 for (vvp_fun_modpath_src
*cur
= src_list_
; cur
; cur
=cur
->next_
) {
376 /* Skip paths that are disabled by conditions. */
377 if (cur
->condition_flag_
== false)
382 } else if (cur
->wake_time_
> src
->wake_time_
) {
385 continue; /* Skip this entry. */
391 vvp_time64_t out_at
[12];
392 vvp_time64_t now
= schedule_simtime();
393 for (unsigned idx
= 0 ; idx
< 12 ; idx
+= 1) {
394 out_at
[idx
] = src
->wake_time_
+ src
->delay_
[idx
];
395 if (out_at
[idx
] <= now
)
401 /* Given the scheduled output time, create an output event. */
402 vvp_time64_t use_delay
= delay_from_edge(cur_vec4_
.value(0),
406 /* FIXME: This bases the edge delay on only the least
407 bit. This is WRONG! I need to find all the possible delays,
408 and schedule an event for each partial change. Hard! */
409 for (unsigned idx
= 1 ; idx
< bit
.size() ; idx
+= 1) {
410 vvp_time64_t tmp
= delay_from_edge(cur_vec4_
.value(idx
),
413 assert(tmp
== use_delay
);
417 schedule_generic(this, use_delay
, false);
420 void vvp_fun_modpath::run_run()
422 vvp_send_vec4(net_
->out
, cur_vec4_
);
425 vvp_fun_modpath_src::vvp_fun_modpath_src(vvp_time64_t del
[12])
427 for (unsigned idx
= 0 ; idx
< 12 ; idx
+= 1)
428 delay_
[idx
] = del
[idx
];
432 condition_flag_
= true;
435 vvp_fun_modpath_src::~vvp_fun_modpath_src()
439 void vvp_fun_modpath_src::get_delay12(vvp_time64_t val
[12]) const
441 for (unsigned idx
= 0 ; idx
< 12 ; idx
+= 1)
442 val
[idx
] = delay_
[idx
];
445 void vvp_fun_modpath_src::put_delay12(const vvp_time64_t val
[12])
447 for (unsigned idx
= 0 ; idx
< 12 ; idx
+= 1)
448 delay_
[idx
] = val
[idx
];
451 void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port
, const vvp_vector4_t
&bit
)
453 if (port
.port() == 0) {
454 // The modpath input...
456 wake_time_
= schedule_simtime();
458 } else if (port
.port() == 1) {
459 // The modpath condition input...
460 if (bit
.value(0) == BIT4_1
)
461 condition_flag_
= true;
463 condition_flag_
= false;
467 bool vvp_fun_modpath_src::test_vec4(const vvp_vector4_t
&)
472 vvp_fun_modpath_edge::vvp_fun_modpath_edge(vvp_time64_t del
[12],
474 : vvp_fun_modpath_src(del
)
481 bool vvp_fun_modpath_edge::test_vec4(const vvp_vector4_t
&bit
)
483 vvp_bit4_t tmp
= old_value_
;
484 old_value_
= bit
.value(0);
486 int edge_flag
= edge(tmp
, old_value_
);
487 if (edge_flag
> 0) return posedge_
;
488 if (edge_flag
< 0) return negedge_
;
/*
 * All the routines below that begin with modpath_src_* implement the
 * internal behavior of a vpiModPathIn object. They carry out the
 * path-delay operations specific to that handle type.
 */
500 static int modpath_src_get(int code
, vpiHandle ref
)
502 struct __vpiModPathSrc
*obj
= vpip_modpath_src_from_handle(ref
);
507 static void modpath_src_get_value(vpiHandle ref
, p_vpi_value vp
)
509 assert((ref
->vpi_type
->type_code
== vpiModPathIn
));
510 struct __vpiModPathSrc
* modpathsrc
= vpip_modpath_src_from_handle( ref
) ;
511 assert ( modpathsrc
) ;
515 static vpiHandle
modpath_src_put_value(vpiHandle ref
, s_vpi_value
*vp
)
517 assert((ref
->vpi_type
->type_code
== vpiModPathIn
));
518 struct __vpiModPathSrc
* modpathsrc
= vpip_modpath_src_from_handle( ref
) ;
519 assert ( modpathsrc
) ;
523 static vpiHandle
modpath_src_get_handle(int code
, vpiHandle ref
)
525 struct __vpiModPathSrc
*rfp
= vpip_modpath_src_from_handle(ref
);
531 return vpi_handle(rfp
->dest
->scope
);
534 { struct __vpiScope
*scope
= rfp
->dest
->scope
;
535 while (scope
&& scope
->base
.vpi_type
->type_code
!= vpiModule
)
536 scope
= scope
->scope
;
538 return vpi_handle(scope
);
542 return vpi_handle(&rfp
->path_term_in
);
545 return vpi_handle(&rfp
->dest
->path_term_out
);
550 static vpiHandle
modpath_src_index ( vpiHandle ref
, int code
)
552 assert( (ref
->vpi_type
->type_code
== vpiModPathIn
) );
557 static int modpath_src_free_object( vpiHandle ref
)
559 assert( (ref
->vpi_type
->type_code
== vpiModPathIn
) );
/*
 * This routine stores a set of delay[] values into a vpiHandle. In
 * this case it writes the given delay values into a vpiModPathIn
 * object.
 */
570 static void modpath_src_put_delays ( vpiHandle ref
, p_vpi_delay delays
)
572 vvp_time64_t tmp
[12];
574 struct __vpiModPathSrc
* src
= vpip_modpath_src_from_handle( ref
) ;
577 vvp_fun_modpath_src
*fun
= dynamic_cast<vvp_fun_modpath_src
*>(src
->net
->fun
);
580 typedef unsigned char map_array_t
[12];
581 static const map_array_t map_2
= {0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0};
582 static const map_array_t map12
= {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
584 const map_array_t
*use_map
= 0;
585 switch (delays
->no_of_delays
) {
597 if (delays
->time_type
== vpiSimTime
) {
598 for (idx
= 0 ; idx
< 12 ; idx
+= 1) {
599 tmp
[idx
] = vpip_timestruct_to_time(delays
->da
+use_map
[0][idx
]);
602 for (idx
= 0 ; idx
< 12 ; idx
+= 1) {
603 tmp
[idx
] = vpip_scaled_real_to_time64(delays
->da
[use_map
[0][idx
]].real
,
608 /* Now clean up any to-from-x delays to me the min/max based on
609 the rules for selecting X delays. This only needs to happen
610 if the X delays are not already explicitly given. */
611 if (delays
->no_of_delays
<= 6) {
612 vvp_time64_t t_max
= tmp
[0];
613 vvp_time64_t t_min
= tmp
[1];
614 for (idx
= 1 ; idx
< delays
->no_of_delays
; idx
+= 1) {
615 if (tmp
[idx
] > t_max
) t_max
= tmp
[idx
];
616 if (tmp
[idx
] < t_min
) t_min
= tmp
[idx
];
618 tmp
[DELAY_EDGE_0x
] = t_min
;
619 tmp
[DELAY_EDGE_x1
] = t_max
;
620 tmp
[DELAY_EDGE_1x
] = t_min
;
621 tmp
[DELAY_EDGE_x0
] = t_max
;
622 tmp
[DELAY_EDGE_xz
] = t_max
;
623 tmp
[DELAY_EDGE_zx
] = t_min
;
626 fun
->put_delay12(tmp
);
/*
 * This routine retrieves the delay[12] values from a vpiHandle. In
 * this case it reads the delay values out of a vpiModPathIn object.
 */
637 static void modpath_src_get_delays ( vpiHandle ref
, p_vpi_delay delays
)
639 struct __vpiModPathSrc
*src
= vpip_modpath_src_from_handle( ref
) ;
642 vvp_fun_modpath_src
*fun
= dynamic_cast<vvp_fun_modpath_src
*>(src
->net
->fun
);
646 vvp_time64_t tmp
[12];
647 fun
->get_delay12(tmp
);
649 switch (delays
->no_of_delays
) {
651 if (delays
->time_type
== vpiSimTime
) {
652 for (idx
= 0; idx
< 12; idx
+= 1) {
653 vpip_time_to_timestruct(delays
->da
+idx
, tmp
[idx
]);
656 int units
= src
->dest
->scope
->time_units
;
657 for (idx
= 0; idx
< 12; idx
+= 1) {
658 delays
->da
[idx
].real
= vpip_time_to_scaled_real(tmp
[idx
], src
->dest
->scope
);
670 static vpiHandle
pathterm_get_handle(int code
, vpiHandle ref
)
672 struct __vpiModPathTerm
*obj
= vpip_modpath_term_from_handle(ref
);
/*
 * The __vpiModPathSrc class is what the VPI client sees as a
 * vpiModPath object. The __vpiModPath structure contains the items
 * that are common to a group of modpaths, including the destination
 * term.
 */
688 static const struct __vpirt vpip_modpath_src_rt
= {
692 modpath_src_get_value
,
693 modpath_src_put_value
,
694 modpath_src_get_handle
,
695 0, /* modpath_src_iterate,*/
697 modpath_src_free_object
,
698 modpath_src_get_delays
,
699 modpath_src_put_delays
702 static const struct __vpirt vpip_modpath_term_rt
= {
711 0, // vpi_free_object,
712 0, // vpi_get_delays,
716 static void initialize_path_term(struct __vpiModPathTerm
&obj
)
718 obj
.base
.vpi_type
= &vpip_modpath_term_rt
;
/*
 * This function constructs a vpiModPath object for the given "net",
 * and records that net as the modpath input.
 */
728 struct __vpiModPath
* vpip_make_modpath(vvp_net_t
*net
)
730 struct __vpiModPath
*obj
= (struct __vpiModPath
*)calloc(1, sizeof ( struct __vpiModPath
) );
731 obj
->scope
= vpip_peek_current_scope ( );
733 initialize_path_term(obj
->path_term_out
);
734 obj
->input_net
= net
;
/*
 * This function constructs a vpiModPathIn (struct __vpiModPathSrc)
 * object. It receives the delay[12] values and points to the
 * specified functor net.
 */
747 struct __vpiModPathSrc
* vpip_make_modpath_src (struct __vpiModPath
*path
, vvp_time64_t use_delay
[12] , vvp_net_t
*net
)
749 struct __vpiModPathSrc
*obj
= (struct __vpiModPathSrc
*) calloc (1, sizeof ( struct __vpiModPathSrc
) ) ;
751 obj
->base
.vpi_type
= &vpip_modpath_src_rt
;
754 initialize_path_term(obj
->path_term_in
);
/*
 * This routine safely converts a path-terminal vpiHandle to a
 * struct __vpiModPathTerm pointer, returning 0 on a type mismatch.
 */
765 struct __vpiModPathTerm
* vpip_modpath_term_from_handle(vpiHandle ref
)
767 if (ref
->vpi_type
->type_code
!= vpiPathTerm
)
770 return (struct __vpiModPathTerm
*) ref
;
/*
 * This routine safely converts a modpath-source vpiHandle to a
 * struct __vpiModPathSrc pointer, returning 0 on a type mismatch.
 */
779 struct __vpiModPathSrc
* vpip_modpath_src_from_handle(vpiHandle ref
)
781 if (ref
->vpi_type
->type_code
!= vpiModPath
)
784 return (struct __vpiModPathSrc
*) ref
;
790 void vpip_add_mopdath_edge ( vpiHandle vpiobj
, char *label
,
791 vvp_time64_t use_delay
[12] ,
792 bool posedge
, bool negedge
)
794 // printf(" In the vpip_add_mopdath_edge( ) \n") ;