Fix for assertion error when expanding macro.
[iverilog.git] / vvp / delay.cc
blob4497f89016d96ef9f1b40638e5aa05b94a518320
1 /*
2 * Copyright (c) 2005-2007 Stephen Williams <steve@icarus.com>
4 * This source code is free software; you can redistribute it
5 * and/or modify it in source code form under the terms of the GNU
6 * General Public License as published by the Free Software
7 * Foundation; either version 2 of the License, or (at your option)
8 * any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
20 #include "delay.h"
21 #include "schedule.h"
22 #include "vpi_priv.h"
23 #include <iostream>
24 #include <assert.h>
26 void vvp_delay_t::calculate_min_delay_()
28 min_delay_ = rise_;
29 if (fall_ < min_delay_)
30 min_delay_ = fall_;
31 if (decay_ < min_delay_)
32 min_delay_ = decay_;
35 vvp_delay_t::vvp_delay_t(vvp_time64_t rise, vvp_time64_t fall)
37 rise_ = rise;
38 fall_ = fall;
39 decay_= fall < rise? fall : rise;
40 min_delay_ = decay_;
43 vvp_delay_t::vvp_delay_t(vvp_time64_t rise, vvp_time64_t fall, vvp_time64_t decay)
45 rise_ = rise;
46 fall_ = fall;
47 decay_= decay;
49 calculate_min_delay_();
52 vvp_delay_t::~vvp_delay_t()
56 vvp_time64_t vvp_delay_t::get_delay(vvp_bit4_t from, vvp_bit4_t to)
58 switch (from) {
59 case BIT4_0:
60 switch (to) {
61 case BIT4_0: return 0;
62 case BIT4_1: return rise_;
63 case BIT4_X: return min_delay_;
64 case BIT4_Z: return decay_;
66 break;
67 case BIT4_1:
68 switch (to) {
69 case BIT4_0: return fall_;
70 case BIT4_1: return 0;
71 case BIT4_X: return min_delay_;
72 case BIT4_Z: return decay_;
74 break;
75 case BIT4_X:
76 switch (to) {
77 case BIT4_0: return fall_;
78 case BIT4_1: return rise_;
79 case BIT4_X: return 0;
80 case BIT4_Z: return decay_;
82 break;
83 case BIT4_Z:
84 switch (to) {
85 case BIT4_0: return fall_;
86 case BIT4_1: return rise_;
87 case BIT4_X: return min_delay_;
88 case BIT4_Z: return 0;
90 break;
93 assert(0);
94 return 0;
97 vvp_time64_t vvp_delay_t::get_min_delay() const
99 return min_delay_;
102 void vvp_delay_t::set_rise(vvp_time64_t val)
104 rise_ = val;
105 if (val < min_delay_)
106 min_delay_ = val;
107 else
108 calculate_min_delay_();
111 void vvp_delay_t::set_fall(vvp_time64_t val)
113 fall_ = val;
114 if (val < min_delay_)
115 min_delay_ = val;
116 else
117 calculate_min_delay_();
120 void vvp_delay_t::set_decay(vvp_time64_t val)
122 decay_ = val;
123 if (val < min_delay_)
124 min_delay_ = val;
125 else
126 calculate_min_delay_();
129 vvp_fun_delay::vvp_fun_delay(vvp_net_t*n, vvp_bit4_t init, const vvp_delay_t&d)
130 : net_(n), delay_(d), cur_vec4_(1)
132 cur_vec4_.set_bit(0, init);
133 list_ = 0;
136 vvp_fun_delay::~vvp_fun_delay()
138 while (struct event_*cur = dequeue_())
139 delete cur;
142 void vvp_fun_delay::clean_pulse_events_(vvp_time64_t use_delay)
144 if (list_ == 0)
145 return;
147 do {
148 struct event_*cur = list_->next;
149 /* If this event is far enough from the event I'm about
150 to create, then that scheduled event is not a pulse
151 to be eliminated, so we're done. */
152 if (cur->sim_time+use_delay <= use_delay+schedule_simtime())
153 break;
155 if (list_ == cur)
156 list_ = 0;
157 else
158 list_->next = cur->next;
159 delete cur;
160 } while (list_);
164 * FIXME: this implementation currently only uses the maximum delay
165 * from all the bit changes in the vectors. If there are multiple
166 * changes with different delays, then the results would be
167 * wrong. What should happen is that if there are multiple changes,
168 * multiple vectors approaching the result should be scheduled.
170 void vvp_fun_delay::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
172 if (port.port() > 0) {
173 // Get the integer value of the bit vector, or 0 if
174 // there are X or Z bits.
175 unsigned long val = 0;
176 vector4_to_value(bit, val);
178 switch (port.port()) {
179 case 1:
180 delay_.set_rise(val);
181 return;
182 case 2:
183 delay_.set_fall(val);
184 return;
185 case 3:
186 delay_.set_decay(val);
187 return;
189 return;
192 /* How many bits to compare? */
193 unsigned use_wid = cur_vec4_.size();
194 if (bit.size() < use_wid)
195 use_wid = bit.size();
197 /* Scan the vectors looking for delays. Select the maximim
198 delay encountered. */
199 vvp_time64_t use_delay;
200 use_delay = delay_.get_delay(cur_vec4_.value(0), bit.value(0));
202 for (unsigned idx = 1 ; idx < use_wid ; idx += 1) {
203 vvp_time64_t tmp;
204 tmp = delay_.get_delay(cur_vec4_.value(idx), bit.value(idx));
205 if (tmp > use_delay)
206 use_delay = tmp;
209 /* what *should* happen here is we check to see if there is a
210 transaction in the queue. This would be a pulse that needs to be
211 eliminated. */
212 clean_pulse_events_(use_delay);
214 vvp_time64_t use_simtime = schedule_simtime() + use_delay;
216 /* And propagate it. */
217 if (use_delay == 0) {
218 cur_vec4_ = bit;
219 vvp_send_vec4(net_->out, cur_vec4_);
220 } else {
221 struct event_*cur = new struct event_(use_simtime);
222 cur->run_run_ptr = &vvp_fun_delay::run_run_vec4_;
223 cur->ptr_vec4 = bit;
224 enqueue_(cur);
225 schedule_generic(this, use_delay, false);
229 void vvp_fun_delay::recv_vec8(vvp_net_ptr_t port, vvp_vector8_t bit)
231 assert(port.port() == 0);
233 if (cur_vec8_.eeq(bit))
234 return;
236 /* XXXX FIXME: For now, just use the minimum delay. */
237 vvp_time64_t use_delay;
238 use_delay = delay_.get_min_delay();
240 vvp_time64_t use_simtime = schedule_simtime() + use_delay;
241 if (use_delay == 0) {
242 cur_vec8_ = bit;
243 vvp_send_vec8(net_->out, cur_vec8_);
244 } else {
245 struct event_*cur = new struct event_(use_simtime);
246 cur->ptr_vec8 = bit;
247 cur->run_run_ptr = &vvp_fun_delay::run_run_vec8_;
248 enqueue_(cur);
249 schedule_generic(this, use_delay, false);
253 void vvp_fun_delay::recv_real(vvp_net_ptr_t port, double bit)
255 if (port.port() > 0) {
256 /* If the port is not 0, then this is a delay value that
257 should be rounded and converted to an integer delay. */
258 unsigned long long val = 0;
259 if (bit > 0)
260 val = (unsigned long long) (bit+0.5);
262 switch (port.port()) {
263 case 1:
264 delay_.set_rise(val);
265 return;
266 case 2:
267 delay_.set_fall(val);
268 return;
269 case 3:
270 delay_.set_decay(val);
271 return;
273 return;
276 if (cur_real_ == bit)
277 return;
279 vvp_time64_t use_delay;
280 use_delay = delay_.get_min_delay();
282 vvp_time64_t use_simtime = schedule_simtime() + use_delay;
284 if (use_delay == 0) {
285 cur_real_ = bit;
286 vvp_send_real(net_->out, cur_real_);
287 } else {
288 struct event_*cur = new struct event_(use_simtime);
289 cur->run_run_ptr = &vvp_fun_delay::run_run_real_;
290 cur->ptr_real = bit;
291 enqueue_(cur);
293 schedule_generic(this, use_delay, false);
297 void vvp_fun_delay::run_run()
299 vvp_time64_t sim_time = schedule_simtime();
300 if (list_ == 0 || list_->next->sim_time > sim_time)
301 return;
303 struct event_*cur = dequeue_();
304 if (cur == 0)
305 return;
307 (this->*(cur->run_run_ptr))(cur);
308 delete cur;
311 void vvp_fun_delay::run_run_vec4_(struct event_*cur)
313 cur_vec4_ = cur->ptr_vec4;
314 vvp_send_vec4(net_->out, cur_vec4_);
317 void vvp_fun_delay::run_run_vec8_(struct vvp_fun_delay::event_*cur)
319 cur_vec8_ = cur->ptr_vec8;
320 vvp_send_vec8(net_->out, cur_vec8_);
323 void vvp_fun_delay::run_run_real_(struct vvp_fun_delay::event_*cur)
325 cur_real_ = cur->ptr_real;
326 vvp_send_real(net_->out, cur_real_);
329 vvp_fun_modpath::vvp_fun_modpath(vvp_net_t*net)
330 : net_(net), src_list_(0)
334 vvp_fun_modpath::~vvp_fun_modpath()
336 // Delete the source probes.
337 while (src_list_) {
338 vvp_fun_modpath_src*tmp = src_list_;
339 src_list_ = tmp->next_;
340 delete tmp;
344 void vvp_fun_modpath::add_modpath_src(vvp_fun_modpath_src*that)
346 assert(that->next_ == 0);
347 that->next_ = src_list_;
348 src_list_ = that;
351 static vvp_time64_t delay_from_edge(vvp_bit4_t a, vvp_bit4_t b, vvp_time64_t array[12])
353 typedef delay_edge_t bit4_table4[4];
354 const static bit4_table4 edge_table[4] = {
355 { DELAY_EDGE_01, DELAY_EDGE_01, DELAY_EDGE_0x, DELAY_EDGE_0z },
356 { DELAY_EDGE_10, DELAY_EDGE_10, DELAY_EDGE_1x, DELAY_EDGE_1z },
357 { DELAY_EDGE_x0, DELAY_EDGE_x1, DELAY_EDGE_x0, DELAY_EDGE_xz },
358 { DELAY_EDGE_z0, DELAY_EDGE_z1, DELAY_EDGE_zx, DELAY_EDGE_z0 }
361 return array[ edge_table[a][b] ];
364 void vvp_fun_modpath::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
366 /* Only the first port is used. */
367 if (port.port() > 0)
368 return;
370 if (cur_vec4_.eeq(bit))
371 return;
373 /* Select a time delay source that applies. */
374 vvp_fun_modpath_src*src = 0;
375 for (vvp_fun_modpath_src*cur = src_list_ ; cur ; cur=cur->next_) {
376 /* Skip paths that are disabled by conditions. */
377 if (cur->condition_flag_ == false)
378 continue;
380 if (src == 0) {
381 src = cur;
382 } else if (cur->wake_time_ > src->wake_time_) {
383 src = cur;
384 } else {
385 continue; /* Skip this entry. */
389 assert(src);
391 vvp_time64_t out_at[12];
392 vvp_time64_t now = schedule_simtime();
393 for (unsigned idx = 0 ; idx < 12 ; idx += 1) {
394 out_at[idx] = src->wake_time_ + src->delay_[idx];
395 if (out_at[idx] <= now)
396 out_at[idx] = 0;
397 else
398 out_at[idx] -= now;
401 /* Given the scheduled output time, create an output event. */
402 vvp_time64_t use_delay = delay_from_edge(cur_vec4_.value(0),
403 bit.value(0),
404 out_at);
406 /* FIXME: This bases the edge delay on only the least
407 bit. This is WRONG! I need to find all the possible delays,
408 and schedule an event for each partial change. Hard! */
409 for (unsigned idx = 1 ; idx < bit.size() ; idx += 1) {
410 vvp_time64_t tmp = delay_from_edge(cur_vec4_.value(idx),
411 bit.value(0),
412 out_at);
413 assert(tmp == use_delay);
416 cur_vec4_ = bit;
417 schedule_generic(this, use_delay, false);
420 void vvp_fun_modpath::run_run()
422 vvp_send_vec4(net_->out, cur_vec4_);
425 vvp_fun_modpath_src::vvp_fun_modpath_src(vvp_time64_t del[12])
427 for (unsigned idx = 0 ; idx < 12 ; idx += 1)
428 delay_[idx] = del[idx];
430 next_ = 0;
431 wake_time_ = 0;
432 condition_flag_ = true;
435 vvp_fun_modpath_src::~vvp_fun_modpath_src()
439 void vvp_fun_modpath_src::get_delay12(vvp_time64_t val[12]) const
441 for (unsigned idx = 0 ; idx < 12 ; idx += 1)
442 val[idx] = delay_[idx];
445 void vvp_fun_modpath_src::put_delay12(const vvp_time64_t val[12])
447 for (unsigned idx = 0 ; idx < 12 ; idx += 1)
448 delay_[idx] = val[idx];
451 void vvp_fun_modpath_src::recv_vec4(vvp_net_ptr_t port, const vvp_vector4_t&bit)
453 if (port.port() == 0) {
454 // The modpath input...
455 if (test_vec4(bit))
456 wake_time_ = schedule_simtime();
458 } else if (port.port() == 1) {
459 // The modpath condition input...
460 if (bit.value(0) == BIT4_1)
461 condition_flag_ = true;
462 else
463 condition_flag_ = false;
467 bool vvp_fun_modpath_src::test_vec4(const vvp_vector4_t&)
469 return true;
472 vvp_fun_modpath_edge::vvp_fun_modpath_edge(vvp_time64_t del[12],
473 bool pos, bool neg)
474 : vvp_fun_modpath_src(del)
476 old_value_ = BIT4_X;
477 posedge_ = pos;
478 negedge_ = neg;
481 bool vvp_fun_modpath_edge::test_vec4(const vvp_vector4_t&bit)
483 vvp_bit4_t tmp = old_value_;
484 old_value_ = bit.value(0);
486 int edge_flag = edge(tmp, old_value_);
487 if (edge_flag > 0) return posedge_;
488 if (edge_flag < 0) return negedge_;
489 return false;
/*
 * All the routines below that begin with modpath_src_* implement the
 * internals of a vpiModPathIn object. They are used to perform the
 * path-delay operations on such objects.
 */
500 static int modpath_src_get(int code, vpiHandle ref)
502 struct __vpiModPathSrc*obj = vpip_modpath_src_from_handle(ref);
503 assert(obj);
504 return 0 ;
507 static void modpath_src_get_value(vpiHandle ref, p_vpi_value vp)
509 assert((ref->vpi_type->type_code == vpiModPathIn));
510 struct __vpiModPathSrc* modpathsrc = vpip_modpath_src_from_handle( ref) ;
511 assert ( modpathsrc ) ;
512 return ;
515 static vpiHandle modpath_src_put_value(vpiHandle ref, s_vpi_value *vp )
517 assert((ref->vpi_type->type_code == vpiModPathIn));
518 struct __vpiModPathSrc* modpathsrc = vpip_modpath_src_from_handle( ref) ;
519 assert ( modpathsrc ) ;
520 return 0 ;
523 static vpiHandle modpath_src_get_handle(int code, vpiHandle ref)
525 struct __vpiModPathSrc*rfp = vpip_modpath_src_from_handle(ref);
526 assert(rfp);
528 switch (code) {
530 case vpiScope:
531 return vpi_handle(rfp->dest->scope);
533 case vpiModule:
534 { struct __vpiScope*scope = rfp->dest->scope;
535 while (scope && scope->base.vpi_type->type_code != vpiModule)
536 scope = scope->scope;
537 assert(scope);
538 return vpi_handle(scope);
541 case vpiModPathIn:
542 return vpi_handle(&rfp->path_term_in);
544 case vpiModPathOut:
545 return vpi_handle(&rfp->dest->path_term_out);
547 return 0;
550 static vpiHandle modpath_src_index ( vpiHandle ref, int code )
552 assert( (ref->vpi_type->type_code == vpiModPathIn ) );
553 return 0 ;
557 static int modpath_src_free_object( vpiHandle ref )
559 assert( (ref->vpi_type->type_code == vpiModPathIn ) );
560 free ( ref ) ;
561 return 1 ;
/*
 * This routine stores a specific set of delay[] values into a
 * vpiHandle. In this case it puts the given delay values into a
 * vpiModPathIn object.
 */
570 static void modpath_src_put_delays ( vpiHandle ref, p_vpi_delay delays )
572 vvp_time64_t tmp[12];
573 int idx;
574 struct __vpiModPathSrc * src = vpip_modpath_src_from_handle( ref) ;
575 assert(src) ;
577 vvp_fun_modpath_src *fun = dynamic_cast<vvp_fun_modpath_src*>(src->net->fun);
578 assert( fun );
580 typedef unsigned char map_array_t[12];
581 static const map_array_t map_2 = {0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0};
582 static const map_array_t map12 = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
584 const map_array_t*use_map = 0;
585 switch (delays->no_of_delays) {
586 case 2:
587 use_map = &map_2;
588 break;
589 case 12:
590 use_map = &map12;
591 break;
592 default:
593 assert(0);
594 break;
597 if (delays->time_type == vpiSimTime) {
598 for (idx = 0 ; idx < 12 ; idx += 1) {
599 tmp[idx] = vpip_timestruct_to_time(delays->da+use_map[0][idx]);
601 } else {
602 for (idx = 0 ; idx < 12 ; idx += 1) {
603 tmp[idx] = vpip_scaled_real_to_time64(delays->da[use_map[0][idx]].real,
604 src->dest->scope);
608 /* Now clean up any to-from-x delays to me the min/max based on
609 the rules for selecting X delays. This only needs to happen
610 if the X delays are not already explicitly given. */
611 if (delays->no_of_delays <= 6) {
612 vvp_time64_t t_max = tmp[0];
613 vvp_time64_t t_min = tmp[1];
614 for (idx = 1 ; idx < delays->no_of_delays ; idx += 1) {
615 if (tmp[idx] > t_max) t_max = tmp[idx];
616 if (tmp[idx] < t_min) t_min = tmp[idx];
618 tmp[DELAY_EDGE_0x] = t_min;
619 tmp[DELAY_EDGE_x1] = t_max;
620 tmp[DELAY_EDGE_1x] = t_min;
621 tmp[DELAY_EDGE_x0] = t_max;
622 tmp[DELAY_EDGE_xz] = t_max;
623 tmp[DELAY_EDGE_zx] = t_min;
626 fun->put_delay12(tmp);
/*
 * This routine retrieves the delay[12] values from a vpiHandle. In
 * this case it reads the delay values out of a vpiModPathIn object.
 */
637 static void modpath_src_get_delays ( vpiHandle ref, p_vpi_delay delays )
639 struct __vpiModPathSrc*src = vpip_modpath_src_from_handle( ref) ;
640 assert(src);
642 vvp_fun_modpath_src *fun = dynamic_cast<vvp_fun_modpath_src*>(src->net->fun);
643 assert(fun);
645 int idx;
646 vvp_time64_t tmp[12];
647 fun->get_delay12(tmp);
649 switch (delays->no_of_delays) {
650 case 12:
651 if (delays->time_type == vpiSimTime) {
652 for (idx = 0; idx < 12; idx += 1) {
653 vpip_time_to_timestruct(delays->da+idx, tmp[idx]);
655 } else {
656 int units = src->dest->scope->time_units;
657 for (idx = 0; idx < 12; idx += 1) {
658 delays->da[idx].real = vpip_time_to_scaled_real(tmp[idx], src->dest->scope);
661 break;
663 default:
664 assert(0);
665 break;
670 static vpiHandle pathterm_get_handle(int code, vpiHandle ref)
672 struct __vpiModPathTerm*obj = vpip_modpath_term_from_handle(ref);
673 assert(obj);
675 switch (code) {
676 case vpiExpr:
677 return obj->expr;
678 default:
679 return 0;
684 * The __vpiModPathSrc class is what the VPI client sees as a
685 * vpiModPath object. The __vpiModPath structure contains items that
686 * are common to a bunch of modpaths, including the destination term.
688 static const struct __vpirt vpip_modpath_src_rt = {
689 vpiModPath,
690 modpath_src_get,
691 0, /* vpi_get_str */
692 modpath_src_get_value,
693 modpath_src_put_value,
694 modpath_src_get_handle,
695 0, /* modpath_src_iterate,*/
696 modpath_src_index,
697 modpath_src_free_object,
698 modpath_src_get_delays,
699 modpath_src_put_delays
702 static const struct __vpirt vpip_modpath_term_rt = {
703 vpiPathTerm,
704 0, // vpi_get
705 0, // vpi_get_str
706 0, // vpi_get_value,
707 0, // vpi_put_value,
708 pathterm_get_handle,
709 0, // vpi_iterate,
710 0, // vpi_index,
711 0, // vpi_free_object,
712 0, // vpi_get_delays,
713 0 // vpi_put_delays
716 static void initialize_path_term(struct __vpiModPathTerm&obj)
718 obj.base.vpi_type = &vpip_modpath_term_rt;
719 obj.expr = 0;
/*
 * Construct a vpiModPath object for the given output net and bind it
 * to the current scope.
 */
728 struct __vpiModPath* vpip_make_modpath(vvp_net_t *net)
730 struct __vpiModPath*obj = (struct __vpiModPath *)calloc(1, sizeof ( struct __vpiModPath ) );
731 obj->scope = vpip_peek_current_scope ( );
733 initialize_path_term(obj->path_term_out);
734 obj->input_net = net ;
736 return obj;
/*
 * Construct a vpiModPathIn (struct __vpiModPathSrc) object, recording
 * the delay[12] values and pointing it at the specified functor net.
 */
747 struct __vpiModPathSrc* vpip_make_modpath_src (struct __vpiModPath*path, vvp_time64_t use_delay[12] , vvp_net_t *net )
749 struct __vpiModPathSrc *obj = (struct __vpiModPathSrc *) calloc (1, sizeof ( struct __vpiModPathSrc ) ) ;
751 obj->base.vpi_type = &vpip_modpath_src_rt;
752 obj->dest = path;
753 obj->net = net;
754 initialize_path_term(obj->path_term_in);
756 return obj;
/*
 * Safely convert a path-terminal vpiHandle to a
 * struct __vpiModPathTerm pointer (0 if the type does not match).
 */
765 struct __vpiModPathTerm* vpip_modpath_term_from_handle(vpiHandle ref)
767 if (ref->vpi_type->type_code != vpiPathTerm)
768 return 0;
770 return (struct __vpiModPathTerm*) ref;
/*
 * Safely convert a modpath-source vpiHandle to a
 * struct __vpiModPathSrc pointer. This is equivalent to a
 * vpiModPathIn handle (0 if the type does not match).
 */
779 struct __vpiModPathSrc* vpip_modpath_src_from_handle(vpiHandle ref)
781 if (ref->vpi_type->type_code != vpiModPath)
782 return 0;
784 return (struct __vpiModPathSrc *) ref;
790 void vpip_add_mopdath_edge ( vpiHandle vpiobj, char *label,
791 vvp_time64_t use_delay[12] ,
792 bool posedge , bool negedge )
794 // printf(" In the vpip_add_mopdath_edge( ) \n") ;