/* $NetBSD: prune.c,v 1.17 2006/05/25 01:41:13 christos Exp $ */

/*
 * The mrouted program is covered by the license in the accompanying file
 * named "LICENSE".  Use of the mrouted program represents acceptance of
 * the terms and conditions listed in that file.
 *
 * The mrouted program is COPYRIGHT 1989 by The Board of Trustees of
 * Leland Stanford Junior University.
 */

extern int cache_lifetime;
extern int max_prune_lifetime;
extern struct rtentry *routing_table;
/*
 * dither cache lifetime to obtain a value between x and 2*x
 */
#define CACHE_LIFETIME(x) ((x) + (arc4random() % (x)))
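/*
 * Since arc4random() % (x) lies in [0, x-1], CACHE_LIFETIME(x) yields a
 * value in [x, 2*x - 1].  For example, with cache_lifetime of 300 the
 * dithered timer falls between 300 and 599, which spreads out the
 * expiration of cache entries that were all created at the same time.
 */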
#define CHK_GS(x, y) { \
	/*case 256:*/ y = 1; \
struct gtable *kernel_table;		/* ptr to list of kernel grp entries */
static struct gtable *kernel_no_route;	/* list of grp entries w/o routes */
struct gtable *gtp;			/* pointer for kernel rt entries */
unsigned int kroutes;			/* current number of cache entries */
/****************************************************************************
                       Functions that are local to prune.c
****************************************************************************/
static void		prun_add_ttls(struct gtable *gt);
static int		pruning_neighbor(vifi_t vifi, u_int32_t addr);
static int		can_mtrace(vifi_t vifi, u_int32_t addr);
static struct ptable *	find_prune_entry(u_int32_t vr, struct ptable *pt);
static void		expire_prune(vifi_t vifi, struct gtable *gt);
static void		send_prune(struct gtable *gt);
static void		send_graft(struct gtable *gt);
static void		send_graft_ack(u_int32_t src, u_int32_t dst,
			    u_int32_t origin, u_int32_t grp);
static void		update_kernel(struct gtable *g);
static const char *	scaletime(u_long t);
/*
 * Updates the ttl values for each vif.
 */
prun_add_ttls(struct gtable *gt)

    for (vifi = 0, v = uvifs; vifi < numvifs; ++vifi, ++v) {
	if (VIFM_ISSET(vifi, gt->gt_grpmems))
	    gt->gt_ttls[vifi] = v->uv_threshold;
	    gt->gt_ttls[vifi] = 0;
/*
 * checks for scoped multicast addresses
 */
#define GET_SCOPE(gt) { \
	if ((ntohl((gt)->gt_mcastgrp) & 0xff000000) == 0xef000000) \
	    for (_i = 0; _i < numvifs; _i++) \
		if (scoped_addr(_i, (gt)->gt_mcastgrp)) \
		    VIFM_SET(_i, (gt)->gt_scope); \
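/*
 * GET_SCOPE only walks the vifs for groups whose high byte is 0xef, i.e.
 * groups inside 239.0.0.0/8, the administratively scoped multicast range;
 * for such a group it records in gt_scope every vif whose boundary list
 * (as tested by scoped_addr()) covers the group.
 */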
scoped_addr(vifi_t vifi, u_int32_t addr)

    for (acl = uvifs[vifi].uv_acl; acl; acl = acl->acl_next)
	if ((addr & acl->acl_mask) == acl->acl_addr)

/*
 * Determine if mcastgrp has a listener on vifi
 */
grplst_mem(vifi_t vifi, u_int32_t mcastgrp)

    for (g = v->uv_groups; g != NULL; g = g->al_next)
	if (mcastgrp == g->al_addr)
/*
 * Finds the group entry with the specified source and netmask.
 * If netmask is 0, it uses the route's netmask.
 *
 * Returns TRUE if found a match, and the global variable gtp is left
 * pointing to the entry before the found entry.
 * Returns FALSE if no exact match found, gtp is left pointing to before
 * where the entry in question belongs, or is NULL if it belongs at the
 * head of the list.
 */
find_src_grp(u_int32_t src, u_int32_t mask, u_int32_t grp)

	if (grp == gt->gt_mcastgrp &&
	    (mask ? (gt->gt_route->rt_origin == src &&
		     gt->gt_route->rt_originmask == mask) :
		    ((src & gt->gt_route->rt_originmask) ==
		     gt->gt_route->rt_origin)))

	if (ntohl(grp) > ntohl(gt->gt_mcastgrp) ||
	    (grp == gt->gt_mcastgrp &&
	     (ntohl(mask) < ntohl(gt->gt_route->rt_originmask) ||
	      (mask == gt->gt_route->rt_originmask &&
	       (ntohl(src) > ntohl(gt->gt_route->rt_origin)))))) {
/*
 * Check if the neighbor supports pruning
 */
pruning_neighbor(vifi_t vifi, u_int32_t addr)

    struct listaddr *n = neighbor_info(vifi, addr);

    if (n->al_flags & NF_PRUNE)

    /*
     * Versions from 3.0 to 3.4 relied on the version number to identify
     * that they could handle pruning.
     */
    return (vers >= 0x0300 && vers <= 0x0304);
/*
 * Can the neighbor in question handle multicast traceroute?
 */
can_mtrace(vifi_t vifi, u_int32_t addr)

    struct listaddr *n = neighbor_info(vifi, addr);

    if (n->al_flags & NF_MTRACE)

    /*
     * Versions 3.3 and 3.4 relied on the version number to identify
     * that they could handle traceroute.
     */
    return (vers >= 0x0303 && vers <= 0x0304);

/*
 * Returns the prune entry of the router, or NULL if none exists
 */
static struct ptable *
find_prune_entry(u_int32_t vr, struct ptable *pt)

	if (pt->pt_router == vr)
/*
 * Send a prune message to the dominant router for
 *
 * Record an entry that a prune was sent for this group
 */
send_prune(struct gtable *gt)

    /* Don't process any prunes if router is not pruning */

    /* Can't process a prune if we don't have an associated route */
    if (gt->gt_route == NULL)

    /* Don't send a prune to a non-pruning router */
    if (!pruning_neighbor(gt->gt_route->rt_parent, gt->gt_route->rt_gateway))

    /*
     * sends a prune message to the router upstream.
     */
    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
    dst = gt->gt_route->rt_gateway;

    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

    /*
     * determine prune lifetime
     */
    gt->gt_prsent_timer = gt->gt_timer;
    for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next)
	if (pt->pt_timer < gt->gt_prsent_timer)
	    gt->gt_prsent_timer = pt->pt_timer;
    /*
     * If we have a graft pending, cancel graft retransmission
     */
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
    tmp = htonl(gt->gt_prsent_timer);
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(tmp))[i];
    send_igmp(src, dst, IGMP_DVMRP, DVMRP_PRUNE,
	htonl(MROUTED_LEVEL), datalen);

    logit(LOG_DEBUG, 0, "sent prune for (%s %s)/%d on vif %d to %s",
	inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask),
	inet_fmt(gt->gt_mcastgrp),
	gt->gt_prsent_timer, gt->gt_route->rt_parent,
	inet_fmt(gt->gt_route->rt_gateway));
/*
 * a prune was sent upstream
 * so, a graft has to be sent to annul the prune
 * set up a graft timer so that if an ack is not
 * heard within that time, another graft request is sent
 */
send_graft(struct gtable *gt)

    /* Can't send a graft without an associated route */
    if (gt->gt_route == NULL)

    src = uvifs[gt->gt_route->rt_parent].uv_lcl_addr;
    dst = gt->gt_route->rt_gateway;

    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_route->rt_origin))[i];
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(gt->gt_mcastgrp))[i];
    send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT,
	htonl(MROUTED_LEVEL), datalen);

    logit(LOG_DEBUG, 0, "sent graft for (%s %s) to %s on vif %d",
	inet_fmts(gt->gt_route->rt_origin, gt->gt_route->rt_originmask),
	inet_fmt(gt->gt_mcastgrp),
	inet_fmt(gt->gt_route->rt_gateway),
	gt->gt_route->rt_parent);
/*
 * Send an ack that a graft was received
 */
send_graft_ack(u_int32_t src, u_int32_t dst, u_int32_t origin, u_int32_t grp)

    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(origin))[i];
    for (i = 0; i < 4; i++)
	*p++ = ((char *)&(grp))[i];

    send_igmp(src, dst, IGMP_DVMRP, DVMRP_GRAFT_ACK,
	htonl(MROUTED_LEVEL), datalen);

    logit(LOG_DEBUG, 0, "sent graft ack for (%s, %s) to %s",
	inet_fmt(origin), inet_fmt(grp),

/*
 * Update the kernel cache with all the routes hanging off the group entry
 */
update_kernel(struct gtable *g)

    for (st = g->gt_srctbl; st; st = st->st_next)
	k_add_rg(st->st_origin, g);

/****************************************************************************
                       Functions that are used externally
****************************************************************************/
#include <sys/types.h>

/*
 * Find a specific group entry in the group table
 */

    for (gt = kernel_table; gt; gt = gt->gt_gnext) {
	if (ntohl(grp) < ntohl(gt->gt_mcastgrp))
	if (gt->gt_mcastgrp == grp)
/*
 * Given a group entry and source, find the corresponding source table
 */
find_grp_src(struct gtable *gt, u_long src)

    u_long grp = gt->gt_mcastgrp;
    struct gtable *gtcurr;

    for (gtcurr = gt; gtcurr->gt_mcastgrp == grp; gtcurr = gtcurr->gt_gnext) {
	for (st = gtcurr->gt_srctbl; st; st = st->st_next)
	    if (st->st_origin == src)
/*
 * Find next entry > specification
 *
 * gtpp: ordered by group
 * stpp: ordered by source
 */
next_grp_src_mask(struct gtable **gtpp, struct stable **stpp, u_long grp,
    u_long src, u_long mask)

    struct gtable *gt, *gbest = NULL;
    struct stable *st, *sbest = NULL;

    /* Find first group entry >= grp spec */
    (*gtpp) = kernel_table;
    while ((*gtpp) && ntohl((*gtpp)->gt_mcastgrp) < ntohl(grp))
	(*gtpp) = (*gtpp)->gt_gnext;

	return 0;	/* no more groups */

    for (gt = kernel_table; gt; gt = gt->gt_gnext) {
	/* Since grps are ordered, we can stop when group changes from gbest */
	if (gbest && gbest->gt_mcastgrp != gt->gt_mcastgrp)
	for (st = gt->gt_srctbl; st; st = st->st_next) {

	    /* Among those entries > spec, find "lowest" one */
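	    /*
	     * The comparison below is a lexicographic order on
	     * (group, source, mask), taken in host byte order via ntohl():
	     * the first half keeps only entries strictly greater than the
	     * (grp, src, mask) specification, and the second half keeps
	     * the smallest such candidate seen so far in gbest/sbest.
	     */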
	    if (((ntohl(gt->gt_mcastgrp) > ntohl(grp))
		|| (ntohl(gt->gt_mcastgrp) == ntohl(grp)
		    && ntohl(st->st_origin) > ntohl(src))
		|| (ntohl(gt->gt_mcastgrp) == ntohl(grp)
		    && ntohl(st->st_origin) == src && 0xFFFFFFFF > ntohl(mask)))
	     && (!gbest
		|| (ntohl(gt->gt_mcastgrp) < ntohl(gbest->gt_mcastgrp))
		|| (ntohl(gt->gt_mcastgrp) == ntohl(gbest->gt_mcastgrp)
		    && ntohl(st->st_origin) < ntohl(sbest->st_origin)))) {
/*
 * Ensure that sg contains current information for the given group,source.
 * This is fetched from the kernel as a unit so that counts for the entry
 * are consistent, i.e. packet and byte counts for the same entry are
 * read at the same time.
 */
refresh_sg(struct sioc_sg_req *sg, struct gtable *gt, struct stable *st)

    static int lastq = -1;

    if (quantum != lastq || sg->src.s_addr != st->st_origin
			 || sg->grp.s_addr != gt->gt_mcastgrp) {

	sg->src.s_addr = st->st_origin;
	sg->grp.s_addr = gt->gt_mcastgrp;
	ioctl(igmp_socket, SIOCGETSGCNT, (char *)sg);
/*
 * Return pointer to a specific route entry.  This must be a separate
 * function from find_route() which modifies rtp.
 */
snmp_find_route(u_long src, u_long mask)

    for (rt = routing_table; rt; rt = rt->rt_next) {
	if (src == rt->rt_origin && mask == rt->rt_originmask)

/*
 * Find next route entry > specification
 */
next_route(struct rtentry **rtpp, u_long src, u_long mask)

    struct rtentry *rt, *rbest = NULL;

    /* Among all entries > spec, find "lowest" one in order */
    for (rt = routing_table; rt; rt = rt->rt_next) {
	if ((ntohl(rt->rt_origin) > ntohl(src)
	     || (ntohl(rt->rt_origin) == ntohl(src)
		 && ntohl(rt->rt_originmask) > ntohl(mask)))
	    && (!rbest || (ntohl(rt->rt_origin) < ntohl(rbest->rt_origin))
		|| (ntohl(rt->rt_origin) == ntohl(rbest->rt_origin)
		    && ntohl(rt->rt_originmask) < ntohl(rbest->rt_originmask))))
/*
 * Given a routing table entry, and a vifi, find the next vifi/entry
 *
 * vifi: vifi at which to start looking
 */
next_route_child(struct rtentry **rtpp, u_long src, u_long mask, vifi_t *vifi)

    /* Get (S,M) entry */
    if (!((*rtpp) = snmp_find_route(src, mask)))
	if (!next_route(rtpp, src, mask))

    /* Continue until we get one with a valid next vif */
	for (; (*rtpp)->rt_children && *vifi < numvifs; (*vifi)++)
	    if (VIFM_ISSET(*vifi, (*rtpp)->rt_children))
    } while (next_route(rtpp, (*rtpp)->rt_origin, (*rtpp)->rt_originmask));

/*
 * Given a routing table entry, and a vifi, find the next entry
 * equal to or greater than those
 *
 * vifi: vifi at which to start looking
 */
next_child(struct gtable **gtpp, struct stable **stpp, u_long grp, u_long src,
    u_long mask, vifi_t *vifi)

    /* Get (G,S,M) entry */
	|| !((*gtpp) = find_grp(grp))
	|| !((*stpp) = find_grp_src((*gtpp), src)))
	if (!next_grp_src_mask(gtpp, stpp, grp, src, mask))

    /* Continue until we get one with a valid next vif */
	for (; (*gtpp)->gt_route->rt_children && *vifi < numvifs; (*vifi)++)
	    if (VIFM_ISSET(*vifi, (*gtpp)->gt_route->rt_children))
    } while (next_grp_src_mask(gtpp, stpp, (*gtpp)->gt_mcastgrp,
		(*stpp)->st_origin, 0xFFFFFFFF));
/*
 * Initialize the kernel table structure
 */
    kernel_no_route = NULL;
/*
 * Add a new table entry for (origin, mcastgrp)
 */
add_table_entry(u_int32_t origin, u_int32_t mcastgrp)

    struct gtable *gt, **gtnp, *prev_gt;
    struct stable *st, **stnp;

    md_log(MD_MISS, origin, mcastgrp);

    r = determine_route(origin);

    /*
     * Look for it on the no_route table; if it is found then
     * it will be detected as a duplicate below.
     */
    for (gt = kernel_no_route; gt; gt = gt->gt_next)
	if (mcastgrp == gt->gt_mcastgrp &&
	    gt->gt_srctbl && gt->gt_srctbl->st_origin == origin)

	gtnp = &kernel_no_route;
	gtnp = &r->rt_groups;
    while ((gt = *gtnp) != NULL) {
	if (gt->gt_mcastgrp >= mcastgrp)

    if (gt == NULL || gt->gt_mcastgrp != mcastgrp) {
	gt = (struct gtable *)malloc(sizeof(struct gtable));
	    logit(LOG_ERR, 0, "ran out of memory");

	gt->gt_mcastgrp	    = mcastgrp;
	gt->gt_timer	    = CACHE_LIFETIME(cache_lifetime);
	gt->gt_prsent_timer = 0;
	gt->gt_srctbl	    = NULL;
	gt->gt_pruntbl	    = NULL;
	gt->gt_rsrr_cache   = NULL;

	/* obtain the multicast group membership list */
	for (i = 0; i < numvifs; i++) {
	    if (VIFM_ISSET(i, r->rt_children) &&
		!(VIFM_ISSET(i, r->rt_leaves)))
		VIFM_SET(i, gt->gt_grpmems);

	    if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, mcastgrp))
		VIFM_SET(i, gt->gt_grpmems);

	if (VIFM_ISSET(r->rt_parent, gt->gt_scope))
	    gt->gt_grpmems &= ~gt->gt_scope;
	    gt->gt_next->gt_prev = gt;
	gt->gt_prev = prev_gt;

	if (find_src_grp(r->rt_origin, r->rt_originmask, gt->gt_mcastgrp)) {

	    g = gtp ? gtp->gt_gnext : kernel_table;
	    logit(LOG_WARNING, 0, "Entry for (%s %s) (rt:%p) exists (rt:%p)",
		inet_fmts(r->rt_origin, r->rt_originmask),
		inet_fmt(g->gt_mcastgrp),

	    gt->gt_gnext = gtp->gt_gnext;
	    gt->gt_gnext = kernel_table;
	    gt->gt_gnext->gt_gprev = gt;
	    gt->gt_gnext = gt->gt_gprev = NULL;

    stnp = &gt->gt_srctbl;
    while ((st = *stnp) != NULL) {
	if (ntohl(st->st_origin) >= ntohl(origin))

    if (st == NULL || st->st_origin != origin) {
	st = (struct stable *)malloc(sizeof(struct stable));
	    logit(LOG_ERR, 0, "ran out of memory");
	st->st_origin = origin;

	    md_log(MD_DUPE, origin, mcastgrp);
	    logit(LOG_WARNING, 0, "kernel entry already exists for (%s %s)",

	    /* XXX Doing this should cause no harm, and may ensure
	     * kernel<>mrouted synchronization */
	    k_add_rg(origin, gt);

	k_add_rg(origin, gt);

    logit(LOG_DEBUG, 0, "add cache entry (%s %s) gm:%x, parent-vif:%d",
	gt->gt_grpmems, r ? r->rt_parent : -1);

    /* If there are no leaf vifs
     * which have this group, then
     * mark this src-grp as a prune candidate.
     */
    if (!gt->gt_prsent_timer && !gt->gt_grpmems && r && r->rt_gateway)
/*
 * An mrouter has gone down and come up on an interface
 * Forward on that interface immediately
 */
reset_neighbor_state(vifi_t vifi, u_int32_t addr)

    struct ptable *pt, **ptnp;

    for (g = kernel_table; g; g = g->gt_gnext) {

	/*
	 * If neighbor was the parent, remove the prune sent state
	 * and all of the source cache info so that prunes get
	 */
	if (vifi == r->rt_parent) {
	    if (addr == r->rt_gateway) {
		logit(LOG_DEBUG, 0, "reset_neighbor_state parent reset (%s %s)",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp));

		g->gt_prsent_timer = 0;

		while ((st = g->gt_srctbl) != NULL) {
		    g->gt_srctbl = st->st_next;
		    k_del_rg(st->st_origin, g);

	    /*
	     * Neighbor was not the parent, send grafts to join the groups
	     */
	    if (g->gt_prsent_timer) {
		g->gt_prsent_timer = 0;

	    /*
	     * Remove any prunes that this router has sent us.
	     */
	    ptnp = &g->gt_pruntbl;
	    while ((pt = *ptnp) != NULL) {
		if (pt->pt_vifi == vifi && pt->pt_router == addr) {

	    /*
	     * And see if we want to forward again.
	     */
	    if (!VIFM_ISSET(vifi, g->gt_grpmems)) {
		if (VIFM_ISSET(vifi, r->rt_children) &&
		    !(VIFM_ISSET(vifi, r->rt_leaves)))
		    VIFM_SET(vifi, g->gt_grpmems);

		if (VIFM_ISSET(vifi, r->rt_leaves) &&
		    grplst_mem(vifi, g->gt_mcastgrp))
		    VIFM_SET(vifi, g->gt_grpmems);

		g->gt_grpmems &= ~g->gt_scope;

	    /* Update kernel state */

	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g, 1);

	logit(LOG_DEBUG, 0, "reset member state (%s %s) gm:%x",
	    inet_fmts(r->rt_origin, r->rt_originmask),
	    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);
/*
 * Delete table entry from the kernel
 * del_flag determines how many entries to delete
 */
del_table_entry(struct rtentry *r, u_int32_t mcastgrp, u_int del_flag)

    struct gtable *g, *prev_g;
    struct stable *st, *prev_st;
    struct ptable *pt, *prev_pt;

    if (del_flag == DEL_ALL_ROUTES) {

	    logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
		inet_fmts(r->rt_origin, r->rt_originmask),
		inet_fmt(g->gt_mcastgrp));

		if (k_del_rg(st->st_origin, g) < 0) {
		    logit(LOG_WARNING, errno,
			"del_table_entry trying to delete (%s, %s)",
			inet_fmt(st->st_origin),
			inet_fmt(g->gt_mcastgrp));

	    g->gt_pruntbl = NULL;

		g->gt_gnext->gt_gprev = g->gt_gprev;
		g->gt_gprev->gt_gnext = g->gt_gnext;
		kernel_table = g->gt_gnext;

	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g, 0);

    /*
     * Dummy routine - someday this may be needed, so it is just there
     */
    if (del_flag == DEL_RTE_GROUP) {
	prev_g = (struct gtable *)&r->rt_groups;
	for (g = r->rt_groups; g; g = g->gt_next) {
	    if (g->gt_mcastgrp == mcastgrp) {
		logit(LOG_DEBUG, 0, "del_table_entry deleting (%s %s)",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp));

		    if (k_del_rg(st->st_origin, g) < 0) {
			logit(LOG_WARNING, errno,
			    "del_table_entry trying to delete (%s, %s)",
			    inet_fmt(st->st_origin),
			    inet_fmt(g->gt_mcastgrp));

		g->gt_pruntbl = NULL;

		    g->gt_gnext->gt_gprev = g->gt_gprev;
		    g->gt_gprev->gt_gnext = g->gt_gnext;
		    kernel_table = g->gt_gnext;

		if (prev_g != (struct gtable *)&r->rt_groups)
		    g->gt_next->gt_prev = prev_g;
		    g->gt_next->gt_prev = NULL;
		prev_g->gt_next = g->gt_next;

		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g, 0);
/*
 * update kernel table entry when a route entry changes
 */
update_table_entry(struct rtentry *r)

    struct ptable *pt, *prev_pt;

    for (g = r->rt_groups; g; g = g->gt_next) {
	    prev_pt = pt->pt_next;
	g->gt_pruntbl = NULL;

	/* obtain the multicast group membership list */
	for (i = 0; i < numvifs; i++) {
	    if (VIFM_ISSET(i, r->rt_children) &&
		!(VIFM_ISSET(i, r->rt_leaves)))
		VIFM_SET(i, g->gt_grpmems);

	    if (VIFM_ISSET(i, r->rt_leaves) && grplst_mem(i, g->gt_mcastgrp))
		VIFM_SET(i, g->gt_grpmems);

	if (VIFM_ISSET(r->rt_parent, g->gt_scope))
	    g->gt_grpmems &= ~g->gt_scope;

	logit(LOG_DEBUG, 0, "updating cache entries (%s %s) gm:%x",
	    inet_fmts(r->rt_origin, r->rt_originmask),
	    inet_fmt(g->gt_mcastgrp),

	if (g->gt_grpmems && g->gt_prsent_timer) {
	    g->gt_prsent_timer = 0;

	/* update ttls and add entry into kernel */

	/* Send route change notification to reservation protocol. */
	rsrr_cache_send(g, 1);

	/* Check if we want to prune this group */
	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {
	    g->gt_timer = CACHE_LIFETIME(cache_lifetime);
/*
 * set the forwarding flag for all mcastgrps on this vifi
 */
update_lclgrp(vifi_t vifi, u_int32_t mcastgrp)

    logit(LOG_DEBUG, 0, "group %s joined on vif %d",
	inet_fmt(mcastgrp), vifi);

    for (g = kernel_table; g; g = g->gt_gnext) {
	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))

	if (g->gt_mcastgrp == mcastgrp &&
	    VIFM_ISSET(vifi, r->rt_children)) {

	    VIFM_SET(vifi, g->gt_grpmems);
	    g->gt_grpmems &= ~g->gt_scope;
	    if (g->gt_grpmems == 0)

	    logit(LOG_DEBUG, 0, "update lclgrp (%s %s) gm:%x",
		inet_fmts(r->rt_origin, r->rt_originmask),
		inet_fmt(g->gt_mcastgrp), g->gt_grpmems);

	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g, 1);
/*
 * reset forwarding flag for all mcastgrps on this vifi
 */
delete_lclgrp(vifi_t vifi, u_int32_t mcastgrp)

    logit(LOG_DEBUG, 0, "group %s left on vif %d",
	inet_fmt(mcastgrp), vifi);

    for (g = kernel_table; g; g = g->gt_gnext) {
	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))

	if (g->gt_mcastgrp == mcastgrp) {
	    int stop_sending = 1;

	    /*
	     * If this is not a leaf, then we have router neighbors on this
	     * vif.  Only turn off forwarding if they have all pruned.
	     */
	    if (!VIFM_ISSET(vifi, r->rt_leaves)) {
		struct listaddr *vr;

		for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
		    if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {

		VIFM_CLR(vifi, g->gt_grpmems);
		logit(LOG_DEBUG, 0, "delete lclgrp (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);

		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g, 1);

		/*
		 * If there are no more members of this particular group,
		 * send prune upstream
		 */
		if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway)
/*
 * Takes the prune message received and then strips it to
 * determine the (src, grp) pair to be pruned.
 *
 * Adds the router to the (src, grp) entry then.
 *
 * Determines if further packets have to be sent down that vif
 *
 * Determines if a corresponding prune message has to be generated
 */
accept_prune(u_int32_t src, u_int32_t dst, char *p, int datalen)

    struct listaddr *vr;

    /* Don't process any prunes if router is not pruning */

    if ((vifi = find_vif(src, dst)) == NO_VIF) {
	    "ignoring prune report from non-neighbor %s",

    /* Check if enough data is present */
	logit(LOG_WARNING, 0,
	    "non-decipherable prune from %s",

    for (i = 0; i < 4; i++)
	((char *)&prun_src)[i] = *p++;
    for (i = 0; i < 4; i++)
	((char *)&prun_grp)[i] = *p++;
    for (i = 0; i < 4; i++)
	((char *)&prun_tmr)[i] = *p++;
    prun_tmr = ntohl(prun_tmr);
    logit(LOG_DEBUG, 0, "%s on vif %d prunes (%s %s)/%d",
	inet_fmt(src), vifi,
	inet_fmt(prun_src), inet_fmt(prun_grp), prun_tmr);

    /*
     * Find the subnet for the prune
     */
    if (find_src_grp(prun_src, 0, prun_grp)) {
	g = gtp ? gtp->gt_gnext : kernel_table;

	if (!VIFM_ISSET(vifi, r->rt_children)) {
	    logit(LOG_WARNING, 0, "prune received from non-child %s for (%s %s)",
		inet_fmt(src), inet_fmt(prun_src),
		inet_fmt(prun_grp));
	if (VIFM_ISSET(vifi, g->gt_scope)) {
	    logit(LOG_WARNING, 0, "prune received from %s on scoped grp (%s %s)",
		inet_fmt(src), inet_fmt(prun_src),
		inet_fmt(prun_grp));
	if ((pt = find_prune_entry(src, g->gt_pruntbl)) != NULL) {
	    /*
	     * If it's about to expire, then it's only still around because
	     * of timer granularity, so don't warn about it.
	     */
	    if (pt->pt_timer > 10) {
		logit(LOG_WARNING, 0, "%s %d from %s for (%s %s)/%d %s %d %s %x",
		    "duplicate prune received on vif",
		    vifi, inet_fmt(src), inet_fmt(prun_src),
		    inet_fmt(prun_grp), prun_tmr,
		    "old timer:", pt->pt_timer, "cur gm:", g->gt_grpmems);
	    pt->pt_timer = prun_tmr;

	    /* allocate space for the prune structure */
	    pt = (struct ptable *)(malloc(sizeof(struct ptable)));
		logit(LOG_ERR, 0, "pt: ran out of memory");

	    pt->pt_router = src;
	    pt->pt_timer = prun_tmr;
	    pt->pt_next = g->gt_pruntbl;

	/* Refresh the group's lifetime */
	g->gt_timer = CACHE_LIFETIME(cache_lifetime);
	if ((u_int32_t)g->gt_timer < prun_tmr)
	    g->gt_timer = prun_tmr;

	/*
	 * check if any more packets need to be sent on the
	 * vif which sent this message
	 */
	for (vr = uvifs[vifi].uv_neighbors; vr; vr = vr->al_next)
	    if (find_prune_entry(vr->al_addr, g->gt_pruntbl) == NULL) {

	if (stop_sending && !grplst_mem(vifi, prun_grp)) {
	    VIFM_CLR(vifi, g->gt_grpmems);
	    logit(LOG_DEBUG, 0, "prune (%s %s), stop sending on vif %d, gm:%x",
		inet_fmts(r->rt_origin, r->rt_originmask),
		inet_fmt(g->gt_mcastgrp), vifi, g->gt_grpmems);

	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(g, 1);

	/*
	 * check if all the child routers have expressed no interest
	 * in this group and if this group does not exist in the
	 * Send a prune message then upstream
	 */
	if (!g->gt_prsent_timer && g->gt_grpmems == 0 && r->rt_gateway) {

	/*
	 * There is no kernel entry for this group.  Therefore, we can
	 * simply ignore the prune, as we are not forwarding this traffic
	 */
	logit(LOG_DEBUG, 0, "%s (%s %s)/%d from %s",
	    "prune message received with no kernel entry for",
	    inet_fmt(prun_src), inet_fmt(prun_grp),
	    prun_tmr, inet_fmt(src));
/*
 * Checks if this mcastgrp is present in the kernel table
 * If so and if a prune was sent, it sends a graft upwards
 */
chkgrp_graft(vifi_t vifi, u_int32_t mcastgrp)

    for (g = kernel_table; g; g = g->gt_gnext) {
	if (ntohl(mcastgrp) < ntohl(g->gt_mcastgrp))

	if (g->gt_mcastgrp == mcastgrp && VIFM_ISSET(vifi, r->rt_children))
	    if (g->gt_prsent_timer) {
		VIFM_SET(vifi, g->gt_grpmems);

		/*
		 * If the vif that was joined was a scoped vif,
		 * ignore it ; don't graft back
		 */
		g->gt_grpmems &= ~g->gt_scope;
		if (g->gt_grpmems == 0)

		/* set the flag for graft retransmission */

		/* send graft upwards */

		/* reset the prune timer and update cache timer */
		g->gt_prsent_timer = 0;
		g->gt_timer = max_prune_lifetime;

		logit(LOG_DEBUG, 0, "chkgrp graft (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);

		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g, 1);
/* determine the multicast group and src
 * if it does, then determine if a prune was sent
 * if prune sent upstream, send graft upstream and send
 * if no prune sent upstream, change the forwarding bit
 * for this interface and send ack downstream.
 * if no entry exists for this group send ack downstream.
 */
accept_graft(u_int32_t src, u_int32_t dst, char *p, int datalen)

    u_int32_t graft_src;
    u_int32_t graft_grp;
    struct ptable *pt, **ptnp;

    if ((vifi = find_vif(src, dst)) == NO_VIF) {
	    "ignoring graft from non-neighbor %s",

	logit(LOG_WARNING, 0,
	    "received non-decipherable graft from %s",

    for (i = 0; i < 4; i++)
	((char *)&graft_src)[i] = *p++;
    for (i = 0; i < 4; i++)
	((char *)&graft_grp)[i] = *p++;

    logit(LOG_DEBUG, 0, "%s on vif %d grafts (%s %s)",
	inet_fmt(src), vifi,
	inet_fmt(graft_src), inet_fmt(graft_grp));

    /*
     * Find the subnet for the graft
     */
    if (find_src_grp(graft_src, 0, graft_grp)) {
	g = gtp ? gtp->gt_gnext : kernel_table;

	if (VIFM_ISSET(vifi, g->gt_scope)) {
	    logit(LOG_WARNING, 0, "graft received from %s on scoped grp (%s %s)",
		inet_fmt(src), inet_fmt(graft_src),
		inet_fmt(graft_grp));

	ptnp = &g->gt_pruntbl;
	while ((pt = *ptnp) != NULL) {
	    if ((pt->pt_vifi == vifi) && (pt->pt_router == src)) {
		*ptnp = pt->pt_next;

		VIFM_SET(vifi, g->gt_grpmems);
		logit(LOG_DEBUG, 0, "accept graft (%s %s) gm:%x",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(g->gt_mcastgrp), g->gt_grpmems);

		/* Send route change notification to reservation protocol. */
		rsrr_cache_send(g, 1);

		ptnp = &pt->pt_next;

	/* send ack downstream */
	send_graft_ack(dst, src, graft_src, graft_grp);
	g->gt_timer = max_prune_lifetime;

	if (g->gt_prsent_timer) {
	    /* set the flag for graft retransmission */

	    /* send graft upwards */

	    /* reset the prune sent timer */
	    g->gt_prsent_timer = 0;

	/*
	 * We have no state for the source and group in question.
	 * We can simply acknowledge the graft, since we know
	 * that we have no prune state, and grafts are requests
	 * to remove prune state.
	 */
	send_graft_ack(dst, src, graft_src, graft_grp);
	logit(LOG_DEBUG, 0, "%s (%s %s) from %s",
	    "graft received with no kernel entry for",
	    inet_fmt(graft_src), inet_fmt(graft_grp),
1431 * then determine if a graft was sent.
1432 * if no graft sent, ignore the message
1433 * if graft was sent and the ack is from the right
1434 * source, remove the graft timer so that we don't
1435 * have send a graft again
accept_g_ack(u_int32_t src, u_int32_t dst, char *p, int datalen)

    if ((vifi = find_vif(src, dst)) == NO_VIF) {
	    "ignoring graft ack from non-neighbor %s",

    if (datalen < 0 || datalen > 8) {
	logit(LOG_WARNING, 0,
	    "received non-decipherable graft ack from %s",

    for (i = 0; i < 4; i++)
	((char *)&grft_src)[i] = *p++;
    for (i = 0; i < 4; i++)
	((char *)&grft_grp)[i] = *p++;

    logit(LOG_DEBUG, 0, "%s on vif %d acks graft (%s, %s)",
	inet_fmt(src), vifi,
	inet_fmt(grft_src), inet_fmt(grft_grp));

    /*
     * Find the subnet for the graft ack
     */
    if (find_src_grp(grft_src, 0, grft_grp)) {
	g = gtp ? gtp->gt_gnext : kernel_table;

	logit(LOG_WARNING, 0, "%s (%s, %s) from %s",
	    "rcvd graft ack with no kernel entry for",
	    inet_fmt(grft_src), inet_fmt(grft_grp),
/*
 * free all prune entries and kernel routes
 * normally, this should inform the kernel that all of its routes
 * are going away, but this is only called by restart(), which is
 * about to call MRT_DONE which does that anyway.
 */
free_all_prunes(void)

    struct gtable *g, *prev_g;
    struct stable *s, *prev_s;
    struct ptable *p, *prev_p;

    for (r = routing_table; r; r = r->rt_next) {
	r->rt_groups = NULL;
    kernel_table = NULL;

    g = kernel_no_route;
    kernel_no_route = NULL;
/*
 * When a new route is created, search
 * a) The less-specific part of the routing table
 * b) The route-less kernel table
 * for sources that the new route might want to handle.
 *
 * "Inheriting" these sources might be cleanest, but simply deleting
 * them is easier, and letting the kernel re-request them.
 */
steal_sources(struct rtentry *rt)

    struct gtable *gt, **gtnp;
    struct stable *st, **stnp;

    for (rp = rt->rt_next; rp; rp = rp->rt_next) {
	if ((rt->rt_origin & rp->rt_originmask) == rp->rt_origin) {
	    logit(LOG_DEBUG, 0, "Route for %s stealing sources from %s",
		inet_fmts(rt->rt_origin, rt->rt_originmask),
		inet_fmts(rp->rt_origin, rp->rt_originmask));
	    for (gt = rp->rt_groups; gt; gt = gt->gt_next) {
		stnp = &gt->gt_srctbl;
		while ((st = *stnp) != NULL) {
		    if ((st->st_origin & rt->rt_originmask) == rt->rt_origin) {
			logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
			    inet_fmts(rt->rt_origin, rt->rt_originmask),
			    inet_fmt(st->st_origin),
			    inet_fmt(gt->gt_mcastgrp),
			    inet_fmts(rp->rt_origin, rp->rt_originmask));
			if (k_del_rg(st->st_origin, gt) < 0) {
			    logit(LOG_WARNING, errno, "%s (%s, %s)",
				"steal_sources trying to delete",
				inet_fmt(st->st_origin),
				inet_fmt(gt->gt_mcastgrp));
			*stnp = st->st_next;
			stnp = &st->st_next;

    gtnp = &kernel_no_route;
    while ((gt = *gtnp) != NULL) {
	if (gt->gt_srctbl && ((gt->gt_srctbl->st_origin & rt->rt_originmask)
				    == rt->rt_origin)) {
	    logit(LOG_DEBUG, 0, "%s stealing (%s %s) from %s",
		inet_fmts(rt->rt_origin, rt->rt_originmask),
		inet_fmt(gt->gt_srctbl->st_origin),
		inet_fmt(gt->gt_mcastgrp),
	    if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
		logit(LOG_WARNING, errno, "%s (%s %s)",
		    "steal_sources trying to delete",
		    inet_fmt(gt->gt_srctbl->st_origin),
		    inet_fmt(gt->gt_mcastgrp));
	    free(gt->gt_srctbl);
	    *gtnp = gt->gt_next;
		gt->gt_next->gt_prev = gt->gt_prev;
	    gtnp = &gt->gt_next;
/*
 * Advance the timers on all the cache entries.
 * If there are any entries whose timers have expired,
 * remove these entries from the kernel cache.
 */
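/*
 * Each pass through age_table_entry() below subtracts
 * ROUTE_MAX_REPORT_DELAY from the per-entry cache timer, the upstream
 * prune timer and each downstream prune timer, so all of the expirations
 * handled here happen with the granularity of one aging interval.
 */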
age_table_entry(void)

    struct gtable *gt, **gtnptr;
    struct stable *st, **stnp;
    struct ptable *pt, **ptnp;
    struct sioc_sg_req sg_req;

    logit(LOG_DEBUG, 0, "ageing entries");

    gtnptr = &kernel_table;
    while ((gt = *gtnptr) != NULL) {

	/* advance the timer for the kernel entry */
	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

	/* decrement prune timer if need be */
	if (gt->gt_prsent_timer > 0) {
	    gt->gt_prsent_timer -= ROUTE_MAX_REPORT_DELAY;
	    if (gt->gt_prsent_timer <= 0) {
		logit(LOG_DEBUG, 0, "upstream prune tmo (%s %s)",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(gt->gt_mcastgrp));
		gt->gt_prsent_timer = -1;

	/* retransmit graft if graft sent flag is still set */
	if (gt->gt_grftsnt) {
	    CHK_GS(gt->gt_grftsnt++, y);
	/*
	 * If a prune expires, forward again on that vif.
	 */
	ptnp = &gt->gt_pruntbl;
	while ((pt = *ptnp) != NULL) {
	    if ((pt->pt_timer -= ROUTE_MAX_REPORT_DELAY) <= 0) {
		logit(LOG_DEBUG, 0, "expire prune (%s %s) from %s on vif %d",
		    inet_fmts(r->rt_origin, r->rt_originmask),
		    inet_fmt(gt->gt_mcastgrp),
		    inet_fmt(pt->pt_router),

		expire_prune(pt->pt_vifi, gt);

		/* remove the router's prune entry and await new one */
		*ptnp = pt->pt_next;
		ptnp = &pt->pt_next;

	/*
	 * If the cache entry has expired, delete source table entries for
	 * silent sources.  If there are no source entries left, and there
	 * are no downstream prunes, then the entry is deleted.
	 * Otherwise, the cache entry's timer is refreshed.
	 */
	if (gt->gt_timer <= 0) {
	    /* Check for traffic before deleting source entries */
	    sg_req.grp.s_addr = gt->gt_mcastgrp;
	    stnp = &gt->gt_srctbl;
	    while ((st = *stnp) != NULL) {
		sg_req.src.s_addr = st->st_origin;
		if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) < 0) {
		    logit(LOG_WARNING, errno, "%s (%s %s)",
			"age_table_entry: SIOCGETSGCNT failing for",
			inet_fmt(st->st_origin),
			inet_fmt(gt->gt_mcastgrp));
		    /* Make sure it gets deleted below */
		    sg_req.pktcnt = st->st_pktcnt;
		if (sg_req.pktcnt == st->st_pktcnt) {
		    *stnp = st->st_next;
		    logit(LOG_DEBUG, 0, "age_table_entry deleting (%s %s)",
			inet_fmt(st->st_origin),
			inet_fmt(gt->gt_mcastgrp));
		    if (k_del_rg(st->st_origin, gt) < 0) {
			logit(LOG_WARNING, errno,
			    "age_table_entry trying to delete (%s %s)",
			    inet_fmt(st->st_origin),
			    inet_fmt(gt->gt_mcastgrp));
		    st->st_pktcnt = sg_req.pktcnt;
		    stnp = &st->st_next;

	    /*
	     * Retain the group entry if we have downstream prunes or if
	     * there is at least one source in the list that still has
	     * traffic, or if our upstream prune timer is running.
	     */
	    if (gt->gt_pruntbl != NULL || gt->gt_srctbl != NULL ||
		gt->gt_prsent_timer > 0) {
		gt->gt_timer = CACHE_LIFETIME(cache_lifetime);
		if (gt->gt_prsent_timer == -1) {
		    if (gt->gt_grpmems == 0)
			gt->gt_prsent_timer = 0;
		gtnptr = &gt->gt_gnext;

	    logit(LOG_DEBUG, 0, "timeout cache entry (%s, %s)",
		inet_fmts(r->rt_origin, r->rt_originmask),
		inet_fmt(gt->gt_mcastgrp));

		gt->gt_prev->gt_next = gt->gt_next;
		gt->gt_route->rt_groups = gt->gt_next;
		gt->gt_next->gt_prev = gt->gt_prev;

		gt->gt_gprev->gt_gnext = gt->gt_gnext;
		gtnptr = &gt->gt_gprev->gt_gnext;
		kernel_table = gt->gt_gnext;
		gtnptr = &kernel_table;
		gt->gt_gnext->gt_gprev = gt->gt_gprev;

	    /* Send route change notification to reservation protocol. */
	    rsrr_cache_send(gt, 0);
	    rsrr_cache_clean(gt);

	    if (gt->gt_prsent_timer == -1) {
		if (gt->gt_grpmems == 0)
		    gt->gt_prsent_timer = 0;
	    gtnptr = &gt->gt_gnext;
    /*
     * When traversing the no_route table, the decision is much easier.
     * Just delete it if it has timed out.
     */
    gtnptr = &kernel_no_route;
    while ((gt = *gtnptr) != NULL) {
	/* advance the timer for the kernel entry */
	gt->gt_timer -= ROUTE_MAX_REPORT_DELAY;

	if (gt->gt_timer < 0) {
	    if (gt->gt_srctbl) {
		if (k_del_rg(gt->gt_srctbl->st_origin, gt) < 0) {
		    logit(LOG_WARNING, errno, "%s (%s %s)",
			"age_table_entry trying to delete no-route",
			inet_fmt(gt->gt_srctbl->st_origin),
			inet_fmt(gt->gt_mcastgrp));
		free(gt->gt_srctbl);
	    *gtnptr = gt->gt_next;
		gt->gt_next->gt_prev = gt->gt_prev;
	    gtnptr = &gt->gt_next;
/*
 * Modify the kernel to forward packets when one or multiple prunes that
 * were received on the vif given by vifi, for the group given by gt,
 * have expired.
 */
expire_prune(vifi_t vifi, struct gtable *gt)

    /*
     * No need to send a graft, any prunes that we sent
     * will expire before any prunes that we have received.
     */
    if (gt->gt_prsent_timer > 0) {
	logit(LOG_DEBUG, 0, "prune expired with %d left on %s",
	    gt->gt_prsent_timer, "prsent_timer");
	gt->gt_prsent_timer = 0;

    /* modify the kernel entry to forward packets */
    if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
	struct rtentry *rt = gt->gt_route;
	VIFM_SET(vifi, gt->gt_grpmems);
	logit(LOG_DEBUG, 0, "forw again (%s %s) gm:%x vif:%d",
	    inet_fmts(rt->rt_origin, rt->rt_originmask),
	    inet_fmt(gt->gt_mcastgrp), gt->gt_grpmems, vifi);

	/* Send route change notification to reservation protocol. */
	rsrr_cache_send(gt, 1);
    static char buf1[5];
    static char buf2[5];
    static char *buf = buf1;

    } else if (t < 3600) {
    } else if (t < 86400) {
    } else if (t < 864000) {

    snprintf(p, 5, "%3d%c", (int)t, s);
/*
 * Print the contents of the cache table on file 'fp2'.
 */
dump_cache(FILE *fp2)

    time_t thyme = time(0);

	"Multicast Routing Cache Table (%d entries)\n%s", kroutes,
	" Origin Mcast-group CTmr Age Ptmr IVif Forwvifs\n");

    for (gt = kernel_no_route; gt; gt = gt->gt_next) {
	if (gt->gt_srctbl) {
	    fprintf(fp2, " %-18s %-15s %-4s %-4s - -1\n",
		inet_fmts(gt->gt_srctbl->st_origin, 0xffffffff),
		inet_fmt(gt->gt_mcastgrp), scaletime(gt->gt_timer),
		scaletime(thyme - gt->gt_ctime));
	    fprintf(fp2, ">%s\n", inet_fmt(gt->gt_srctbl->st_origin));

    for (gt = kernel_table; gt; gt = gt->gt_gnext) {

	fprintf(fp2, " %-18s %-15s",
	    inet_fmts(r->rt_origin, r->rt_originmask),
	    inet_fmt(gt->gt_mcastgrp));

	fprintf(fp2, " %-4s", scaletime(gt->gt_timer));

	fprintf(fp2, " %-4s %-4s ", scaletime(thyme - gt->gt_ctime),
	    gt->gt_prsent_timer ? scaletime(gt->gt_prsent_timer) :

	fprintf(fp2, "%2u%c%c ", r->rt_parent,
	    gt->gt_prsent_timer ? 'P' : ' ',
	    VIFM_ISSET(r->rt_parent, gt->gt_scope) ? 'B' : ' ');

	for (i = 0; i < numvifs; ++i) {
	    if (VIFM_ISSET(i, gt->gt_grpmems))
		fprintf(fp2, " %u ", i);
	    else if (VIFM_ISSET(i, r->rt_children) &&
		     !VIFM_ISSET(i, r->rt_leaves))
		fprintf(fp2, " %u%c", i,
		    VIFM_ISSET(i, gt->gt_scope) ? 'b' : 'p');

	for (st = gt->gt_srctbl; st; st = st->st_next) {
	    fprintf(fp2, ">%s\n", inet_fmt(st->st_origin));
	for (pt = gt->gt_pruntbl; pt; pt = pt->pt_next) {
	    fprintf(fp2, "<r:%s v:%d t:%d\n", inet_fmt(pt->pt_router),
		pt->pt_vifi, pt->pt_timer);
/*
 * Traceroute function which returns traceroute replies to the requesting
 * router. Also forwards the request to downstream routers.
 *
 * no: promoted u_char
 */
accept_mtrace(u_int32_t src, u_int32_t dst, u_int32_t group, char *data,
    u_int no, int datalen)

    struct tr_query *qry;
    struct tr_resp *resp;
    int errcode = TR_NO_ERR;
    struct sioc_vif_req v_req;
    struct sioc_sg_req sg_req;

    /* Remember qid across invocations */
    static u_int32_t oqid = 0;

    /* timestamp the request/response */
    gettimeofday(&tp, 0);

    /*
     * Check if it is a query or a response
     */
    if (datalen == QLEN) {
	logit(LOG_DEBUG, 0, "Initial traceroute query rcvd from %s to %s",
	    inet_fmt(src), inet_fmt(dst));
    else if ((datalen - QLEN) % RLEN == 0) {
	logit(LOG_DEBUG, 0, "In-transit traceroute query rcvd from %s to %s",
	    inet_fmt(src), inet_fmt(dst));
	if (IN_MULTICAST(ntohl(dst))) {
	    logit(LOG_DEBUG, 0, "Dropping multicast response");

	logit(LOG_WARNING, 0, "%s from %s to %s",
	    "Non decipherable traceroute request received",
	    inet_fmt(src), inet_fmt(dst));

    qry = (struct tr_query *)data;

    /*
     * if it is a packet with all reports filled, drop it
     */
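    /*
     * A request is one tr_query header plus one tr_resp block per hop
     * already traversed, so (datalen - QLEN) / RLEN below counts the
     * response blocks already present; "no" appears to be the number of
     * reports the requester asked for, and when the two are equal there
     * is nothing left for this router to add.
     */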
    if ((u_int)(rcount = (datalen - QLEN)/RLEN) == no) {
	logit(LOG_DEBUG, 0, "packet with all reports filled in");

    logit(LOG_DEBUG, 0, "s: %s g: %s d: %s ",
	inet_fmt(qry->tr_src),
	inet_fmt(qry->tr_dst));
    logit(LOG_DEBUG, 0, "rttl: %d rd: %s", qry->tr_rttl,
	inet_fmt(qry->tr_raddr));
    logit(LOG_DEBUG, 0, "rcount:%d, qid:%06x", rcount, qry->tr_qid);

    /* determine the routing table entry for this traceroute */
    rt = determine_route(qry->tr_src);
	logit(LOG_DEBUG, 0, "rt parent vif: %d rtr: %s metric: %d",
	    rt->rt_parent, inet_fmt(rt->rt_gateway),
	logit(LOG_DEBUG, 0, "rt origin %s",
	    inet_fmts(rt->rt_origin, rt->rt_originmask));
	logit(LOG_DEBUG, 0, "...no route");

    /*
     * Query type packet - check if rte exists
     * Check if the query destination is a vif connected to me.
     * and if so, whether I should start response back
     */
    if (type == QUERY) {
	if (oqid == qry->tr_qid) {
	    /*
	     * If the multicast router is a member of the group being
	     * queried, and the query is multicasted, then the router can
	     * receive multiple copies of the same query.  If we have already
	     * replied to this traceroute, just ignore it this time.
	     *
	     * This is not a total solution, but since if this fails you
	     * only get N copies, N <= the number of interfaces on the router,
	     */
	    logit(LOG_DEBUG, 0, "ignoring duplicate traceroute packet");

	    logit(LOG_DEBUG, 0, "Mcast traceroute: no route entry %s",
		inet_fmt(qry->tr_src));
	    if (IN_MULTICAST(ntohl(dst)))

	vifi = find_vif(qry->tr_dst, 0);

	if (vifi == NO_VIF) {
	    /* The traceroute destination is not on one of my subnet vifs. */
	    logit(LOG_DEBUG, 0, "Destination %s not an interface",
		inet_fmt(qry->tr_dst));
	    if (IN_MULTICAST(ntohl(dst)))
	    errcode = TR_WRONG_IF;
	} else if (rt != NULL && !VIFM_ISSET(vifi, rt->rt_children)) {
		"Destination %s not on forwarding tree for src %s",
		inet_fmt(qry->tr_dst),
		inet_fmt(qry->tr_src));
	    if (IN_MULTICAST(ntohl(dst)))
	    errcode = TR_WRONG_IF;

	/*
	 * determine which interface the packet came in on
	 * RESP packets travel hop-by-hop so this either traversed
	 * a tunnel or came from a directly attached mrouter.
	 */
	if ((vifi = find_vif(src, dst)) == NO_VIF) {
	    logit(LOG_DEBUG, 0, "Wrong interface for packet");
	    errcode = TR_WRONG_IF;

    /* Now that we've decided to send a response, save the qid */

    logit(LOG_DEBUG, 0, "Sending traceroute response");

    /* copy the packet to the sending buffer */
    p = send_buf + MIN_IP_HEADER_LEN + IGMP_MINLEN;

    bcopy(data, p, datalen);

    /*
     * If there is no room to insert our reply, coopt the previous hop
     * error indication to relay this fact.
     */
    if (p + sizeof(struct tr_resp) > send_buf + RECV_BUF_SIZE) {
	resp = (struct tr_resp *)p - 1;
	resp->tr_rflags = TR_NO_SPACE;

    /*
     * fill in initial response fields
     */
    resp = (struct tr_resp *)p;
    bzero(resp, sizeof(struct tr_resp));

    resp->tr_qarr    = htonl((tp.tv_sec + JAN_1970) << 16) +
			    ((tp.tv_usec >> 4) & 0xffff);
    resp->tr_rproto  = PROTO_DVMRP;
    if (errcode != TR_NO_ERR) {
	resp->tr_rflags	 = errcode;
	rt = NULL;	/* hack to enforce send straight to requestor */

    resp->tr_outaddr = uvifs[vifi].uv_lcl_addr;
    resp->tr_fttl    = uvifs[vifi].uv_threshold;
    resp->tr_rflags  = TR_NO_ERR;

    /*
     * obtain # of packets out on interface
     */
    if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
	resp->tr_vifout = htonl(v_req.ocount);

    /*
     * fill in scoping & pruning information
     */
	for (gt = rt->rt_groups; gt; gt = gt->gt_next) {
	    if (gt->gt_mcastgrp >= group)

	if (gt && gt->gt_mcastgrp == group) {
	    sg_req.src.s_addr = qry->tr_src;
	    sg_req.grp.s_addr = group;
	    if (ioctl(igmp_socket, SIOCGETSGCNT, (char *)&sg_req) >= 0)
		resp->tr_pktcnt = htonl(sg_req.pktcnt);

	    if (VIFM_ISSET(vifi, gt->gt_scope))
		resp->tr_rflags = TR_SCOPED;
	    else if (gt->gt_prsent_timer)
		resp->tr_rflags = TR_PRUNED;
	    else if (!VIFM_ISSET(vifi, gt->gt_grpmems)) {
		if (VIFM_ISSET(vifi, rt->rt_children) &&
		    !VIFM_ISSET(vifi, rt->rt_leaves))
		    resp->tr_rflags = TR_OPRUNED;
		    resp->tr_rflags = TR_NO_FWD;

	    if (scoped_addr(vifi, group))
		resp->tr_rflags = TR_SCOPED;
	    else if (rt && !VIFM_ISSET(vifi, rt->rt_children))
		resp->tr_rflags = TR_NO_FWD;

    /*
     * if no rte exists, set NO_RTE error
     */
	src = dst;	/* the dst address of resp. pkt */
	resp->tr_inaddr = 0;
	resp->tr_rflags = TR_NO_RTE;
	resp->tr_rmtaddr = 0;

	/* get # of packets in on interface */
	v_req.vifi = rt->rt_parent;
	if (ioctl(igmp_socket, SIOCGETVIFCNT, (char *)&v_req) >= 0)
	    resp->tr_vifin = htonl(v_req.icount);

	MASK_TO_VAL(rt->rt_originmask, resp->tr_smask);
	src = uvifs[rt->rt_parent].uv_lcl_addr;
	resp->tr_inaddr = src;
	resp->tr_rmtaddr = rt->rt_gateway;
	if (!VIFM_ISSET(vifi, rt->rt_children)) {
		"Destination %s not on forwarding tree for src %s",
		inet_fmt(qry->tr_dst),
		inet_fmt(qry->tr_src));
	    resp->tr_rflags = TR_WRONG_IF;
	if (rt->rt_metric >= UNREACHABLE) {
	    resp->tr_rflags = TR_NO_RTE;
	    /* Hack to send reply directly */

    /*
     * if metric is 1 or no. of reports is 1, send response to requestor
     * else send to upstream router.  If the upstream router can't handle
     * mtrace, set an error code and send to requestor anyway.
     */
    logit(LOG_DEBUG, 0, "rcount:%d, no:%d", rcount, no);

    if (((u_int)(rcount + 1) == no) || (rt == NULL) || (rt->rt_metric == 1)) {
	resptype = IGMP_MTRACE_REPLY;
	dst = qry->tr_raddr;
	if (!can_mtrace(rt->rt_parent, rt->rt_gateway)) {
	    dst = qry->tr_raddr;
	    resp->tr_rflags = TR_OLD_ROUTER;
	    resptype = IGMP_MTRACE_REPLY;
	    dst = rt->rt_gateway;
	    resptype = IGMP_MTRACE_QUERY;

    if (IN_MULTICAST(ntohl(dst))) {
	/*
	 * Send the reply on a known multicast capable vif.
	 * If we don't have one, we can't source any multicasts anyway.
	 */
	if (phys_vif != -1) {
	    logit(LOG_DEBUG, 0, "Sending reply to %s from %s",
		inet_fmt(dst), inet_fmt(uvifs[phys_vif].uv_lcl_addr));
	    k_set_ttl(qry->tr_rttl);
	    send_igmp(uvifs[phys_vif].uv_lcl_addr, dst,
		resptype, no, group,
	    logit(LOG_INFO, 0, "No enabled phyints -- %s",
		"dropping traceroute reply");
	logit(LOG_DEBUG, 0, "Sending %s to %s from %s",
	    resptype == IGMP_MTRACE_REPLY ? "reply" : "request on",
	    inet_fmt(dst), inet_fmt(src));

	    resptype, no, group,