/* Linux multicast routing support
 * Common logic shared by IPv4 [ipmr] and IPv6 [ip6mr] implementation
 */

#include <linux/mroute_base.h>
/* Sets everything common except 'dev', since that is done under locking */
void vif_device_init(struct vif_device *v,
		     struct net_device *dev,
		     unsigned long rate_limit,
		     unsigned char threshold,
		     unsigned short flags,
		     unsigned short get_iflink_mask)
{
	v->dev = NULL;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->rate_limit = rate_limit;
	v->flags = flags;
	v->threshold = threshold;
	if (v->flags & get_iflink_mask)
		v->link = dev_get_iflink(dev);
	else
		v->link = dev->ifindex;
}
EXPORT_SYMBOL(vif_device_init);
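/* Example call site, sketched from ipmr's vif_add() (the argument values
 * shown are the IPv4 ones and are illustrative; consult net/ipv4/ipmr.c
 * for the authoritative call):
 *
 *	vif_device_init(v, dev, vifc->vifc_rate_limit,
 *			vifc->vifc_threshold,
 *			vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
 *			(VIFF_TUNNEL | VIFF_REGISTER));
 */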
struct mr_table *
mr_table_alloc(struct net *net, u32 id,
	       struct mr_table_ops *ops,
	       void (*expire_func)(struct timer_list *t),
	       void (*table_set)(struct mr_table *mrt,
				 struct net *net))
{
	struct mr_table *mrt;
	int err;

	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
	if (!mrt)
		return ERR_PTR(-ENOMEM);
	mrt->id = id;
	write_pnet(&mrt->net, net);

	mrt->ops = *ops;
	err = rhltable_init(&mrt->mfc_hash, mrt->ops.rht_params);
	if (err) {
		kfree(mrt);
		return ERR_PTR(err);
	}
	INIT_LIST_HEAD(&mrt->mfc_cache_list);
	INIT_LIST_HEAD(&mrt->mfc_unres_queue);

	timer_setup(&mrt->ipmr_expire_timer, expire_func, 0);

	mrt->mroute_reg_vif_num = -1;
	table_set(mrt, net);
	return mrt;
}
EXPORT_SYMBOL(mr_table_alloc);
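/* Look up an MFC cache entry by its protocol-specific hash argument.
 * parent == -1 matches an entry regardless of its input vif. Callers are
 * expected to hold rcu_read_lock(), since this walks an RCU-protected
 * rhltable chain.
 */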
void *mr_mfc_find_parent(struct mr_table *mrt, void *hasharg, int parent)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (parent == -1 || parent == c->mfc_parent)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_parent);
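/* Find a (*,*) entry whose oif list includes 'vifi' (a ttl below 255
 * means the vif is an output interface for the entry). cmparg_any is the
 * table's "any source, any group" hash argument.
 */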
void *mr_mfc_find_any_parent(struct mr_table *mrt, int vifi)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c;

	list = rhltable_lookup(&mrt->mfc_hash, mrt->ops.cmparg_any,
			       *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_find_any_parent);
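/* Resolve 'hasharg' to an entry forwarding to 'vifi': accept a direct
 * match, or a match whose input vif is covered by a (*,*) proxy entry
 * that forwards to 'vifi'; finally fall back to any (*,*) entry that
 * forwards to 'vifi'.
 */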
void *mr_mfc_find_any(struct mr_table *mrt, int vifi, void *hasharg)
{
	struct rhlist_head *tmp, *list;
	struct mr_mfc *c, *proxy;

	list = rhltable_lookup(&mrt->mfc_hash, hasharg, *mrt->ops.rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode) {
		if (c->mfc_un.res.ttls[vifi] < 255)
			return c;

		/* It's ok if the vifi is part of the static tree */
		proxy = mr_mfc_find_any_parent(mrt, c->mfc_parent);
		if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
			return c;
	}

	return mr_mfc_find_any_parent(mrt, vifi);
}
EXPORT_SYMBOL(mr_mfc_find_any);
#ifdef CONFIG_PROC_FS
void *mr_vif_seq_idx(struct net *net, struct mr_vif_iter *iter, loff_t pos)
{
	struct mr_table *mrt = iter->mrt;

	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		if (pos-- == 0)
			return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_idx);
void *mr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = iter->mrt;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return mr_vif_seq_idx(net, iter, 0);

	while (++iter->ct < mrt->maxvif) {
		if (!VIF_EXISTS(mrt, iter->ct))
			continue;
		return &mrt->vif_table[iter->ct];
	}
	return NULL;
}
EXPORT_SYMBOL(mr_vif_seq_next);
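/* MFC iteration walks the resolved entries under RCU first, then the
 * unresolved queue under it->lock (the protocol's unresolved-queue
 * spinlock); it->cache records which list the iterator is in so that
 * the caller can drop the right lock when iteration stops.
 */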
void *mr_mfc_seq_idx(struct net *net,
		     struct mr_mfc_iter *it, loff_t pos)
{
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	it->cache = &mrt->mfc_cache_list;
	list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
		if (pos-- == 0)
			return mfc;
	rcu_read_unlock();

	spin_lock_bh(it->lock);
	it->cache = &mrt->mfc_unres_queue;
	list_for_each_entry(mfc, it->cache, list)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(it->lock);

	it->cache = NULL;
	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_idx);
void *mr_mfc_seq_next(struct seq_file *seq, void *v,
		      loff_t *pos)
{
	struct mr_mfc_iter *it = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt = it->mrt;
	struct mr_mfc *c = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return mr_mfc_seq_idx(net, seq->private, 0);

	if (c->list.next != it->cache)
		return list_entry(c->list.next, struct mr_mfc, list);

	if (it->cache == &mrt->mfc_unres_queue)
		goto end_of_list;

	/* exhausted cache_array, show unresolved */
	rcu_read_unlock();
	it->cache = &mrt->mfc_unres_queue;

	spin_lock_bh(it->lock);
	if (!list_empty(it->cache))
		return list_first_entry(it->cache, struct mr_mfc, list);

end_of_list:
	spin_unlock_bh(it->lock);
	it->cache = NULL;

	return NULL;
}
EXPORT_SYMBOL(mr_mfc_seq_next);
#endif /* CONFIG_PROC_FS */
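/* Fill a route netlink message from an MFC entry. On success the message
 * carries RTA_IIF (the input vif's ifindex), an RTA_MULTIPATH nest with
 * one rtnexthop per output vif (rtnh_hops holds the vif's ttl threshold),
 * plus RTA_MFC_STATS and RTA_EXPIRES.
 */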
int mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
		   struct mr_mfc *c, struct rtmsg *rtm)
{
	struct rta_mfc_stats mfcs;
	struct nlattr *mp_attr;
	struct rtnexthop *nhp;
	unsigned long lastuse;
	int ct;

	/* If cache is unresolved, don't try to parse IIF and OIF */
	if (c->mfc_parent >= MAXVIFS) {
		rtm->rtm_flags |= RTNH_F_UNRESOLVED;
		return -ENOENT;
	}

	if (VIF_EXISTS(mrt, c->mfc_parent) &&
	    nla_put_u32(skb, RTA_IIF,
			mrt->vif_table[c->mfc_parent].dev->ifindex) < 0)
		return -EMSGSIZE;

	if (c->mfc_flags & MFC_OFFLOAD)
		rtm->rtm_flags |= RTNH_F_OFFLOAD;

	mp_attr = nla_nest_start(skb, RTA_MULTIPATH);
	if (!mp_attr)
		return -EMSGSIZE;

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
			struct vif_device *vif;

			nhp = nla_reserve_nohdr(skb, sizeof(*nhp));
			if (!nhp) {
				nla_nest_cancel(skb, mp_attr);
				return -EMSGSIZE;
			}

			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			vif = &mrt->vif_table[ct];
			nhp->rtnh_ifindex = vif->dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}

	nla_nest_end(skb, mp_attr);

	lastuse = READ_ONCE(c->mfc_un.res.lastuse);
	lastuse = time_after_eq(jiffies, lastuse) ? jiffies - lastuse : 0;

	mfcs.mfcs_packets = c->mfc_un.res.pkt;
	mfcs.mfcs_bytes = c->mfc_un.res.bytes;
	mfcs.mfcs_wrong_if = c->mfc_un.res.wrong_if;
	if (nla_put_64bit(skb, RTA_MFC_STATS, sizeof(mfcs), &mfcs, RTA_PAD) ||
	    nla_put_u64_64bit(skb, RTA_EXPIRES, jiffies_to_clock_t(lastuse),
			      RTA_PAD))
		return -EMSGSIZE;

	rtm->rtm_type = RTN_MULTICAST;
	return 1;
}
EXPORT_SYMBOL(mr_fill_mroute);
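/* Netlink dump helper: iterate all tables via 'iter' and emit one
 * RTM_NEWROUTE per resolved and unresolved entry through 'fill'.
 * cb->args[0]/[1] record the table and entry indices reached so far, so
 * an interrupted dump resumes where it left off.
 */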
int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
		     struct mr_table *(*iter)(struct net *net,
					      struct mr_table *mrt),
		     int (*fill)(struct mr_table *mrt,
				 struct sk_buff *skb,
				 u32 portid, u32 seq, struct mr_mfc *c,
				 int cmd, int flags),
		     spinlock_t *lock)
{
	unsigned int t = 0, e = 0, s_t = cb->args[0], s_e = cb->args[1];
	struct net *net = sock_net(skb->sk);
	struct mr_table *mrt;
	struct mr_mfc *mfc;

	rcu_read_lock();
	for (mrt = iter(net, NULL); mrt; mrt = iter(net, mrt)) {
		if (t < s_t)
			goto next_table;
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
			if (e < s_e)
				goto next_entry;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0)
				goto done;
next_entry:
			e++;
		}
		e = 0;
		s_e = 0;

		spin_lock_bh(lock);
		list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
			if (e < s_e)
				goto next_entry2;
			if (fill(mrt, skb, NETLINK_CB(cb->skb).portid,
				 cb->nlh->nlmsg_seq, mfc,
				 RTM_NEWROUTE, NLM_F_MULTI) < 0) {
				spin_unlock_bh(lock);
				goto done;
			}
next_entry2:
			e++;
		}
		spin_unlock_bh(lock);
		e = 0;
		s_e = 0;
next_table:
		t++;
	}
done:
	rcu_read_unlock();

	cb->args[1] = e;
	cb->args[0] = t;

	return skb->len;
}
EXPORT_SYMBOL(mr_rtm_dumproute);
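/* Replay the current multicast state to a notifier block: the caller's
 * policy rules first (via rules_dump), then every table's VIF entries
 * under mrt_lock, then its resolved MFC entries.
 */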
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
	    int (*rules_dump)(struct net *net,
			      struct notifier_block *nb),
	    struct mr_table *(*mr_iter)(struct net *net,
					struct mr_table *mrt),
	    rwlock_t *mrt_lock)
{
	struct mr_table *mrt;
	int err;

	err = rules_dump(net, nb);
	if (err)
		return err;

	for (mrt = mr_iter(net, NULL); mrt; mrt = mr_iter(net, mrt)) {
		struct vif_device *v = &mrt->vif_table[0];
		struct mr_mfc *mfc;
		int vifi;

		/* Notify on table VIF entries */
		read_lock(mrt_lock);
		for (vifi = 0; vifi < mrt->maxvif; vifi++, v++) {
			if (!v->dev)
				continue;

			mr_call_vif_notifier(nb, net, family,
					     FIB_EVENT_VIF_ADD,
					     v, vifi, mrt->id);
		}
		read_unlock(mrt_lock);

		/* Notify on table MFC entries */
		list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
			mr_call_mfc_notifier(nb, net, family,
					     FIB_EVENT_ENTRY_ADD,
					     mfc, mrt->id);
	}

	return 0;
}
EXPORT_SYMBOL(mr_dump);