ldp/ldp_fec.c

/*
 * Copyright (C) James R. Leu 2000
 * jleu@mindspring.com
 *
 * This software is covered under the LGPL, for more
 * info check out http://www.gnu.org/copyleft/lgpl.html
 */

#include "ldp_struct.h"
#include "ldp_fec.h"
#include "ldp_if.h"
#include "ldp_attr.h"
#include "ldp_session.h"
#include "ldp_inlabel.h"
#include "ldp_outlabel.h"
#include "ldp_global.h"
#include "ldp_label_mapping.h"
#include "ldp_label_request.h"
#include "ldp_label_abort.h"
#include "ldp_label_rel_with.h"
#include "mpls_mm_impl.h"
#include "mpls_policy_impl.h"
#include "mpls_trace_impl.h"

#if MPLS_USE_LSR
#include "lsr_cfg.h"
#else
#include "mpls_mpls_impl.h"
#endif
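
/*
 * Recognize_New_Fec: handle a FEC newly learned from the routing table.
 * The FEC.x markers below appear to track the "Recognize New FEC" event
 * procedure of the LDP specification (RFC 3036, Appendix A): find the
 * next-hop session, complete an outgoing label if a mapping was already
 * received from that next hop, advertise mappings to the remaining peers
 * according to their distribution mode and the LSP control mode, and
 * fall back to sending a label request when the next hop is running
 * downstream-on-demand.
 */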
mpls_return_enum Recognize_New_Fec(ldp_global * g, mpls_fec * f)
{
  ldp_session *peer = NULL;
  ldp_session *nh_session = NULL;
  ldp_attr *ds_attr = NULL;
  ldp_attr *us_attr = NULL;
  mpls_bool egress = MPLS_BOOL_FALSE;
  ldp_addr *nh_addr;
  ldp_outlabel *out;

  LDP_ENTER(g->user_data, "Recognize_New_Fec");

  /*
   * find the info about the next hop for this FEC
   */
  switch (ldp_get_next_hop_session_for_fec(g, f, &nh_addr, &nh_session)) {
    case MPLS_SUCCESS:
      if (nh_addr == NULL) {
        /*
         * we found the route, but no next hop
         */
        egress = MPLS_BOOL_TRUE;
      }
      break;
    case MPLS_FAILURE:
    case MPLS_NO_ROUTE:
      return MPLS_FAILURE;
    default:
      MPLS_ASSERT(0);
  }

  if (nh_session) {
    ds_attr = ldp_attr_find_downstream_state(g, nh_session, f,
      LDP_LSP_STATE_MAP_RECV);
    if (ds_attr && !ds_attr->outlabel) {
      out = ldp_outlabel_create_complete(g, nh_session, nh_addr, ds_attr);
      if (!out) {
        return MPLS_FAILURE;
      }
      ds_attr->outlabel = out;
    }
  }

  /*
   * for every peer except the nh hop peer, check to see if we need to
   * send a mapping
   */
  peer = MPLS_LIST_HEAD(&g->session);
  while (peer != NULL) { /* FEC.1 */
    if (nh_session && peer->index == nh_session->index) {
      goto next_peer;
    }
    /* have I already sent a mapping for FEC to peer */
    if ((us_attr = ldp_attr_find_upstream_state(g, peer, f,
          LDP_LSP_STATE_MAP_SENT))) {
      /* yep, don't send another */
      goto next_peer;
    }
    if (peer->oper_distribution_mode == LDP_DISTRIBUTION_UNSOLICITED) {
      if (g->lsp_control_mode == LDP_CONTROL_INDEPENDENT) {
        us_attr =
          ldp_attr_find_upstream_state(g, peer, f, LDP_LSP_STATE_REQ_RECV);

        /* FEC.1.DUI3,4 */
        if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) !=
          MPLS_SUCCESS) {
          if (us_attr && !us_attr->in_tree) {
            ldp_attr_remove_complete(g, us_attr, MPLS_BOOL_FALSE);
          }
          goto next_peer;
        }
      } else {
        /*
         * LDP_CONTROL_ORDERED
         */
        if (ds_attr || egress == MPLS_BOOL_TRUE) { /* FEC.1.DUO2 */
          if (!(us_attr = ldp_attr_create(f))) {
            return MPLS_FAILURE;
          }
          /* FEC.1.DUO3-4 */
          if ((egress == MPLS_BOOL_TRUE) && (mpls_policy_egress_check(
            g->user_data, f, &f->nh) == MPLS_BOOL_TRUE)) {
            goto next_peer;
          }

          if (ldp_label_mapping_with_xc(g, peer, f, &us_attr, ds_attr) !=
            MPLS_SUCCESS) {
            return MPLS_FAILURE;
          }
        }
      }
    }
  next_peer:
    peer = MPLS_LIST_NEXT(&g->session, peer, _global);
  }

  if (ds_attr) { /* FEC.2 */
    if (ldp_label_mapping_process(g, nh_session, NULL, NULL, ds_attr, f) ==
      MPLS_FAILURE) { /* FEC.5 */
      return MPLS_FAILURE;
    }
    return MPLS_SUCCESS;
  }

  /*
   * LDP_DISTRIBUTION_ONDEMAND
   */
  /* FEC.3 */
  if (nh_session &&
    nh_session->oper_distribution_mode == LDP_DISTRIBUTION_ONDEMAND) {
    /* assume we're always "request when needed" */
    ds_attr = NULL;
    if (ldp_label_request_for_xc(g, nh_session, f, NULL, &ds_attr) ==
      MPLS_FAILURE) { /* FEC.4 */
      return MPLS_FAILURE;
    }
  }

  LDP_EXIT(g->user_data, "Recognize_New_Fec");

  return MPLS_SUCCESS; /* FEC.6 */
}
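
/*
 * Detect_Change_Fec_Next_Hop: react to a routing change that moves a FEC
 * away from the session toward the old next hop (nh_old).  The NH.x
 * markers appear to track the "Detect Change in FEC Next Hop" procedure
 * of the LDP specification (RFC 3036, Appendix A): release or keep the
 * old downstream mapping depending on the retention mode, abort any
 * outstanding label request, then either reuse a stored mapping for the
 * new next hop, request one, or withdraw the mappings previously sent
 * upstream.
 */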
mpls_return_enum Detect_Change_Fec_Next_Hop(ldp_global * g, mpls_fec * f,
  ldp_session * nh_old)
{
  ldp_session *peer = NULL;
  ldp_attr *us_attr = NULL;
  ldp_attr *ds_attr = NULL;
  ldp_session *nh_new = NULL;
  mpls_return_enum result;

  LDP_ENTER(g->user_data, "Detect_Change_Fec_Next_Hop");

  result = ldp_get_next_hop_session_for_fec(g, f, NULL, &nh_new);

  /*
   * NH 1-5 decide if we need to release an existing mapping
   */
  ds_attr =
    ldp_attr_find_downstream_state(g, nh_old, f, LDP_LSP_STATE_MAP_RECV);
  if (!ds_attr) { /* NH.1 */
    goto Detect_Change_Fec_Next_Hop_6;
  }

  if (ds_attr->ingress == MPLS_BOOL_TRUE) {
#if MPLS_USE_LSR
    lsr_ftn ftn;

    ftn.outsegment_index = ds_attr->outlabel->info.handle;
    memcpy(&ftn.fec, f, sizeof(mpls_fec));
    lsr_cfg_ftn_set2(g->lsr_handle, &ftn, LSR_CFG_DEL);
#else
    mpls_mpls_fec2out_del(g->mpls_handle, f, &ds_attr->outlabel->info);
#endif
    ds_attr->ingress = MPLS_BOOL_FALSE;
    ds_attr->outlabel->merge_count--;
  }

  if (g->label_retention_mode == LDP_RETENTION_LIBERAL) { /* NH.3 */
    ldp_attr *us_temp;

    us_attr = MPLS_LIST_HEAD(&ds_attr->us_attr_root);
    while (us_attr) {
      /* need to walk the list in such a way as not to
       * "pull the rug out from under myself"
       */
      us_temp = MPLS_LIST_NEXT(&ds_attr->us_attr_root, us_attr, _ds_attr);
      if (us_attr->state == LDP_LSP_STATE_MAP_SENT) {
        ldp_inlabel_del_outlabel(g, us_attr->inlabel); /* NH.2 */
        ldp_attr_del_us2ds(us_attr, ds_attr);
      }
      us_attr = us_temp;
    }
    goto Detect_Change_Fec_Next_Hop_6;
  }

  ldp_label_release_send(g, nh_old, ds_attr, LDP_NOTIF_NONE); /* NH.4 */
  ldp_attr_remove_complete(g, ds_attr, MPLS_BOOL_FALSE); /* NH.2,5 */

Detect_Change_Fec_Next_Hop_6:

  /*
   * NH 6-9 decide if we need to send a label request abort
   */
  ds_attr =
    ldp_attr_find_downstream_state(g, nh_old, f, LDP_LSP_STATE_REQ_SENT);
  if (ds_attr) { /* NH.6 */
    if (g->label_retention_mode != LDP_RETENTION_CONSERVATIVE) { /* NH.7 */
      /* NH.8,9 */
      if (ldp_label_abort_send(g, nh_old, ds_attr) != MPLS_SUCCESS) {
        return MPLS_FAILURE;
      }
    }
  }

  /*
   * NH 10-12 decide if we can use a mapping from our database
   */
  if (!nh_new) {
    goto Detect_Change_Fec_Next_Hop_16;
  }

  ds_attr =
    ldp_attr_find_downstream_state(g, nh_new, f, LDP_LSP_STATE_MAP_RECV);
  if (!ds_attr) { /* NH.11 */
    goto Detect_Change_Fec_Next_Hop_13;
  }

  if (ldp_label_mapping_process(g, nh_new, NULL, NULL, ds_attr, f) !=
    MPLS_SUCCESS) { /* NH.12 */
    return MPLS_FAILURE;
  }
  goto Detect_Change_Fec_Next_Hop_20;

Detect_Change_Fec_Next_Hop_13:

  /*
   * NH 13-15 decide if we need to make a label request
   */
  if (nh_new->oper_distribution_mode == LDP_DISTRIBUTION_ONDEMAND &&
    g->label_retention_mode == LDP_RETENTION_CONSERVATIVE) {
    /* NH.14-15 */
    if (ldp_label_request_for_xc(g, nh_new, f, NULL, &ds_attr) !=
      MPLS_SUCCESS) {
      return MPLS_FAILURE;
    }
  }
  goto Detect_Change_Fec_Next_Hop_20;

Detect_Change_Fec_Next_Hop_16:

  peer = MPLS_LIST_HEAD(&g->session);
  while (peer) {
    us_attr = ldp_attr_find_upstream_state(g, peer, f, LDP_LSP_STATE_MAP_SENT);
    if (us_attr) { /* NH.17 */
      if (ldp_label_withdraw_send(g, peer, us_attr, LDP_NOTIF_NONE) !=
        MPLS_SUCCESS) { /* NH.18 */
        ldp_attr_remove_complete(g, us_attr, MPLS_BOOL_FALSE);
        return MPLS_FAILURE;
      }
    }
    peer = MPLS_LIST_NEXT(&g->session, peer, _global);
  }

Detect_Change_Fec_Next_Hop_20:

  LDP_EXIT(g->user_data, "Detect_Change_Fec_Next_Hop");

  return MPLS_SUCCESS;
}
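
/*
 * Constructor for the ldp_fec wrapper around an mpls_fec: zeroes the
 * structure, initializes the reference count and all list linkage, and
 * marks the FEC and next-hop types as "none" until filled in.
 */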
ldp_fec *ldp_fec_create()
{
  ldp_fec *fec = (ldp_fec *) mpls_malloc(sizeof(ldp_fec));

  if (fec != NULL) {
    memset(fec, 0, sizeof(ldp_fec));
    MPLS_REFCNT_INIT(fec, 0);
    MPLS_LIST_ELEM_INIT(fec, _global);
    MPLS_LIST_ELEM_INIT(fec, _inlabel);
    MPLS_LIST_ELEM_INIT(fec, _outlabel);
    MPLS_LIST_ELEM_INIT(fec, _fec);
    MPLS_LIST_ELEM_INIT(fec, _nh);
    MPLS_LIST_INIT(&fec->fs_root_us, ldp_fs);
    MPLS_LIST_INIT(&fec->fs_root_ds, ldp_fs);
    fec->info.type = MPLS_FEC_NONE;
    fec->info.nh.type = MPLS_NH_NONE;
    fec->info.nh.attached = MPLS_BOOL_FALSE;
  }
  return fec;
}
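
/*
 * Copy a plain mpls_fec into the info member of an existing ldp_fec.
 */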
void mpls_fec2ldp_fec(mpls_fec * a, ldp_fec * b)
{
  memcpy(&b->info, a, sizeof(mpls_fec));
}
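
/*
 * Convenience constructors below: allocate an ldp_fec and fill in a host
 * address FEC or an address-prefix FEC respectively.
 */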
ldp_fec *ldp_fec_create_host(mpls_inet_addr * host)
{
  ldp_fec *fec = ldp_fec_create();

  if (fec != NULL) {
    fec->info.type = MPLS_FEC_HOST;
    memcpy(&fec->info.u.host, host, sizeof(mpls_inet_addr));
  }
  return fec;
}

ldp_fec *ldp_fec_create_prefix(mpls_inet_addr * prefix, int prefix_len)
{
  ldp_fec *fec = ldp_fec_create();

  if (fec != NULL) {
    fec->info.type = MPLS_FEC_PREFIX;
    memcpy(&fec->info.u.prefix.network, prefix, sizeof(mpls_inet_addr));
    fec->info.u.prefix.length = prefix_len;
  }
  return fec;
}

void ldp_fec_delete(ldp_fec * fec)
{
  mpls_free(fec);
}
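
/*
 * Encode an mpls_fec as element i of an outgoing FEC TLV.  Only prefix
 * and host address FEC elements are handled; the address family is
 * hard-coded to 1 (IPv4).
 */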
void mpls_fec2fec_tlv(mpls_fec * lf, mplsLdpFecTlv_t * tlv, int i)
{
  tlv->fecElArray[i].addressEl.addressFam = 1;

  switch (lf->type) {
    case MPLS_FEC_PREFIX:
      tlv->fecElArray[i].addressEl.type = MPLS_PREFIX_FEC;
      tlv->fecElArray[i].addressEl.preLen = lf->u.prefix.length;
      tlv->fecElArray[i].addressEl.address = lf->u.prefix.network.u.ipv4;
      tlv->fecElemTypes[i] = MPLS_PREFIX_FEC;
      break;
    case MPLS_FEC_HOST:
      tlv->fecElArray[i].addressEl.type = MPLS_HOSTADR_FEC;
      tlv->fecElArray[i].addressEl.preLen = MPLS_IPv4LEN;
      tlv->fecElArray[i].addressEl.address = lf->u.host.u.ipv4;
      tlv->fecElemTypes[i] = MPLS_HOSTADR_FEC;
      break;
    default:
      MPLS_ASSERT(0);
  }
}
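
/*
 * Decode element i of a received FEC TLV back into an mpls_fec; the
 * inverse of mpls_fec2fec_tlv, again limited to IPv4 prefix and host
 * address elements.
 */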
void fec_tlv2mpls_fec(mplsLdpFecTlv_t * tlv, int i, mpls_fec * lf)
{
  switch (tlv->fecElemTypes[i]) {
    case MPLS_PREFIX_FEC:
      lf->type = MPLS_FEC_PREFIX;
      lf->u.prefix.length = tlv->fecElArray[i].addressEl.preLen;
      lf->u.prefix.network.u.ipv4 = tlv->fecElArray[i].addressEl.address;
      lf->u.prefix.network.type = MPLS_FAMILY_IPV4;
      break;
    case MPLS_HOSTADR_FEC:
      lf->type = MPLS_FEC_HOST;
      lf->u.host.u.ipv4 = tlv->fecElArray[i].addressEl.address;
      lf->u.host.type = MPLS_FAMILY_IPV4;
      break;
    default:
      MPLS_ASSERT(0);
  }
}