// SPDX-License-Identifier: GPL-2.0-only
/*
 *	IEEE 802.1Q Multiple Registration Protocol (MRP)
 *
 *	Copyright (c) 2012 Massachusetts Institute of Technology
 *
 *	Adapted from code in net/802/garp.c
 *	Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 */
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/mrp.h>
#include <asm/unaligned.h>

static unsigned int mrp_join_time __read_mostly = 200;
module_param(mrp_join_time, uint, 0644);
MODULE_PARM_DESC(mrp_join_time, "Join time in ms (default 200ms)");

static unsigned int mrp_periodic_time __read_mostly = 1000;
module_param(mrp_periodic_time, uint, 0644);
MODULE_PARM_DESC(mrp_periodic_time, "Periodic time in ms (default 1s)");

MODULE_LICENSE("GPL");
static const u8
mrp_applicant_state_table[MRP_APPLICANT_MAX + 1][MRP_EVENT_MAX + 1] = {
	[MRP_APPLICANT_VO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VO,
	},
	[MRP_APPLICANT_VP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_VO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VP,
	},
	[MRP_APPLICANT_VN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AN,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_VN,
	},
	[MRP_APPLICANT_AN] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_AN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AN,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VN,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VN,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VN,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AN,
	},
	[MRP_APPLICANT_AA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_QA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AA,
	},
	[MRP_APPLICANT_LA] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AA,
		[MRP_EVENT_LV]		= MRP_APPLICANT_LA,
		[MRP_EVENT_TX]		= MRP_APPLICANT_VO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_LA,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_LA,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_LA,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_LA,
	},
	[MRP_APPLICANT_AO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_AO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AO,
	},
	[MRP_APPLICANT_QO] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QO,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QO,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AO,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VO,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VO,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VO,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_QO,
	},
	[MRP_APPLICANT_AP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_AO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QA,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
	[MRP_APPLICANT_QP] = {
		[MRP_EVENT_NEW]		= MRP_APPLICANT_VN,
		[MRP_EVENT_JOIN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_LV]		= MRP_APPLICANT_QO,
		[MRP_EVENT_TX]		= MRP_APPLICANT_QP,
		[MRP_EVENT_R_NEW]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_IN]	= MRP_APPLICANT_QP,
		[MRP_EVENT_R_JOIN_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_MT]	= MRP_APPLICANT_AP,
		[MRP_EVENT_R_LV]	= MRP_APPLICANT_VP,
		[MRP_EVENT_R_LA]	= MRP_APPLICANT_VP,
		[MRP_EVENT_REDECLARE]	= MRP_APPLICANT_VP,
		[MRP_EVENT_PERIODIC]	= MRP_APPLICANT_AP,
	},
};
static const u8
mrp_tx_action_table[MRP_APPLICANT_MAX + 1] = {
	[MRP_APPLICANT_VO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_VP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_VN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AN] = MRP_TX_ACTION_S_NEW,
	[MRP_APPLICANT_AA] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QA] = MRP_TX_ACTION_S_JOIN_IN_OPTIONAL,
	[MRP_APPLICANT_LA] = MRP_TX_ACTION_S_LV,
	[MRP_APPLICANT_AO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_QO] = MRP_TX_ACTION_S_IN_OPTIONAL,
	[MRP_APPLICANT_AP] = MRP_TX_ACTION_S_JOIN_IN,
	[MRP_APPLICANT_QP] = MRP_TX_ACTION_S_IN_OPTIONAL,
};
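
/*
 * Example of how the two tables above drive the applicant state machine
 * (values taken straight from the tables): an attribute in
 * MRP_APPLICANT_VO that sees MRP_EVENT_JOIN moves to MRP_APPLICANT_VP,
 * i.e.
 *
 *	mrp_applicant_state_table[MRP_APPLICANT_VO][MRP_EVENT_JOIN]
 *		== MRP_APPLICANT_VP
 *
 * On the next MRP_EVENT_TX, the transmit action for the current state is
 * looked up first, e.g.
 *
 *	mrp_tx_action_table[MRP_APPLICANT_VP] == MRP_TX_ACTION_S_JOIN_IN
 *
 * which makes mrp_attr_event() append a JoinIn event for the attribute
 * before the state is advanced.
 */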
static void mrp_attrvalue_inc(void *value, u8 len)
{
	u8 *v = (u8 *)value;

	/* Add 1 to the last byte. If it becomes zero,
	 * go to the previous byte and repeat.
	 */
	while (len > 0 && !++v[--len])
		;
}
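
/*
 * For example, incrementing the two-byte value { 0x00, 0xff } yields
 * { 0x01, 0x00 }: the last byte wraps to zero, so the carry propagates into
 * the previous byte. The attribute value is effectively treated as one
 * big-endian integer, which is what lets attributes with consecutive values
 * share a single VectorAttribute on the wire.
 */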
static int mrp_attr_cmp(const struct mrp_attr *attr,
			const void *value, u8 len, u8 type)
{
	if (attr->type != type)
		return attr->type - type;
	if (attr->len != len)
		return attr->len - len;
	return memcmp(attr->value, value, len);
}

static struct mrp_attr *mrp_attr_lookup(const struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (parent) {
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			parent = parent->rb_left;
		else if (d < 0)
			parent = parent->rb_right;
		else
			return attr;
	}
	return NULL;
}

static struct mrp_attr *mrp_attr_create(struct mrp_applicant *app,
					const void *value, u8 len, u8 type)
{
	struct rb_node *parent = NULL, **p = &app->mad.rb_node;
	struct mrp_attr *attr;
	int d;

	while (*p) {
		parent = *p;
		attr = rb_entry(parent, struct mrp_attr, node);
		d = mrp_attr_cmp(attr, value, len, type);
		if (d > 0)
			p = &parent->rb_left;
		else if (d < 0)
			p = &parent->rb_right;
		else {
			/* The attribute already exists; re-use it. */
			return attr;
		}
	}
	attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC);
	if (!attr)
		return attr;
	attr->state = MRP_APPLICANT_VO;
	attr->type = type;
	attr->len = len;
	memcpy(attr->value, value, len);

	rb_link_node(&attr->node, parent, p);
	rb_insert_color(&attr->node, &app->mad);
	return attr;
}

static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
{
	rb_erase(&attr->node, &app->mad);
	kfree(attr);
}
static int mrp_pdu_init(struct mrp_applicant *app)
{
	struct sk_buff *skb;
	struct mrp_pdu_hdr *ph;

	skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev),
			GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	skb->dev = app->dev;
	skb->protocol = app->app->pkttype.type;
	skb_reserve(skb, LL_RESERVED_SPACE(app->dev));
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);

	ph = __skb_put(skb, sizeof(*ph));
	ph->version = app->app->version;

	app->pdu = skb;
	return 0;
}

static int mrp_pdu_append_end_mark(struct mrp_applicant *app)
{
	__be16 *endmark;

	if (skb_tailroom(app->pdu) < sizeof(*endmark))
		return -1;
	endmark = __skb_put(app->pdu, sizeof(*endmark));
	put_unaligned(MRP_END_MARK, endmark);
	return 0;
}

static void mrp_pdu_queue(struct mrp_applicant *app)
{
	if (!app->pdu)
		return;

	if (mrp_cb(app->pdu)->mh)
		mrp_pdu_append_end_mark(app);
	mrp_pdu_append_end_mark(app);

	dev_hard_header(app->pdu, app->dev, ntohs(app->app->pkttype.type),
			app->app->group_address, app->dev->dev_addr,
			app->pdu->len);

	skb_queue_tail(&app->queue, app->pdu);
	app->pdu = NULL;
}

static void mrp_queue_xmit(struct mrp_applicant *app)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&app->queue)))
		dev_queue_xmit(skb);
}
static int mrp_pdu_append_msg_hdr(struct mrp_applicant *app,
				  u8 attrtype, u8 attrlen)
{
	struct mrp_msg_hdr *mh;

	if (mrp_cb(app->pdu)->mh) {
		if (mrp_pdu_append_end_mark(app) < 0)
			return -1;
		mrp_cb(app->pdu)->mh = NULL;
		mrp_cb(app->pdu)->vah = NULL;
	}

	if (skb_tailroom(app->pdu) < sizeof(*mh))
		return -1;
	mh = __skb_put(app->pdu, sizeof(*mh));
	mh->attrtype = attrtype;
	mh->attrlen = attrlen;
	mrp_cb(app->pdu)->mh = mh;
	return 0;
}

static int mrp_pdu_append_vecattr_hdr(struct mrp_applicant *app,
				      const void *firstattrvalue, u8 attrlen)
{
	struct mrp_vecattr_hdr *vah;

	if (skb_tailroom(app->pdu) < sizeof(*vah) + attrlen)
		return -1;
	vah = __skb_put(app->pdu, sizeof(*vah) + attrlen);
	put_unaligned(0, &vah->lenflags);
	memcpy(vah->firstattrvalue, firstattrvalue, attrlen);
	mrp_cb(app->pdu)->vah = vah;
	memcpy(mrp_cb(app->pdu)->attrvalue, firstattrvalue, attrlen);
	return 0;
}
static int mrp_pdu_append_vecattr_event(struct mrp_applicant *app,
					const struct mrp_attr *attr,
					enum mrp_vecattr_event vaevent)
{
	u16 len, pos;
	u8 *vaevents;
	int err;
again:
	if (!app->pdu) {
		err = mrp_pdu_init(app);
		if (err < 0)
			return err;
	}

	/* If there is no Message header in the PDU, or the Message header is
	 * for a different attribute type, add an EndMark (if necessary) and a
	 * new Message header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->mh ||
	    mrp_cb(app->pdu)->mh->attrtype != attr->type ||
	    mrp_cb(app->pdu)->mh->attrlen != attr->len) {
		if (mrp_pdu_append_msg_hdr(app, attr->type, attr->len) < 0)
			goto queue;
	}

	/* If there is no VectorAttribute header for this Message in the PDU,
	 * or this attribute's value does not sequentially follow the previous
	 * attribute's value, add a new VectorAttribute header to the PDU.
	 */
	if (!mrp_cb(app->pdu)->vah ||
	    memcmp(mrp_cb(app->pdu)->attrvalue, attr->value, attr->len)) {
		if (mrp_pdu_append_vecattr_hdr(app, attr->value, attr->len) < 0)
			goto queue;
	}

	len = be16_to_cpu(get_unaligned(&mrp_cb(app->pdu)->vah->lenflags));
	pos = len % 3;

	/* Events are packed into Vectors in the PDU, three to a byte. Add a
	 * byte to the end of the Vector if necessary.
	 */
	if (!pos) {
		if (skb_tailroom(app->pdu) < sizeof(u8))
			goto queue;
		vaevents = __skb_put(app->pdu, sizeof(u8));
	} else {
		vaevents = (u8 *)(skb_tail_pointer(app->pdu) - sizeof(u8));
	}

	switch (pos) {
	case 0:
		*vaevents = vaevent * (__MRP_VECATTR_EVENT_MAX *
				       __MRP_VECATTR_EVENT_MAX);
		break;
	case 1:
		*vaevents += vaevent * __MRP_VECATTR_EVENT_MAX;
		break;
	case 2:
		*vaevents += vaevent;
		break;
	default:
		WARN_ON(1);
	}

	/* Increment the length of the VectorAttribute in the PDU, as well as
	 * the value of the next attribute that would continue its Vector.
	 */
	put_unaligned(cpu_to_be16(++len), &mrp_cb(app->pdu)->vah->lenflags);
	mrp_attrvalue_inc(mrp_cb(app->pdu)->attrvalue, attr->len);

	return 0;

queue:
	mrp_pdu_queue(app);
	goto again;
}
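
/*
 * Worked example of the packing done above, using the base
 * __MRP_VECATTR_EVENT_MAX (six vector event codes): three events e0, e1, e2
 * that land in the same Vector byte end up encoded as
 *
 *	byte = (e0 * 6 + e1) * 6 + e2
 *
 * so, following the event ordering used by the parser below, JoinIn (1),
 * In (2) and Mt (4) become 1 * 36 + 2 * 6 + 4 = 52. The "pos = len % 3"
 * logic above fills that byte one event at a time as the Vector length
 * grows.
 */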
static void mrp_attr_event(struct mrp_applicant *app,
			   struct mrp_attr *attr, enum mrp_event event)
{
	enum mrp_applicant_state state;

	state = mrp_applicant_state_table[attr->state][event];
	if (state == MRP_APPLICANT_INVALID) {
		WARN_ON(1);
		return;
	}

	if (event == MRP_EVENT_TX) {
		/* When appending the attribute fails, don't update its state
		 * in order to retry at the next TX event.
		 */

		switch (mrp_tx_action_table[attr->state]) {
		case MRP_TX_ACTION_NONE:
		case MRP_TX_ACTION_S_JOIN_IN_OPTIONAL:
		case MRP_TX_ACTION_S_IN_OPTIONAL:
			break;
		case MRP_TX_ACTION_S_NEW:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_NEW) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_JOIN_IN:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_JOIN_IN) < 0)
				return;
			break;
		case MRP_TX_ACTION_S_LV:
			if (mrp_pdu_append_vecattr_event(
				    app, attr, MRP_VECATTR_EVENT_LV) < 0)
				return;
			/* As a pure applicant, sending a leave message
			 * implies that the attribute was unregistered and
			 * can be destroyed.
			 */
			mrp_attr_destroy(app, attr);
			return;
		default:
			WARN_ON(1);
		}
	}

	attr->state = state;
}
int mrp_request_join(const struct net_device *dev,
		     const struct mrp_application *appl,
		     const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return -ENOMEM;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_create(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return -ENOMEM;
	}
	mrp_attr_event(app, attr, MRP_EVENT_JOIN);
	spin_unlock_bh(&app->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_request_join);
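
/*
 * Usage sketch (illustrative only; the application struct, attribute type
 * and VLAN-style value below are hypothetical placeholders, not anything
 * defined in this file): after mrp_register_application() and
 * mrp_init_applicant(), a caller declares and later withdraws an attribute
 * with mrp_request_join() and mrp_request_leave() (below):
 *
 *	__be16 vid = htons(100);
 *
 *	err = mrp_request_join(dev, &my_app, &vid, sizeof(vid), MY_ATTR_VID);
 *	...
 *	mrp_request_leave(dev, &my_app, &vid, sizeof(vid), MY_ATTR_VID);
 *
 * Both calls must be made with the RTNL held, since the port and applicant
 * pointers are fetched with rtnl_dereference().
 */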
void mrp_request_leave(const struct net_device *dev,
		       const struct mrp_application *appl,
		       const void *value, u8 len, u8 type)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);
	struct mrp_attr *attr;

	if (sizeof(struct mrp_skb_cb) + len >
	    sizeof_field(struct sk_buff, cb))
		return;

	spin_lock_bh(&app->lock);
	attr = mrp_attr_lookup(app, value, len, type);
	if (!attr) {
		spin_unlock_bh(&app->lock);
		return;
	}
	mrp_attr_event(app, attr, MRP_EVENT_LV);
	spin_unlock_bh(&app->lock);
}
EXPORT_SYMBOL_GPL(mrp_request_leave);

static void mrp_mad_event(struct mrp_applicant *app, enum mrp_event event)
{
	struct rb_node *node, *next;
	struct mrp_attr *attr;

	for (node = rb_first(&app->mad);
	     next = node ? rb_next(node) : NULL, node != NULL;
	     node = next) {
		attr = rb_entry(node, struct mrp_attr, node);
		mrp_attr_event(app, attr, event);
	}
}
static void mrp_join_timer_arm(struct mrp_applicant *app)
{
	unsigned long delay;

	/* Pick a uniformly distributed random delay in [0, join time)
	 * jiffies by scaling a 32-bit random number.
	 */
	delay = (u64)msecs_to_jiffies(mrp_join_time) * prandom_u32() >> 32;
	mod_timer(&app->join_timer, jiffies + delay);
}
static void mrp_join_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, join_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_queue_xmit(app);
	mrp_join_timer_arm(app);
}

static void mrp_periodic_timer_arm(struct mrp_applicant *app)
{
	mod_timer(&app->periodic_timer,
		  jiffies + msecs_to_jiffies(mrp_periodic_time));
}

static void mrp_periodic_timer(struct timer_list *t)
{
	struct mrp_applicant *app = from_timer(app, t, periodic_timer);

	spin_lock(&app->lock);
	mrp_mad_event(app, MRP_EVENT_PERIODIC);
	mrp_pdu_queue(app);
	spin_unlock(&app->lock);

	mrp_periodic_timer_arm(app);
}
static int mrp_pdu_parse_end_mark(struct sk_buff *skb, int *offset)
{
	__be16 endmark;

	if (skb_copy_bits(skb, *offset, &endmark, sizeof(endmark)) < 0)
		return -1;
	if (endmark == MRP_END_MARK) {
		*offset += sizeof(endmark);
		return -1;
	}
	return 0;
}

static void mrp_pdu_parse_vecattr_event(struct mrp_applicant *app,
					struct sk_buff *skb,
					enum mrp_vecattr_event vaevent)
{
	struct mrp_attr *attr;
	enum mrp_event event;

	attr = mrp_attr_lookup(app, mrp_cb(skb)->attrvalue,
			       mrp_cb(skb)->mh->attrlen,
			       mrp_cb(skb)->mh->attrtype);
	if (attr == NULL)
		return;

	switch (vaevent) {
	case MRP_VECATTR_EVENT_NEW:
		event = MRP_EVENT_R_NEW;
		break;
	case MRP_VECATTR_EVENT_JOIN_IN:
		event = MRP_EVENT_R_JOIN_IN;
		break;
	case MRP_VECATTR_EVENT_IN:
		event = MRP_EVENT_R_IN;
		break;
	case MRP_VECATTR_EVENT_JOIN_MT:
		event = MRP_EVENT_R_JOIN_MT;
		break;
	case MRP_VECATTR_EVENT_MT:
		event = MRP_EVENT_R_MT;
		break;
	case MRP_VECATTR_EVENT_LV:
		event = MRP_EVENT_R_LV;
		break;
	default:
		return;
	}

	mrp_attr_event(app, attr, event);
}
static int mrp_pdu_parse_vecattr(struct mrp_applicant *app,
				 struct sk_buff *skb, int *offset)
{
	struct mrp_vecattr_hdr _vah;
	u16 valen;
	u8 vaevents, vaevent;

	mrp_cb(skb)->vah = skb_header_pointer(skb, *offset, sizeof(_vah),
					      &_vah);
	if (!mrp_cb(skb)->vah)
		return -1;
	*offset += sizeof(_vah);

	if (get_unaligned(&mrp_cb(skb)->vah->lenflags) &
	    MRP_VECATTR_HDR_FLAG_LA)
		mrp_mad_event(app, MRP_EVENT_R_LA);
	valen = be16_to_cpu(get_unaligned(&mrp_cb(skb)->vah->lenflags) &
			    MRP_VECATTR_HDR_LEN_MASK);

	/* The VectorAttribute structure in a PDU carries event information
	 * about one or more attributes having consecutive values. Only the
	 * value for the first attribute is contained in the structure. So
	 * we make a copy of that value, and then increment it each time we
	 * advance to the next event in its Vector.
	 */
	if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen >
	    sizeof_field(struct sk_buff, cb))
		return -1;
	if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue,
			  mrp_cb(skb)->mh->attrlen) < 0)
		return -1;
	*offset += mrp_cb(skb)->mh->attrlen;

	/* In a VectorAttribute, the Vector contains events which are packed
	 * three to a byte. We process one byte of the Vector at a time.
	 */
	while (valen > 0) {
		if (skb_copy_bits(skb, *offset, &vaevents,
				  sizeof(vaevents)) < 0)
			return -1;
		*offset += sizeof(vaevents);

		/* Extract and process the first event. */
		vaevent = vaevents / (__MRP_VECATTR_EVENT_MAX *
				      __MRP_VECATTR_EVENT_MAX);
		if (vaevent >= __MRP_VECATTR_EVENT_MAX) {
			/* The byte is malformed; stop processing. */
			return -1;
		}
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the second event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= (__MRP_VECATTR_EVENT_MAX *
			     __MRP_VECATTR_EVENT_MAX);
		vaevent = vaevents / __MRP_VECATTR_EVENT_MAX;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);

		/* If present, extract and process the third event. */
		if (!--valen)
			break;
		mrp_attrvalue_inc(mrp_cb(skb)->attrvalue,
				  mrp_cb(skb)->mh->attrlen);
		vaevents %= __MRP_VECATTR_EVENT_MAX;
		vaevent = vaevents;
		mrp_pdu_parse_vecattr_event(app, skb, vaevent);
	}
	return 0;
}
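
/*
 * Decoding mirrors the packing done in mrp_pdu_append_vecattr_event(): for
 * a Vector byte of 52 the loop above recovers 52 / 36 = 1 (JoinIn),
 * (52 % 36) / 6 = 2 (In) and 52 % 6 = 4 (Mt), stopping early when the
 * remaining NumberOfValues count says fewer events are actually present.
 */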
static int mrp_pdu_parse_msg(struct mrp_applicant *app, struct sk_buff *skb,
			     int *offset)
{
	struct mrp_msg_hdr _mh;

	mrp_cb(skb)->mh = skb_header_pointer(skb, *offset, sizeof(_mh), &_mh);
	if (!mrp_cb(skb)->mh)
		return -1;
	*offset += sizeof(_mh);

	if (mrp_cb(skb)->mh->attrtype == 0 ||
	    mrp_cb(skb)->mh->attrtype > app->app->maxattr ||
	    mrp_cb(skb)->mh->attrlen == 0)
		return -1;

	while (skb->len > *offset) {
		if (mrp_pdu_parse_end_mark(skb, offset) < 0)
			break;
		if (mrp_pdu_parse_vecattr(app, skb, offset) < 0)
			return -1;
	}
	return 0;
}
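
/*
 * Rough on-wire layout handled by the parse functions above and by
 * mrp_rcv() below (a sketch derived from this code, not a substitute for
 * the IEEE 802.1Q definitions):
 *
 *	ProtocolVersion (1 byte)
 *	Message:
 *		AttributeType, AttributeLength		(mrp_msg_hdr)
 *		VectorAttribute:
 *			LeaveAll flag + NumberOfValues	(mrp_vecattr_hdr)
 *			FirstValue (AttributeLength bytes)
 *			Vector (one byte per up to three events)
 *		... further VectorAttributes ...
 *		EndMark
 *	... further Messages ...
 *	EndMark
 */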
static int mrp_rcv(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pt, struct net_device *orig_dev)
{
	struct mrp_application *appl = container_of(pt, struct mrp_application,
						    pkttype);
	struct mrp_port *port;
	struct mrp_applicant *app;
	struct mrp_pdu_hdr _ph;
	const struct mrp_pdu_hdr *ph;
	int offset = skb_network_offset(skb);

	/* If the interface is in promiscuous mode, drop the packet if
	 * it was unicast to another host.
	 */
	if (unlikely(skb->pkt_type == PACKET_OTHERHOST))
		goto out;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto out;
	port = rcu_dereference(dev->mrp_port);
	if (unlikely(!port))
		goto out;
	app = rcu_dereference(port->applicants[appl->type]);
	if (unlikely(!app))
		goto out;

	ph = skb_header_pointer(skb, offset, sizeof(_ph), &_ph);
	if (!ph)
		goto out;
	offset += sizeof(_ph);

	if (ph->version != app->app->version)
		goto out;

	spin_lock(&app->lock);
	while (skb->len > offset) {
		if (mrp_pdu_parse_end_mark(skb, &offset) < 0)
			break;
		if (mrp_pdu_parse_msg(app, skb, &offset) < 0)
			break;
	}
	spin_unlock(&app->lock);
out:
	kfree_skb(skb);
	return 0;
}
static int mrp_init_port(struct net_device *dev)
{
	struct mrp_port *port;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;
	rcu_assign_pointer(dev->mrp_port, port);
	return 0;
}

static void mrp_release_port(struct net_device *dev)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	unsigned int i;

	for (i = 0; i <= MRP_APPLICATION_MAX; i++) {
		if (rtnl_dereference(port->applicants[i]))
			return;
	}
	RCU_INIT_POINTER(dev->mrp_port, NULL);
	kfree_rcu(port, rcu);
}
int mrp_init_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_applicant *app;
	int err;

	ASSERT_RTNL();

	if (!rtnl_dereference(dev->mrp_port)) {
		err = mrp_init_port(dev);
		if (err < 0)
			goto err1;
	}

	err = -ENOMEM;
	app = kzalloc(sizeof(*app), GFP_KERNEL);
	if (!app)
		goto err2;

	err = dev_mc_add(dev, appl->group_address);
	if (err < 0)
		goto err3;

	app->dev = dev;
	app->app = appl;
	app->mad = RB_ROOT;
	spin_lock_init(&app->lock);
	skb_queue_head_init(&app->queue);
	rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
	timer_setup(&app->join_timer, mrp_join_timer, 0);
	mrp_join_timer_arm(app);
	timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
	mrp_periodic_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	mrp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(mrp_init_applicant);
void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
{
	struct mrp_port *port = rtnl_dereference(dev->mrp_port);
	struct mrp_applicant *app = rtnl_dereference(
		port->applicants[appl->type]);

	ASSERT_RTNL();

	RCU_INIT_POINTER(port->applicants[appl->type], NULL);

	/* Delete timer and generate a final TX event to flush out
	 * all pending messages before the applicant is gone.
	 */
	del_timer_sync(&app->join_timer);
	del_timer_sync(&app->periodic_timer);

	spin_lock_bh(&app->lock);
	mrp_mad_event(app, MRP_EVENT_TX);
	mrp_pdu_queue(app);
	spin_unlock_bh(&app->lock);

	mrp_queue_xmit(app);

	dev_mc_del(dev, appl->group_address);
	kfree_rcu(app, rcu);
	mrp_release_port(dev);
}
EXPORT_SYMBOL_GPL(mrp_uninit_applicant);
int mrp_register_application(struct mrp_application *appl)
{
	appl->pkttype.func = mrp_rcv;
	dev_add_pack(&appl->pkttype);
	return 0;
}
EXPORT_SYMBOL_GPL(mrp_register_application);
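
/*
 * Illustrative registration sketch (the struct below is a hypothetical
 * example of an MRP application definition, not something defined in this
 * file; real users fill in their own attribute range, group address,
 * packet type and version):
 *
 *	static struct mrp_application my_app __read_mostly = {
 *		.type		= MRP_APPLICATION_MVRP,
 *		.maxattr	= MY_ATTR_MAX,
 *		.pkttype.type	= htons(MY_ETH_P),
 *		.group_address	= MY_GROUP_ADDRESS,
 *		.version	= 0,
 *	};
 *
 * A module calls mrp_register_application(&my_app) once at init time and
 * mrp_unregister_application(&my_app) on exit; per-device state is then
 * managed with mrp_init_applicant()/mrp_uninit_applicant().
 */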
void mrp_unregister_application(struct mrp_application *appl)
{
	dev_remove_pack(&appl->pkttype);
}
EXPORT_SYMBOL_GPL(mrp_unregister_application);