/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

#define MAX_FORWARD_SIZE 1024
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size)
{
	struct sk_buff *skb;
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
	}
	return skb;
}
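
/* Illustrative sketch, not part of the original file: tipc_buf_acquire()
 * hands back a buffer whose data area already spans the requested size and
 * whose headroom can absorb a link-level header pushed later with skb_push().
 */
static inline struct sk_buff *example_raw_buffer(u32 hdr_plus_data)
{
	struct sk_buff *skb = tipc_buf_acquire(hdr_plus_data);

	if (skb)
		/* Headroom reserved for a data link header */
		WARN_ON(skb_headroom(skb) < BUF_HEADROOM);
	return skb;
}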
void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}
struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
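
/* Illustrative sketch, not part of the original file: one way a caller might
 * use tipc_msg_create() to build a self-contained, header-only control
 * message. The CONN_MANAGER/CONN_ACK values are just an example choice.
 */
static inline struct sk_buff *example_make_conn_ack(u32 onode, u32 oport,
						    u32 dnode, u32 dport)
{
	/* Allocates the buffer and fully initializes the header in one call */
	return tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
			       dnode, onode, dport, oport, TIPC_OK);
}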
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		*buf = NULL;
		TIPC_SKB_CB(head)->tail = NULL;
		if (skb_is_nonlinear(head)) {
			skb_walk_frags(head, tail) {
				TIPC_SKB_CB(head)->tail = tail;
			}
		} else {
			skb_frag_list_init(head);
		}
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = false;
		if (unlikely(!tipc_msg_validate(head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
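
/* Illustrative sketch, not part of the original file: how a receive path
 * might feed a queue of arriving fragments through tipc_buf_append() until
 * reassembly completes. The "frags" queue is a hypothetical name.
 */
static inline struct sk_buff *example_reassemble(struct sk_buff_head *frags)
{
	struct sk_buff *head = NULL;	/* reassembly context, NULL at start */
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(frags))) {
		if (tipc_buf_append(&head, &skb))
			return skb;	/* complete message returned in skb */
	}
	kfree_skb(head);		/* drop any partial reassembly */
	return NULL;
}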
/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff *skb)
{
	struct tipc_msg *msg;
	int msz, hsz;

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;
	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	msg = buf_msg(skb);
	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return false;

	msz = msg_size(msg);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = true;
	return true;
}
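
/* Illustrative sketch, not part of the original file: a receive handler would
 * normally validate a buffer before dereferencing any header fields. The
 * drop-on-failure policy shown here is just one possible choice.
 */
static inline bool example_rcv_precheck(struct sk_buff *skb)
{
	if (unlikely(!tipc_msg_validate(skb))) {
		kfree_skb(skb);	/* malformed: discard */
		return false;
	}
	/* Header is now known to be complete, linear and version-checked,
	 * so fields such as msg_user() and msg_type() can be read safely.
	 */
	return true;
}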
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
		   int offset, int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;
	} while (drem);

	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
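
/* Illustrative sketch, not part of the original file: the arithmetic behind
 * tipc_msg_build()'s fragmentation. Every fragment spends INT_H_SIZE bytes on
 * the fragment header, and the first fragment additionally carries the
 * original message header, so its data capacity is smaller than the rest.
 */
static inline int example_nr_fragments(int mhsz, int dsz, int pktmax)
{
	int first_cap = pktmax - INT_H_SIZE - mhsz;	/* payload in frag 1 */
	int rest_cap = pktmax - INT_H_SIZE;		/* payload per later frag */

	if (mhsz + dsz <= pktmax)
		return 1;				/* no fragmentation */
	return 1 + DIV_ROUND_UP(dsz - first_cap, rest_cap);
}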
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @bskb: the buffer to append to ("bundle")
 * @skb: buffer to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
{
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(skb);
	unsigned int bsz;
	unsigned int msz = msg_size(msg);
	u32 start, pad;
	u32 max = mtu - INT_H_SIZE;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (!bskb)
		return false;
	bmsg = buf_msg(bskb);
	bsz = msg_size(bmsg);
	start = align(bsz);
	pad = start - bsz;

	if (unlikely(msg_user(msg) == TUNNEL_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	kfree_skb(skb);
	return true;
}
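
/* Illustrative sketch, not part of the original file: how the bundling offset
 * is derived. Each bundled message starts at the next 4-byte aligned position
 * after the current end of the bundle, so up to three bytes of padding may be
 * inserted before it.
 */
static inline u32 example_bundle_offset(struct sk_buff *bskb)
{
	u32 bsz = msg_size(buf_msg(bskb));	/* current bundle size */

	return align(bsz);			/* where the next message lands */
}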
/**
 * tipc_msg_extract(): extract bundled inner packet from buffer
 * @skb: buffer to be extracted from.
 * @iskb: extracted inner buffer, to be returned
 * @pos: position in outer message of msg to be extracted.
 *       Returns position of next msg
 * Consumes outer buffer when last packet extracted
 * Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *msg;
	int imsz, offset;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	msg = buf_msg(skb);
	offset = msg_hdr_sz(msg) + *pos;
	if (unlikely(offset > (msg_size(msg) - MIN_H_SIZE)))
		goto none;

	*iskb = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!*iskb))
		goto none;
	skb_pull(*iskb, offset);
	imsz = msg_size(buf_msg(*iskb));
	skb_trim(*iskb, imsz);
	if (unlikely(!tipc_msg_validate(*iskb)))
		goto none;
	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
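
/* Illustrative sketch, not part of the original file: draining a MSG_BUNDLER
 * buffer by repeated extraction. The per-message handler is a hypothetical
 * callback; tipc_msg_extract() consumes the outer buffer once no further
 * inner message can be extracted.
 */
static inline void example_unbundle(struct sk_buff *bundle,
				    void (*deliver)(struct sk_buff *))
{
	struct sk_buff *iskb;
	int pos = 0;

	while (tipc_msg_extract(bundle, &iskb, &pos))
		deliver(iskb);	/* ownership of the inner clone passes on */
}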
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @skb: buffer to be appended and replaced
 * @mtu: max allowable size for the bundle buffer, including the header
 * @dnode: destination node for message. (Not always present in header)
 * Replaces buffer if successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
{
	struct sk_buff *bskb;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(*skb);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	bskb = tipc_buf_acquire(max);
	if (!bskb)
		return false;

	skb_trim(bskb, INT_H_SIZE);
	bmsg = buf_msg(bskb);
	tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
		      INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	tipc_msg_bundle(bskb, *skb, mtu);
	*skb = bskb;
	return true;
}
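
/* Illustrative sketch, not part of the original file: the typical caller
 * pattern for the two bundling helpers. First try to append "skb" to an
 * existing bundle at the queue tail; if that is not possible, try to start a
 * new bundle around it. The transmit queue and names here are hypothetical.
 */
static inline void example_try_bundle(struct sk_buff_head *txq,
				      struct sk_buff *skb, u32 mtu, u32 dnode)
{
	struct sk_buff *tail = skb_peek_tail(txq);

	if (tail && tipc_msg_bundle(tail, skb, mtu))
		return;				/* skb absorbed by existing bundle */
	if (tipc_msg_make_bundle(&skb, mtu, dnode)) {
		__skb_queue_tail(txq, skb);	/* skb now points to a new bundle */
		return;
	}
	__skb_queue_tail(txq, skb);		/* send as a regular packet */
}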
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf: buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err: error code to be set in message
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
		      int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	msg = buf_msg(buf);
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;
	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}
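
/* Illustrative sketch, not part of the original file: rejecting an incoming
 * message back to its sender. tipc_msg_reverse() turns the buffer into the
 * rejection in place; the actual transmit step is left out because it depends
 * on the caller's context (link, socket, etc.). TIPC_ERR_NO_PORT is just an
 * example error code.
 */
static inline struct sk_buff *example_reject(u32 own_addr, struct sk_buff *skb,
					     u32 *dnode)
{
	if (!tipc_msg_reverse(own_addr, skb, dnode, TIPC_ERR_NO_PORT))
		return NULL;	/* buffer already freed on failure */
	return skb;		/* ready to be sent towards *dnode */
}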
/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @skb: the buffer containing the message.
 * @dnode: return value: next-hop node, if destination found
 * @err: return value: error code to use, if message to be rejected
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
			  u32 *dnode, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport;
	u32 own_addr = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = -TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	if (msg_reroute_cnt(msg))
		return false;
	*dnode = addr_domain(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (*dnode != own_addr)
		msg_set_prevnode(msg, own_addr);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;
	return true;
}
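
/* Illustrative sketch, not part of the original file: the lookup/reject
 * pairing a receive path might use for a named message with no local
 * destination. If a new destination is found the caller forwards the buffer;
 * if the lookup asks for rejection, tipc_msg_reverse() prepares the same
 * buffer for return to the sender. Anything beyond that is left to the caller.
 */
static inline bool example_route_named(struct net *net, struct sk_buff *skb,
				       u32 *dnode)
{
	int err = TIPC_OK;

	if (tipc_msg_lookup_dest(net, skb, dnode, &err))
		return true;	/* forward towards the new *dnode */
	if (!err)
		return false;	/* not a routable named message; caller decides */
	/* Lookup failed: turn the buffer into a rejection aimed at the sender */
	return tipc_msg_reverse(tipc_own_addr(net), skb, dnode, -err);
}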
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	return frag;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}
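
/* Illustrative sketch, not part of the original file: producing a single,
 * self-contained copy of a fragmented message (for example for a local
 * receiver) without disturbing the original fragment chain, which remains
 * available for onward transmission.
 */
static inline struct sk_buff *example_local_copy(struct sk_buff_head *frags)
{
	struct sk_buff *copy = tipc_msg_reassemble(frags);

	if (!copy)
		return NULL;	/* cloning or reassembly failed */
	/* "frags" is untouched; "copy" is one complete message buffer */
	return copy;
}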