/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"

#define MAX_FORWARD_SIZE 1024

static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
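/* Illustrative sketch (not part of the original file): align() rounds a
 * length up to the next 4-byte boundary, e.g. align(53) == 56 and
 * align(56) == 56; tipc_msg_bundle() below relies on this when padding
 * bundled messages. The helper name is hypothetical.
 */
static inline unsigned int example_bundle_padding(unsigned int len)
{
	/* Bytes of padding needed to reach the next 4-byte boundary */
	return align(len) - len;
}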
void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
		   u32 destnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, tipc_own_addr);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, tipc_own_addr);
		msg_set_destnode(m, destnode);
	}
}
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
				uint data_sz, u32 dnode, u32 onode,
				u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_prevnode(msg, onode);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
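/* Illustrative sketch (not part of the original file): one way a caller
 * might use tipc_msg_create() to build a small, data-less control message
 * such as a connection probe. The peer/own identifiers are hypothetical
 * parameters; the constants are regular TIPC header constants.
 */
static inline struct sk_buff *example_make_conn_probe(u32 peer_node,
						      u32 peer_port,
						      u32 own_port)
{
	/* CONN_MANAGER message with an INT_H_SIZE header, no data and no
	 * error code; a non-zero errcode would turn it into a rejection.
	 */
	return tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			       peer_node, tipc_own_addr, peer_port, own_port,
			       TIPC_OK);
}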
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in: the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		skb_frag_list_init(head);
		TIPC_SKB_CB(head)->tail = NULL;
		*buf = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;

err:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
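/* Illustrative sketch (not part of the original file): the receive-side
 * step a caller might run around tipc_buf_append(). Fragments are fed in
 * one at a time; when the function returns 1, the fragment pointer has
 * been replaced by the fully reassembled message. example_deliver() is a
 * hypothetical callback.
 */
static inline void example_reassemble_one(struct sk_buff **reasm_head,
					  struct sk_buff *frag,
					  void (*example_deliver)(struct sk_buff *))
{
	if (tipc_buf_append(reasm_head, &frag))
		example_deliver(frag);	/* frag now holds the whole message */
	/* otherwise frag was absorbed (or dropped) and *reasm_head updated */
}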
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @iov: User data to be copied into the chain
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @chain: Buffer or chain of buffers to be returned to caller
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
		   int offset, int dsz, int pktmax, struct sk_buff **chain)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *buf, *prev;
	char *pktpos;
	int rc;
	uint chain_sz = 0;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		buf = tipc_buf_acquire(msz);
		*chain = buf;
		if (unlikely(!buf))
			return -ENOMEM;
		skb_copy_to_linear_data(buf, mhdr, mhsz);
		pktpos = buf->data + mhsz;
		TIPC_SKB_CB(buf)->chain_sz = 1;
		if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment */
	*chain = buf = tipc_buf_acquire(pktmax);
	if (!buf)
		return -ENOMEM;
	chain_sz = 1;
	pktpos = buf->data;
	skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		prev = buf;
		buf = tipc_buf_acquire(pktsz);
		if (!buf) {
			rc = -ENOMEM;
			goto error;
		}
		chain_sz++;
		prev->next = buf;
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
		pktpos = buf->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;
	} while (1);

	TIPC_SKB_CB(*chain)->chain_sz = chain_sz;
	msg_set_type(buf_msg(buf), LAST_FRAGMENT);
	return dsz;
error:
	kfree_skb_list(*chain);
	*chain = NULL;
	return rc;
}
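/* Illustrative sketch (not part of the original file): how a send path
 * might use tipc_msg_build(). 'hdr' is a header the caller has prepared
 * (for instance via tipc_msg_init()) and 'mtu' is the link MTU; the
 * variable names are hypothetical.
 */
static inline int example_build_chain(struct tipc_msg *hdr,
				      struct iovec const *iov, int dsz,
				      int mtu, struct sk_buff **chain)
{
	int rc;

	rc = tipc_msg_build(hdr, iov, 0, dsz, mtu, chain);
	if (rc < 0)
		return rc;	/* -ENOMEM or -EFAULT, *chain already freed */
	/* *chain now holds one packet, or a FIRST/FRAGMENT/LAST chain */
	return rc;		/* equals dsz on success */
}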
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @bbuf: the existing buffer ("bundle")
 * @buf: buffer to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes the appended buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
{
	struct tipc_msg *bmsg = buf_msg(bbuf);
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int bsz = msg_size(bmsg);
	unsigned int msz = msg_size(msg);
	u32 start = align(bsz);
	u32 max = mtu - INT_H_SIZE;
	u32 pad = start - bsz;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (likely(msg_type(bmsg) != BUNDLE_OPEN))
		return false;
	if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bbuf, pad + msz);
	skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	bbuf->next = buf->next;
	kfree_skb(buf);
	return true;
}
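/* Illustrative sketch (not part of the original file): appending two
 * small messages to an open bundle. Each successful call pads the bundle
 * to a 4-byte boundary, copies the whole message (header included) after
 * it, bumps the bundle's message count and consumes the appended buffer;
 * on failure the caller keeps ownership of the buffer.
 */
static inline int example_bundle_two(struct sk_buff *bundle,
				     struct sk_buff *a, struct sk_buff *b,
				     u32 mtu)
{
	int cnt = 0;

	if (tipc_msg_bundle(bundle, a, mtu))
		cnt++;
	if (tipc_msg_bundle(bundle, b, mtu))
		cnt++;
	return cnt;	/* number of messages absorbed into the bundle */
}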
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @buf: buffer to be appended and replaced
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Replaces buffer if successful
 * Returns true on success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
{
	struct sk_buff *bbuf;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(*buf);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	bbuf = tipc_buf_acquire(max);
	if (!bbuf)
		return false;

	skb_trim(bbuf, INT_H_SIZE);
	bmsg = buf_msg(bbuf);
	tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	bbuf->next = (*buf)->next;
	tipc_msg_bundle(bbuf, *buf, mtu);
	*buf = bbuf;
	return true;
}
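/* Illustrative sketch (not part of the original file): the usual pattern
 * is to try appending to an existing bundle first and only then to start
 * a new one, replacing the original buffer. 'tail' is a hypothetical
 * pointer to the last buffer already queued toward 'dnode'.
 */
static inline bool example_bundle_or_make(struct sk_buff *tail,
					  struct sk_buff **buf,
					  u32 mtu, u32 dnode)
{
	if (tail && tipc_msg_bundle(tail, *buf, mtu)) {
		*buf = NULL;	/* consumed by the existing bundle */
		return true;
	}
	/* On success *buf now points at a fresh MSG_BUNDLER buffer */
	return tipc_msg_make_bundle(buf, mtu, dnode);
}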
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf: buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err: error code to be set in message
 * Consumes buffer on failure
 * Returns true on success, otherwise false
 */
bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint imp = msg_importance(msg);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;

	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
	if (msg_isdata(msg))
		msg_set_importance(msg, imp);
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, tipc_own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}
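/* Illustrative sketch (not part of the original file): rejecting an
 * undeliverable message back to its sender. The transmit helper is a
 * hypothetical callback standing in for whatever the caller uses to send
 * toward 'dnode'.
 */
static inline void example_reject(struct sk_buff *buf, int err,
				  void (*example_xmit)(struct sk_buff *, u32))
{
	u32 dnode;

	if (tipc_msg_reverse(buf, &dnode, err))
		example_xmit(buf, dnode);
	/* else: the buffer was consumed (dropped) by tipc_msg_reverse() */
}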
/**
 * tipc_msg_eval: determine fate of message that found no destination
 * @buf: the buffer containing the message.
 * @dnode: return value: next-hop node, if message to be forwarded
 *
 * Does not consume buffer
 * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
 * code if message to be rejected
 */
int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dport;

	if (msg_type(msg) != TIPC_NAMED_MSG)
		return -TIPC_ERR_NO_PORT;
	if (skb_linearize(buf))
		return -TIPC_ERR_NO_NAME;
	if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
		return -TIPC_ERR_NO_NAME;
	if (msg_reroute_cnt(msg) > 0)
		return -TIPC_ERR_NO_NAME;

	*dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg),
				       msg_nameinst(msg),
				       dnode);
	if (!dport)
		return -TIPC_ERR_NO_NAME;
	msg_incr_reroute_cnt(msg);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	return TIPC_OK;
}
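/* Illustrative sketch (not part of the original file): combining
 * tipc_msg_eval() and tipc_msg_reverse(). If a new destination is found
 * the message is forwarded; otherwise it is bounced back to the sender
 * with the returned error code. The two transmit helpers are hypothetical.
 */
static inline void example_no_dest(struct sk_buff *buf,
				   void (*example_forward)(struct sk_buff *, u32),
				   void (*example_bounce)(struct sk_buff *, u32))
{
	u32 dnode;
	int err;

	err = tipc_msg_eval(buf, &dnode);
	if (err == TIPC_OK) {
		example_forward(buf, dnode);	/* retry lookup on dnode */
		return;
	}
	/* err is a negative -TIPC_ERR_* value; reverse wants it positive */
	if (tipc_msg_reverse(buf, &dnode, -err))
		example_bounce(buf, dnode);
}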
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
{
	struct sk_buff *buf = chain;
	struct sk_buff *frag = buf;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (!buf->next) {
		hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
		return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	while (buf) {
		frag = skb_clone(buf, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
		buf = buf->next;
	}
	return frag;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}
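/* Illustrative sketch (not part of the original file): a multicast send
 * path could use tipc_msg_reassemble() to obtain a single, reassembled
 * copy of an outgoing fragment chain for local delivery, while the
 * original chain is still handed to the link layer. The delivery helper
 * is hypothetical.
 */
static inline void example_mcast_loopback(struct sk_buff *chain,
					  void (*example_deliver_local)(struct sk_buff *))
{
	struct sk_buff *copy = tipc_msg_reassemble(chain);

	if (copy)
		example_deliver_local(copy);
	/* 'chain' itself is untouched and can still be transmitted */
}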