/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#define MAX_FORWARD_SIZE 1024
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
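/* Illustrative values: align(26) == 28 and align(28) == 28.  This rounding
 * up to the next 4-byte boundary is what produces the "pad" bytes between
 * messages packed into a bundle (see tipc_msg_bundle() below).
 */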
void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
		   u32 destnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, tipc_own_addr);
	msg_set_type(m, type);
	msg_set_orignode(m, tipc_own_addr);
	msg_set_destnode(m, destnode);
}
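/* Example of how this initializer is used further down in this file, when
 * tipc_msg_build() prepares its reusable fragment header:
 *
 *	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
 *		      INT_H_SIZE, msg_destnode(mhdr));
 *
 * After the call, both the header size and the total size of pkthdr equal
 * INT_H_SIZE, until msg_set_size() stretches the latter to the fragment size.
 */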
/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf: in:  the buffer to append. Always defined
 *        out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
			goto err;
		head = *headbuf = frag;
		skb_frag_list_init(head);
		TIPC_SKB_CB(head)->tail = NULL;
		*buf = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;

err:
	pr_warn_ratelimited("Unable to build fragment list\n");
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
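/* A minimal caller sketch (an illustrative assumption, not part of the TIPC
 * code): draining a list of received fragments through tipc_buf_append()
 * until a complete message emerges.  The name example_reassemble_list() is
 * hypothetical; tipc_msg_reassemble() at the end of this file follows the
 * same pattern on cloned buffers.
 */
static inline struct sk_buff *example_reassemble_list(struct sk_buff *frags)
{
	struct sk_buff *head = NULL;
	struct sk_buff *frag;

	while (frags) {
		frag = frags;
		frags = frags->next;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag)) {
			kfree_skb_list(frags);	/* drop any surplus buffers */
			return frag;		/* reassembly complete */
		}
		if (!head) {			/* append failed, freed all */
			kfree_skb_list(frags);
			return NULL;
		}
	}
	kfree_skb(head);			/* chain ended mid-message */
	return NULL;
}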
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @iov: User data
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @chain: Buffer or chain of buffers to be returned to caller
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
		   int offset, int dsz, int pktmax, struct sk_buff **chain)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *buf, *prev;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		buf = tipc_buf_acquire(msz);
		*chain = buf;
		if (unlikely(!buf))
			return -ENOMEM;
		skb_copy_to_linear_data(buf, mhdr, mhsz);
		pktpos = buf->data + mhsz;
		if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment */
	*chain = buf = tipc_buf_acquire(pktmax);
	if (!buf)
		return -ENOMEM;
	pktpos = buf->data;
	skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		prev = buf;
		buf = tipc_buf_acquire(pktsz);
		if (!buf) {
			rc = -ENOMEM;
			goto error;
		}
		prev->next = buf;
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
		pktpos = buf->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);

	msg_set_type(buf_msg(buf), LAST_FRAGMENT);
	return dsz;
error:
	kfree_skb_list(*chain);
	*chain = NULL;
	return rc;
}
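/* A minimal sender-side sketch (an illustrative assumption, not part of the
 * TIPC code): turning a prepared header plus user iovec into a send-ready
 * buffer chain, letting tipc_msg_build() decide whether fragmentation
 * against the link MTU is needed.  example_build_chain() is a hypothetical
 * name.
 */
static inline int example_build_chain(struct tipc_msg *mhdr,
				      struct iovec const *iov, int dsz,
				      int mtu, struct sk_buff **chain)
{
	/* Returns dsz on success; on -ENOMEM/-EFAULT any partial chain has
	 * already been freed and *chain is NULL.
	 */
	return tipc_msg_build(mhdr, iov, 0, dsz, mtu, chain);
}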
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @bbuf: the existing buffer ("bundle")
 * @buf: buffer to be appended
 * @mtu: max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
{
	struct tipc_msg *bmsg = buf_msg(bbuf);
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int bsz = msg_size(bmsg);
	unsigned int msz = msg_size(msg);
	u32 start = align(bsz);
	u32 max = mtu - INT_H_SIZE;
	u32 pad = start - bsz;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (likely(msg_type(bmsg) != BUNDLE_OPEN))
		return false;
	if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bbuf, pad + msz);
	skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	bbuf->next = buf->next;
	kfree_skb(buf);
	return true;
}
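/* Worked example of the padding arithmetic above (illustrative numbers):
 * for an existing bundle of bsz = 59 bytes, start = align(59) = 60 and
 * pad = 1, so a 100-byte message is copied in at offset 60 and the bundle
 * grows by pad + msz = 101 bytes to a new size of 160.
 */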
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @buf: buffer to be appended and replaced
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Replaces buffer if successful
 * Returns true on success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
{
	struct sk_buff *bbuf;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(*buf);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	bbuf = tipc_buf_acquire(max);
	if (!bbuf)
		return false;

	skb_trim(bbuf, INT_H_SIZE);
	bmsg = buf_msg(bbuf);
	tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	bbuf->next = (*buf)->next;
	tipc_msg_bundle(bbuf, *buf, mtu);
	*buf = bbuf;
	return true;
}
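/* A minimal caller sketch (an illustrative assumption, not part of the TIPC
 * code): the usual two-step attempt made by a transmit path that wants to
 * pack small messages together.  example_try_to_bundle() is a hypothetical
 * name; on success *buf has either been consumed by an existing bundle or
 * replaced by a newly created bundle buffer.
 */
static inline bool example_try_to_bundle(struct sk_buff *tail_buf,
					 struct sk_buff **buf,
					 u32 mtu, u32 dnode)
{
	/* Append to an already open bundle at the queue tail, if any... */
	if (tail_buf && tipc_msg_bundle(tail_buf, *buf, mtu))
		return true;
	/* ...otherwise try to wrap the message into a new bundle buffer */
	return tipc_msg_make_bundle(buf, mtu, dnode);
}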
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf: buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err: error code to be set in message
 * Consumes buffer on failure
 * Returns true on success, otherwise false
 */
bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint imp = msg_importance(msg);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;

	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
	if (msg_isdata(msg))
		msg_set_importance(msg, imp);
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, tipc_own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}
/**
 * tipc_msg_eval: determine fate of message that found no destination
 * @buf: the buffer containing the message.
 * @dnode: return value: next-hop node, if message to be forwarded
 *
 * Does not consume buffer
 * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
 * code if message to be rejected
 */
int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dport;

	if (msg_type(msg) != TIPC_NAMED_MSG)
		return -TIPC_ERR_NO_PORT;
	if (skb_linearize(buf))
		return -TIPC_ERR_NO_NAME;
	if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
		return -TIPC_ERR_NO_NAME;
	if (msg_reroute_cnt(msg) > 0)
		return -TIPC_ERR_NO_NAME;

	*dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg),
				       msg_nameinst(msg),
				       dnode);
	if (!dport)
		return -TIPC_ERR_NO_NAME;
	msg_incr_reroute_cnt(msg);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	return TIPC_OK;
}
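/* A minimal sketch (an illustrative assumption, not part of the TIPC code)
 * of how tipc_msg_eval() and tipc_msg_reverse() combine when a message
 * reaches a node but finds no destination port: either re-route the buffer
 * to a freshly resolved destination, or turn it into a rejection addressed
 * back to its origin.  example_handle_undeliverable() is a hypothetical
 * name; it returns the node the buffer should now be sent to, or 0 if the
 * buffer has been freed.
 */
static inline u32 example_handle_undeliverable(struct sk_buff *buf)
{
	u32 dnode = 0;
	int err;

	err = tipc_msg_eval(buf, &dnode);
	if (!err)
		return dnode;		/* name re-resolved: forward buffer */
	if (tipc_msg_reverse(buf, &dnode, -err))
		return dnode;		/* rejection ready to send to origin */
	return 0;			/* tipc_msg_reverse() freed the buffer */
}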
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 * reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
{
	struct sk_buff *buf = chain;
	struct sk_buff *frag = buf;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (!buf->next) {
		hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
		return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	while (buf) {
		frag = skb_clone(buf, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
		buf = buf->next;
	}
	return frag;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}