/*
 * net/tipc/msg.c: TIPC message header routines
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "msg.h"
#include "addr.h"
#include "name_table.h"
#include "crypto.h"

#define MAX_FORWARD_SIZE 1024
#ifdef CONFIG_TIPC_CRYPTO
#define BUF_HEADROOM ALIGN(((LL_MAX_HEADER + 48) + EHDR_MAX_SIZE), 16)
#define BUF_OVERHEAD (BUF_HEADROOM + TIPC_AES_GCM_TAG_SIZE)
#else
#define BUF_HEADROOM (LL_MAX_HEADER + 48)
#define BUF_OVERHEAD BUF_HEADROOM
#endif

const int one_page_mtu = PAGE_SIZE - SKB_DATA_ALIGN(BUF_OVERHEAD) -
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

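/* align(): round size up to a multiple of 4 (TIPC 32-bit word alignment) */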
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}

/**
 * tipc_buf_acquire - creates a TIPC message buffer
 * @size: message size (including TIPC header)
 * @gfp: memory allocation flags
 *
 * Returns a new buffer with data pointers set to the specified size.
 *
 * NOTE: Headroom is reserved to allow prepending of a data link header.
 *       There may also be unrequested tailroom present at the buffer's end.
 */
struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
	struct sk_buff *skb;

	skb = alloc_skb_fclone(BUF_OVERHEAD + size, gfp);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	return skb;
}
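
/* Example (illustrative sketch, not part of the original file): acquiring a
 * buffer for a basic header plus 100 bytes of user data and initialising the
 * header; "own_addr" and "peer_addr" are assumed placeholder variables:
 *
 *	struct sk_buff *skb = tipc_buf_acquire(BASIC_H_SIZE + 100, GFP_ATOMIC);
 *
 *	if (skb)
 *		tipc_msg_init(own_addr, buf_msg(skb), TIPC_LOW_IMPORTANCE,
 *			      TIPC_DIRECT_MSG, BASIC_H_SIZE, peer_addr);
 */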

void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
		   u32 hsize, u32 dnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, own_node);
	msg_set_type(m, type);
	if (hsize > SHORT_H_SIZE) {
		msg_set_orignode(m, own_node);
		msg_set_destnode(m, dnode);
	}
}

struct sk_buff *tipc_msg_create(uint user, uint type,
				uint hdr_sz, uint data_sz, u32 dnode,
				u32 onode, u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
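
/* Example (hedged sketch): creating a self-contained protocol message, e.g. a
 * connection-manager acknowledgment as the socket layer builds it; "peer_node",
 * "own_node", "peer_port" and "own_port" are assumed placeholders:
 *
 *	struct sk_buff *skb;
 *
 *	skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
 *			      peer_node, own_node, peer_port, own_port,
 *			      TIPC_OK);
 *	if (!skb)
 *		return -ENOMEM;
 */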

/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
 * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
 *            out: set when successful non-complete reassembly, otherwise NULL
 * @*buf:     in:  the buffer to append. Always defined
 *            out: head buf after successful complete reassembly, otherwise NULL
 * Returns 1 when reassembly complete, otherwise 0
 */
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
{
	struct sk_buff *head = *headbuf;
	struct sk_buff *frag = *buf;
	struct sk_buff *tail = NULL;
	struct tipc_msg *msg;
	u32 fragid;
	int delta;
	bool headstolen;

	if (!frag)
		goto err;

	msg = buf_msg(frag);
	fragid = msg_type(msg);
	frag->next = NULL;
	skb_pull(frag, msg_hdr_sz(msg));

	if (fragid == FIRST_FRAGMENT) {
		if (unlikely(head))
			goto err;
		*buf = NULL;
		if (skb_has_frag_list(frag) && __skb_linearize(frag))
			goto err;
		frag = skb_unshare(frag, GFP_ATOMIC);
		if (unlikely(!frag))
			goto err;
		head = *headbuf = frag;
		TIPC_SKB_CB(head)->tail = NULL;
		return 0;
	}

	if (!head)
		goto err;

	if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
		kfree_skb_partial(frag, headstolen);
	} else {
		tail = TIPC_SKB_CB(head)->tail;
		if (!skb_has_frag_list(head))
			skb_shinfo(head)->frag_list = frag;
		else
			tail->next = frag;
		head->truesize += frag->truesize;
		head->data_len += frag->len;
		head->len += frag->len;
		TIPC_SKB_CB(head)->tail = frag;
	}

	if (fragid == LAST_FRAGMENT) {
		TIPC_SKB_CB(head)->validated = 0;
		if (unlikely(!tipc_msg_validate(&head)))
			goto err;
		*buf = head;
		TIPC_SKB_CB(head)->tail = NULL;
		*headbuf = NULL;
		return 1;
	}
	*buf = NULL;
	return 0;
err:
	kfree_skb(*buf);
	kfree_skb(*headbuf);
	*buf = *headbuf = NULL;
	return 0;
}
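
/* Example (minimal sketch): reassembling fragments arriving on a queue;
 * "frags" is an assumed sk_buff_head of FIRST_/LAST_FRAGMENT buffers and
 * "deliver_msg" an assumed handler for the complete message:
 *
 *	struct sk_buff *reasm = NULL, *skb;
 *
 *	while ((skb = __skb_dequeue(frags))) {
 *		if (tipc_buf_append(&reasm, &skb)) {
 *			deliver_msg(skb);	// reassembly complete
 *			break;
 *		}
 *		if (!reasm)
 *			break;			// error: buffers already freed
 *	}
 */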

/**
 * tipc_msg_append(): Append data to tail of an existing buffer queue
 * @_hdr: header to be used
 * @m: the data to be appended
 * @dlen: size of data to be appended
 * @mss: max allowable size of buffer
 * @txq: queue to append to
 * Returns the number of 1k blocks appended or errno value
 */
int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen,
		    int mss, struct sk_buff_head *txq)
{
	struct sk_buff *skb;
	int accounted, total, curr;
	int mlen, cpy, rem = dlen;
	struct tipc_msg *hdr;

	skb = skb_peek_tail(txq);
	accounted = skb ? msg_blocks(buf_msg(skb)) : 0;
	total = accounted;

	do {
		if (!skb || skb->len >= mss) {
			skb = tipc_buf_acquire(mss, GFP_KERNEL);
			if (unlikely(!skb))
				return -ENOMEM;
			skb_orphan(skb);
			skb_trim(skb, MIN_H_SIZE);
			hdr = buf_msg(skb);
			skb_copy_to_linear_data(skb, _hdr, MIN_H_SIZE);
			msg_set_hdr_sz(hdr, MIN_H_SIZE);
			msg_set_size(hdr, MIN_H_SIZE);
			__skb_queue_tail(txq, skb);
			total += 1;
		}
		hdr = buf_msg(skb);
		curr = msg_blocks(hdr);
		mlen = msg_size(hdr);
		cpy = min_t(size_t, rem, mss - mlen);
		if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter))
			return -EFAULT;
		msg_set_size(hdr, mlen + cpy);
		skb_put(skb, cpy);
		rem -= cpy;
		total += msg_blocks(hdr) - curr;
	} while (rem > 0);
	return total - accounted;
}
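
/* Example (illustrative sketch): appending user data from a sendmsg() call to
 * a socket transmit queue; "hdr" is the socket's prepared header, "m" and
 * "dlen" come from the caller, "mss" from MTU negotiation:
 *
 *	struct sk_buff_head txq;
 *	int blocks;
 *
 *	__skb_queue_head_init(&txq);
 *	blocks = tipc_msg_append(hdr, m, dlen, mss, &txq);
 *	if (blocks < 0)
 *		return blocks;		// -ENOMEM or -EFAULT
 */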

/* tipc_msg_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should.  The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
bool tipc_msg_validate(struct sk_buff **_skb)
{
	struct sk_buff *skb = *_skb;
	struct tipc_msg *hdr;
	int msz, hsz;

	/* Ensure that flow control ratio condition is satisfied */
	if (unlikely(skb->truesize / buf_roundup_len(skb) >= 4)) {
		skb = skb_copy_expand(skb, BUF_HEADROOM, 0, GFP_ATOMIC);
		if (!skb)
			return false;
		kfree_skb(*_skb);
		*_skb = skb;
	}

	if (unlikely(TIPC_SKB_CB(skb)->validated))
		return true;

	if (unlikely(!pskb_may_pull(skb, MIN_H_SIZE)))
		return false;

	hsz = msg_hdr_sz(buf_msg(skb));
	if (unlikely(hsz < MIN_H_SIZE) || (hsz > MAX_H_SIZE))
		return false;
	if (unlikely(!pskb_may_pull(skb, hsz)))
		return false;

	hdr = buf_msg(skb);
	if (unlikely(msg_version(hdr) != TIPC_VERSION))
		return false;

	msz = msg_size(hdr);
	if (unlikely(msz < hsz))
		return false;
	if (unlikely((msz - hsz) > TIPC_MAX_USER_MSG_SIZE))
		return false;
	if (unlikely(skb->len < msz))
		return false;

	TIPC_SKB_CB(skb)->validated = 1;
	return true;
}
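
/* Example (minimal sketch): validating a received buffer before reading any
 * header field beyond the minimum; note that the skb may be replaced, so the
 * pointer is passed by reference:
 *
 *	if (unlikely(!tipc_msg_validate(&skb))) {
 *		kfree_skb(skb);
 *		return;
 *	}
 *	// from here on, msg_*() accessors on buf_msg(skb) are safe
 */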

/**
 * tipc_msg_fragment - build a fragment skb list for TIPC message
 *
 * @skb: TIPC message skb
 * @hdr: internal msg header to be put on the top of the fragments
 * @pktmax: max size of a fragment incl. the header
 * @frags: returned fragment skb list
 *
 * Returns 0 if the fragmentation is successful, otherwise: -EINVAL
 * or -ENOMEM
 */
int tipc_msg_fragment(struct sk_buff *skb, const struct tipc_msg *hdr,
		      int pktmax, struct sk_buff_head *frags)
{
	int pktno, nof_fragms, dsz, dmax, eat;
	struct tipc_msg *_hdr;
	struct sk_buff *_skb;
	u8 *data;

	/* Non-linear buffer? */
	if (skb_linearize(skb))
		return -ENOMEM;

	data = (u8 *)skb->data;
	dsz = msg_size(buf_msg(skb));
	dmax = pktmax - INT_H_SIZE;
	if (dsz <= dmax || !dmax)
		return -EINVAL;

	nof_fragms = dsz / dmax + 1;
	for (pktno = 1; pktno <= nof_fragms; pktno++) {
		if (pktno < nof_fragms)
			eat = dmax;
		else
			eat = dsz % dmax;
		/* Allocate a new fragment */
		_skb = tipc_buf_acquire(INT_H_SIZE + eat, GFP_ATOMIC);
		if (!_skb)
			goto error;
		skb_orphan(_skb);
		__skb_queue_tail(frags, _skb);
		/* Copy header & data to the fragment */
		skb_copy_to_linear_data(_skb, hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(_skb, INT_H_SIZE, data, eat);
		data += eat;
		/* Update the fragment's header */
		_hdr = buf_msg(_skb);
		msg_set_fragm_no(_hdr, pktno);
		msg_set_nof_fragms(_hdr, nof_fragms);
		msg_set_size(_hdr, INT_H_SIZE + eat);
	}
	return 0;

error:
	__skb_queue_purge(frags);
	__skb_queue_head_init(frags);
	return -ENOMEM;
}
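
/* Example (hedged sketch): splitting an oversized message into fragments;
 * "hdr" is a prepared INT_H_SIZE fragment header and "mtu" the bearer MTU:
 *
 *	struct sk_buff_head frags;
 *	int rc;
 *
 *	__skb_queue_head_init(&frags);
 *	rc = tipc_msg_fragment(skb, hdr, mtu, &frags);
 *	if (rc)
 *		return rc;	// -EINVAL (message already fits) or -ENOMEM
 */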

/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: buffer offset for fragmented messages
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Note that the recursive call we are making here is safe, since it can
 * logically go only one further level down.
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	struct tipc_msg pkthdr;
	int msz = mhsz + dsz;
	int pktrem = pktmax;
	struct sk_buff *skb;
	int drem = dsz;
	int pktno = 1;
	char *pktpos;
	int pktsz;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz, GFP_KERNEL);

		/* Fall back to smaller MTU if node local message */
		if (unlikely(!skb)) {
			if (pktmax != MAX_MSG_SIZE)
				return -ENOMEM;
			rc = tipc_msg_build(mhdr, m, offset, dsz,
					    one_page_mtu, list);
			if (rc != dsz)
				return rc;
			if (tipc_msg_assemble(list))
				return dsz;
			return -ENOMEM;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (copy_from_iter_full(pktpos, dsz, &m->msg_iter))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
		      FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);
	msg_set_importance(&pkthdr, msg_importance(mhdr));

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	skb_orphan(skb);
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (!copy_from_iter_full(pktpos, pktrem, &m->msg_iter)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		skb_orphan(skb);
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}
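
/* Example (minimal sketch): building a transmit chain from user data in a
 * sendmsg() path; "mhdr" is the socket header, "m" and "dlen" the user
 * message, and "mtu" the selected packet limit:
 *
 *	struct sk_buff_head pkts;
 *	int rc;
 *
 *	__skb_queue_head_init(&pkts);
 *	rc = tipc_msg_build(mhdr, m, 0, dlen, mtu, &pkts);
 *	if (unlikely(rc != dlen))
 *		return rc;	// -ENOMEM or -EFAULT
 */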

/**
 * tipc_msg_bundle - Append contents of a buffer to tail of an existing one
 * @bskb: the bundle buffer to append to
 * @msg: message to be appended
 * @max: max allowable size for the bundle buffer
 *
 * Returns "true" if bundling has been performed, otherwise "false"
 */
static bool tipc_msg_bundle(struct sk_buff *bskb, struct tipc_msg *msg,
			    u32 max)
{
	struct tipc_msg *bmsg = buf_msg(bskb);
	u32 msz, bsz, offset, pad;

	msz = msg_size(msg);
	bsz = msg_size(bmsg);
	offset = align(bsz);
	pad = offset - bsz;

	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (offset + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, offset, msg, msz);
	msg_set_size(bmsg, offset + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	return true;
}

/**
 * tipc_msg_try_bundle - Try to bundle a new message to the last one
 * @tskb: the last/target message to which the new one will be appended
 * @skb: the new message skb pointer
 * @mss: max message size (header inclusive)
 * @dnode: destination node for the message
 * @new_bundle: if this call made a new bundle or not
 *
 * Return: "true" if the new message skb is a candidate for bundling, now or
 * later; if bundling was performed this time, the skb has been consumed and
 * the skb pointer set to NULL. Otherwise "false" if the skb cannot be bundled
 * at all.
 */
bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
			 u32 dnode, bool *new_bundle)
{
	struct tipc_msg *msg, *inner, *outer;
	u32 tsz;

	/* First, check if the new buffer is suitable for bundling */
	msg = buf_msg(*skb);
	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == TUNNEL_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (mss <= INT_H_SIZE + msg_size(msg))
		return false;

	/* Ok so far, but is there a last/target buffer to bundle into? */
	if (unlikely(!tskb))
		return true;

	/* Is it a bundle already? Try to bundle the new message to it */
	if (msg_user(buf_msg(tskb)) == MSG_BUNDLER) {
		*new_bundle = false;
		goto bundle;
	}

	/* Make a new bundle of the two messages if possible */
	tsz = msg_size(buf_msg(tskb));
	if (unlikely(mss < align(INT_H_SIZE + tsz) + msg_size(msg)))
		return true;
	if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,
				      GFP_ATOMIC)))
		return true;
	inner = buf_msg(tskb);
	skb_push(tskb, INT_H_SIZE);
	outer = buf_msg(tskb);
	tipc_msg_init(msg_prevnode(inner), outer, MSG_BUNDLER, 0, INT_H_SIZE,
		      dnode);
	msg_set_importance(outer, msg_importance(inner));
	msg_set_size(outer, INT_H_SIZE + tsz);
	msg_set_msgcnt(outer, 1);
	*new_bundle = true;

bundle:
	if (likely(tipc_msg_bundle(tskb, msg, mss))) {
		consume_skb(*skb);
		*skb = NULL;
	}
	return true;
}
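
/* Example (hedged sketch): trying to bundle a new message onto the tail of a
 * link backlog queue, roughly as the link transmit path does; "backlogq",
 * "mss" and "dnode" are assumed to come from the link state:
 *
 *	struct sk_buff *tskb = skb_peek_tail(backlogq);
 *	bool new_bundle;
 *
 *	if (tipc_msg_try_bundle(tskb, &skb, mss, dnode, &new_bundle)) {
 *		if (!skb)
 *			return;			  // bundled and consumed
 *		__skb_queue_tail(backlogq, skb);  // queued for later bundling
 *	}
 */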

/**
 *  tipc_msg_extract(): extract bundled inner packet from buffer
 *  @skb: buffer to be extracted from.
 *  @iskb: extracted inner buffer, to be returned
 *  @pos: position in outer message of msg to be extracted.
 *        Returns position of next msg
 *  Consumes outer buffer when last packet extracted
 *  Returns true when there is an extracted buffer, otherwise false
 */
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
	struct tipc_msg *hdr, *ihdr;
	int imsz;

	*iskb = NULL;
	if (unlikely(skb_linearize(skb)))
		goto none;

	hdr = buf_msg(skb);
	if (unlikely(*pos > (msg_data_sz(hdr) - MIN_H_SIZE)))
		goto none;

	ihdr = (struct tipc_msg *)(msg_data(hdr) + *pos);
	imsz = msg_size(ihdr);

	if ((*pos + imsz) > msg_data_sz(hdr))
		goto none;

	*iskb = tipc_buf_acquire(imsz, GFP_ATOMIC);
	if (!*iskb)
		goto none;

	skb_copy_to_linear_data(*iskb, ihdr, imsz);
	if (unlikely(!tipc_msg_validate(iskb)))
		goto none;

	*pos += align(imsz);
	return true;
none:
	kfree_skb(skb);
	kfree_skb(*iskb);
	*iskb = NULL;
	return false;
}
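
/* Example (minimal sketch): unpacking every message from a received bundle;
 * the outer "skb" is consumed by the final (failing) call, so it must not be
 * referenced after the loop; "deliver_msg" is an assumed handler:
 *
 *	struct sk_buff *iskb;
 *	int pos = 0;
 *
 *	while (tipc_msg_extract(skb, &iskb, &pos))
 *		deliver_msg(iskb);
 */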

/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @own_node: originating node id for reversed message
 * @skb:  buffer containing message to be reversed; will be consumed
 * @err:  error code to be set in message, if any
 * Replaces consumed buffer with new one when successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
{
	struct sk_buff *_skb = *skb;
	struct tipc_msg *_hdr, *hdr;
	int hlen, dlen;

	if (skb_linearize(_skb))
		goto exit;
	_hdr = buf_msg(_skb);
	dlen = min_t(uint, msg_data_sz(_hdr), MAX_FORWARD_SIZE);
	hlen = msg_hdr_sz(_hdr);

	if (msg_dest_droppable(_hdr))
		goto exit;
	if (msg_errcode(_hdr))
		goto exit;

	/* Never return SHORT header */
	if (hlen == SHORT_H_SIZE)
		hlen = BASIC_H_SIZE;

	/* Don't return data along with SYN+; the sender has a clone */
	if (msg_is_syn(_hdr) && err == TIPC_ERR_OVERLOAD)
		dlen = 0;

	/* Allocate new buffer to return */
	*skb = tipc_buf_acquire(hlen + dlen, GFP_ATOMIC);
	if (!*skb)
		goto exit;
	memcpy((*skb)->data, _skb->data, msg_hdr_sz(_hdr));
	memcpy((*skb)->data + hlen, msg_data(_hdr), dlen);

	/* Build reverse header in new buffer */
	hdr = buf_msg(*skb);
	msg_set_hdr_sz(hdr, hlen);
	msg_set_errcode(hdr, err);
	msg_set_non_seq(hdr, 0);
	msg_set_origport(hdr, msg_destport(_hdr));
	msg_set_destport(hdr, msg_origport(_hdr));
	msg_set_destnode(hdr, msg_prevnode(_hdr));
	msg_set_prevnode(hdr, own_node);
	msg_set_orignode(hdr, own_node);
	msg_set_size(hdr, hlen + dlen);
	skb_orphan(_skb);
	kfree_skb(_skb);
	return true;
exit:
	kfree_skb(_skb);
	*skb = NULL;
	return false;
}

bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			pr_err_ratelimited("Failed to clone buffer chain\n");
			return false;
		}
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/**
 * tipc_msg_lookup_dest(): try to find new destination for named message
 * @net: pointer to associated network namespace
 * @skb: the buffer containing the message.
 * @err: error code to be used by caller if lookup fails
 * Does not consume buffer
 * Returns true if a destination is found, false otherwise
 */
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err)
{
	struct tipc_msg *msg = buf_msg(skb);
	u32 dport, dnode;
	u32 onode = tipc_own_addr(net);

	if (!msg_isdata(msg))
		return false;
	if (!msg_named(msg))
		return false;
	if (msg_errcode(msg))
		return false;
	*err = TIPC_ERR_NO_NAME;
	if (skb_linearize(skb))
		return false;
	msg = buf_msg(skb);
	if (msg_reroute_cnt(msg))
		return false;
	dnode = tipc_scope2node(net, msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(net, msg_nametype(msg),
				       msg_nameinst(msg), &dnode);
	if (!dport)
		return false;
	msg_incr_reroute_cnt(msg);
	if (dnode != onode)
		msg_set_prevnode(msg, onode);
	msg_set_destnode(msg, dnode);
	msg_set_destport(msg, dport);
	*err = TIPC_OK;

	return true;
}
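
/* Example (hedged sketch): retrying delivery of a named message whose
 * destination socket has vanished; "reject_msg" is an assumed helper that
 * bounces the buffer back with "err":
 *
 *	int err = TIPC_OK;
 *
 *	if (tipc_msg_lookup_dest(net, skb, &err)) {
 *		struct tipc_msg *msg = buf_msg(skb);
 *
 *		tipc_node_xmit_skb(net, skb, msg_destnode(msg),
 *				   msg_origport(msg));
 *	} else {
 *		reject_msg(net, skb, err);
 *	}
 */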

/* tipc_msg_assemble() - assemble chain of fragments into one message
 */
bool tipc_msg_assemble(struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp = NULL;

	if (skb_queue_len(list) == 1)
		return true;

	while ((skb = __skb_dequeue(list))) {
		skb->next = NULL;
		if (tipc_buf_append(&tmp, &skb)) {
			__skb_queue_tail(list, skb);
			return true;
		}
		if (!tmp)
			break;
	}
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	pr_warn("Failed to assemble buffer\n");
	return false;
}

/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq)
{
	struct sk_buff *skb, *_skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_len;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_len = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		_skb = __pskb_copy(skb, hdr_len, GFP_ATOMIC);
		if (!_skb)
			return false;
		__skb_queue_tail(rcvq, _skb);
		return true;
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	__skb_queue_tail(rcvq, frag);
	return true;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return false;
}

bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
			struct sk_buff_head *cpy)
{
	struct sk_buff *skb, *_skb;

	skb_queue_walk(msg, skb) {
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb) {
			__skb_queue_purge(cpy);
			return false;
		}
		msg_set_destnode(buf_msg(_skb), dst);
		__skb_queue_tail(cpy, _skb);
	}
	return true;
}

/* __tipc_skb_queue_sorted(): sort pkt into list according to sequence number
 * @list: list to be appended to
 * @seqno: sequence number of buffer to add
 * @skb: buffer to add
 */
bool __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
			     struct sk_buff *skb)
{
	struct sk_buff *_skb, *tmp;

	if (skb_queue_empty(list) || less(seqno, buf_seqno(skb_peek(list)))) {
		__skb_queue_head(list, skb);
		return true;
	}

	if (more(seqno, buf_seqno(skb_peek_tail(list)))) {
		__skb_queue_tail(list, skb);
		return true;
	}

	skb_queue_walk_safe(list, _skb, tmp) {
		if (more(seqno, buf_seqno(_skb)))
			continue;
		if (seqno == buf_seqno(_skb))
			break;
		__skb_queue_before(list, _skb, skb);
		return true;
	}
	kfree_skb(skb);
	return false;
}
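
/* Example (minimal sketch): inserting an out-of-order packet into a link's
 * deferred-delivery queue; a duplicate is freed and reported as "false":
 *
 *	u16 seqno = msg_seqno(buf_msg(skb));
 *
 *	if (!__tipc_skb_queue_sorted(defq, seqno, skb))
 *		pr_debug("Duplicate packet %u discarded\n", seqno);
 */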

void tipc_skb_reject(struct net *net, int err, struct sk_buff *skb,
		     struct sk_buff_head *xmitq)
{
	if (tipc_msg_reverse(tipc_own_addr(net), &skb, err))
		__skb_queue_tail(xmitq, skb);
}