xref: /kernel/linux/linux-5.10/net/sctp/ulpqueue.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}
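
/*
 * Usage sketch (illustrative only; the association code embeds a
 * struct sctp_ulpq and drives the lifecycle roughly like this,
 * assuming 'asoc' is a valid association):
 *
 *	struct sctp_ulpq *ulpq = &asoc->ulpq;
 *
 *	sctp_ulpq_init(ulpq, asoc);
 *	...feed DATA chunks in via sctp_ulpq_tail_data()...
 *	sctp_ulpq_free(ulpq);	(flushes reasm, reasm_uo and the lobby)
 */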

/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if (event) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, &temp);
	}

	return event_eor;
}
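
/*
 * Caller-side sketch of the return value (sctp_ulpq_renege() below is
 * the in-tree user); it distinguishes "a complete message reached the
 * ULP" from "still buffering fragments":
 *
 *	int ret = sctp_ulpq_tail_data(ulpq, chunk, gfp);
 *
 *	if (ret < 0)		-ENOMEM, no event could be created
 *	else if (ret == 0)	queued/partial, no MSG_EOR delivered yet
 *	else			ret == 1, a full message was delivered
 */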

/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff_head *queue;
	struct sk_buff *skb;
	int clear_pd = 0;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	skb_queue_splice_tail_init(skb_list, queue);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
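
/*
 * The sp->frag_interleave test above is driven by the userspace
 * SCTP_FRAGMENT_INTERLEAVE socket option; a minimal sketch of enabling
 * it, assuming 'fd' is an SCTP socket:
 *
 *	int level = 1;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *		   &level, sizeof(level));
 *
 * With a non-zero level, data from other associations may go straight
 * to the receive queue while one association is in partial delivery,
 * instead of being parked in the lobby.
 */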

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short-circuit the common case: just drop it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
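
/*
 * The TSN_lt()/TSN_lte() comparisons used throughout this file are
 * serial-number arithmetic on 32-bit TSNs (see <net/sctp/sm.h>), so the
 * ordering survives wraparound. A few illustrative values:
 *
 *	TSN_lt(0xfffffffe, 0xffffffff)	-> true
 *	TSN_lt(0xffffffff, 0x00000001)	-> true  (wrapped)
 *	TSN_lt(0x00000001, 0xffffffff)	-> false
 */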

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}
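
/*
 * Resulting skb geometry, sketched for three fragments A, B, C with
 * linear lengths la, lb, lc after the splice above:
 *
 *	A->len      == la + lb + lc	(the whole message)
 *	A->data_len == lb + lc		(bytes hanging off the frag_list)
 *	skb_shinfo(A)->frag_list == B, and B->next == C
 *
 * so skb_headlen(A) == A->len - A->data_len still covers only the first
 * fragment's own linear data.
 */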

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid a compiler warning.  It will
	 * never be used with this value; it is referenced only after it
	 * is set, when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (skb_queue_is_first(&ulpq->reasm, pos)) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(asoc->base.net,
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
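
/*
 * The pd_point checked above comes from the userspace
 * SCTP_PARTIAL_DELIVERY_POINT socket option; a minimal sketch of
 * arming it, assuming 'fd' is an SCTP socket:
 *
 *	__u32 pd_point = 4096;
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		   &pd_point, sizeof(pd_point));
 *
 * Once an in-order run of fragments at the head of the reasm queue
 * accumulates at least pd_point bytes without its last fragment, the
 * run is pushed to the ULP without MSG_EOR and the association enters
 * partial delivery mode.
 */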

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
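
/*
 * Worked example: with fragments for TSNs {10, 11, 13} parked in the
 * reasm queue and a FORWARD TSN advancing the cumulative point to 12,
 * the walk above frees 10 and 11 (TSN_lte() holds for both) and stops
 * at 13, which a later arrival may still complete.
 */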

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		struct sk_buff_head temp;

		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		/* Do ordering if needed.  */
		if (event->msg_flags & MSG_EOR)
			event = sctp_ulpq_order(ulpq, event);

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering.  */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
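
/*
 * Worked example for a stream whose next expected SSN is 5: messages
 * with SSN 6 and 7 arriving early are parked in the lobby and NULL is
 * returned for them; when SSN 5 finally arrives it is returned for
 * delivery, the expectation advances, and sctp_ulpq_retrieve_ordered()
 * appends the parked SSN 6 and 7 skbs to the same event list.
 */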

/* Helper function to gather skbs that have possibly become
 * ordered by a Forward TSN skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* See if this SSN has been marked by skipping. */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Remember the event of the first gathered skb.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, &temp);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine.  */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
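
/*
 * Example (assuming sctp_ssn_skip() advances the expectation to
 * ssn + 1): if stream 'sid' currently expects SSN 5 and a Forward TSN
 * skips up to SSN 7, the expectation moves to 8, and
 * sctp_ulpq_reap_ordered() then releases any messages with SSN 5..7
 * parked in the lobby, since SSN_lt(cssn, peek) now holds for them.
 */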

__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
			    __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in the ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message, as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			struct sk_buff_head temp;

			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
			sctp_ulpq_tail_event(ulpq, &temp);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}

	/* If able to free enough room, accept this chunk. */
	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
	    freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);

		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}
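
/*
 * Example of the 'needed' computation above: a DATA chunk whose
 * chunk_hdr->length is 1452 carries
 * 1452 - sizeof(struct sctp_data_chunk) == 1452 - 16 == 1436 payload
 * bytes, so up to 1436 bytes are reneged, first from the ordering
 * queue (lobby) and then from the reassembly queue.
 */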

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sctp_sock *sp;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
				       SCTP_PARTIAL_DELIVERY_EVENT))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}