// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright Red Hat Inc. 2017
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement SCTP stream message interleaving, mostly
 * covering the processing of I-DATA and I-FORWARD-TSN chunks.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Xin Long <lucien.xin@gmail.com>
 */

#include <net/busy_poll.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/ulpevent.h>
#include <linux/sctp.h>

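/* Create an empty I-DATA fragment: only the stream id is filled in here,
 * and the UNORDERED flag is carried over from sinfo.  The MID, FSN and
 * PPID fields are assigned later, once the whole message has been broken
 * into fragments (see sctp_chunk_assign_mid() below).
 */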
static struct sctp_chunk *sctp_make_idatafrag_empty(
					const struct sctp_association *asoc,
					const struct sctp_sndrcvinfo *sinfo,
					int len, __u8 flags, gfp_t gfp)
{
	struct sctp_chunk *retval;
	struct sctp_idatahdr dp;

	memset(&dp, 0, sizeof(dp));
	dp.stream = htons(sinfo->sinfo_stream);

	if (sinfo->sinfo_flags & SCTP_UNORDERED)
		flags |= SCTP_DATA_UNORDERED;

	retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
	if (!retval)
		return NULL;

	retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
	memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));

	return retval;
}

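/* Assign a Message Identifier (MID) to every fragment of a message.  In
 * the I-DATA header the PPID and FSN occupy the same field, so the first
 * fragment carries the PPID while each later fragment gets a consecutive
 * FSN.  The per-stream MID counter is only peeked at until the last
 * fragment is seen, at which point it is advanced.
 */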
static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	struct sctp_chunk *lchunk;
	__u32 cfsn = 0;
	__u16 sid;

	if (chunk->has_mid)
		return;

	sid = sctp_chunk_stream_no(chunk);
	stream = &chunk->asoc->stream;

	list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
		struct sctp_idatahdr *hdr;
		__u32 mid;

		lchunk->has_mid = 1;

		hdr = lchunk->subh.idata_hdr;

		if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
			hdr->ppid = lchunk->sinfo.sinfo_ppid;
		else
			hdr->fsn = htonl(cfsn++);

		if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_uo_next(stream, out, sid) :
				sctp_mid_uo_peek(stream, out, sid);
		} else {
			mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
				sctp_mid_next(stream, out, sid) :
				sctp_mid_peek(stream, out, sid);
		}
		hdr->mid = htonl(mid);
	}
}

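/* Validate an inbound DATA chunk: reject anything that is not a DATA
 * chunk, and for ordered delivery make sure the SSN has not already been
 * passed by the stream's next expected SSN.
 */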
static bool sctp_validate_data(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u16 sid, ssn;

	if (chunk->chunk_hdr->type != SCTP_CID_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	ssn = ntohs(chunk->subh.data_hdr->ssn);

	return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
}

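/* The I-DATA counterpart of sctp_validate_data(): the same check, but
 * against the 32-bit MID instead of the 16-bit SSN.
 */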
static bool sctp_validate_idata(struct sctp_chunk *chunk)
{
	struct sctp_stream *stream;
	__u32 mid;
	__u16 sid;

	if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
		return false;

	if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
		return true;

	stream = &chunk->asoc->stream;
	sid = sctp_chunk_stream_no(chunk);
	mid = ntohl(chunk->subh.idata_hdr->mid);

	return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
}

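/* Insert an event into the reassembly queue, keeping the queue sorted by
 * stream id, then MID, then fragment position (a FIRST_FRAG sorts ahead
 * of the FSN-bearing fragments of the same message).  The tail is checked
 * first since in-order arrival is the common case.
 */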
static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid))) {
			loc = pos;
			break;
		}
		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
}

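/* Continue an in-progress partial delivery: collect the run of queued
 * fragments that directly extends the message currently being partially
 * delivered on this stream (sin->mid at sin->fsn), and hand them up as
 * one reassembled event.
 */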
static struct sctp_ulpevent *sctp_intl_retrieve_partial(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;

		if (cevent->stream > event->stream ||
		    cevent->mid != sin->mid)
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
					     first_frag, last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode = 0;
		}
	}

	return retval;
}

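/* Try to reassemble the complete message that the new event belongs to.
 * If it cannot be completed, but a prefix of the next expected message
 * has accumulated past the socket's partial delivery point (pd_point),
 * start partial delivery on that prefix instead.
 */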
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (cevent->mid == sin->mid) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn = next_fsn;
			sin->pd_mode = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

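/* Reassembly entry point for an ordered I-DATA event: unfragmented
 * messages are complete by definition; fragments are stored and then a
 * partial or full reassembly is attempted.
 */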
static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode && event->mid == sin->mid &&
	    event->fsn == sin->fsn)
		retval = sctp_intl_retrieve_partial(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled(ulpq, event);

	return retval;
}

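/* Insert a fully reassembled but out-of-order message into the lobby,
 * keeping the queue sorted by stream id and then MID.
 */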
static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos, *loc;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	cevent = (struct sctp_ulpevent *)pos->cb;
	if (event->stream == cevent->stream &&
	    MID_lt(cevent->mid, event->mid)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if (event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	loc = NULL;
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > event->stream) {
			loc = pos;
			break;
		}
		if (cevent->stream == event->stream &&
		    MID_lt(event->mid, cevent->mid)) {
			loc = pos;
			break;
		}
	}

	if (!loc)
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
	else
		__skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
}

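/* After an in-order message has been delivered, pull any messages from
 * the lobby that have now become deliverable on the same stream,
 * advancing the expected MID for each one.  The destination list is
 * recovered from the event's skb, which is already at the head of the
 * caller's temporary list (its prev pointer is the list head).
 */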
static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sctp_stream *stream;
	struct sk_buff *pos, *tmp;
	__u16 sid = event->stream;

	stream  = &ulpq->asoc->stream;
	event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;

	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;

		if (cevent->stream > sid)
			break;

		if (cevent->stream < sid)
			continue;

		if (cevent->mid != sctp_mid_peek(stream, in, sid))
			break;

		sctp_mid_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		__skb_queue_tail(event_list, pos);
	}
}

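/* Deliver an ordered message if its MID is the next expected one for the
 * stream, also draining any lobby messages that become deliverable as a
 * result; otherwise park it in the lobby and deliver nothing.
 */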
static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_stream *stream;
	__u16 sid;

	stream  = &ulpq->asoc->stream;
	sid = event->stream;

	if (event->mid != sctp_mid_peek(stream, in, sid)) {
		sctp_intl_store_ordered(ulpq, event);
		return NULL;
	}

	sctp_mid_next(stream, in, sid);

	sctp_intl_retrieve_ordered(ulpq, event);

	return event;
}

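/* Move a list of events onto the socket receive queue and wake up the
 * reader, unless the socket is shutting down or the event type is not
 * subscribed, in which case the events are freed.  Returns 1 when
 * something was actually queued, 0 otherwise.
 */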
static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
			      struct sk_buff_head *skb_list)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sctp_ulpevent *event;
	struct sk_buff *skb;

	skb = __skb_peek(skb_list);
	event = sctp_skb2event(skb);

	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}

	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
		goto out_free;

	if (skb_list)
		skb_queue_splice_tail_init(skb_list,
					   &sk->sk_receive_queue);
	else
		__skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}

	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

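/* Unordered counterpart of sctp_intl_store_reasm(), operating on the
 * reasm_uo queue with the same sort order.
 */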
static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
				     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *cevent;
	struct sk_buff *pos;

	pos = skb_peek_tail(&ulpq->reasm_uo);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	cevent = sctp_skb2event(pos);

	if (event->stream == cevent->stream &&
	    event->mid == cevent->mid &&
	    (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
	     (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
	      event->fsn > cevent->fsn))) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	if ((event->stream == cevent->stream &&
	     MID_lt(cevent->mid, event->mid)) ||
	    event->stream > cevent->stream) {
		__skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
		return;
	}

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		cevent = sctp_skb2event(pos);

		if (event->stream < cevent->stream ||
		    (event->stream == cevent->stream &&
		     MID_lt(event->mid, cevent->mid)))
			break;

		if (event->stream == cevent->stream &&
		    event->mid == cevent->mid &&
		    !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
		    (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
		     event->fsn < cevent->fsn))
			break;
	}

	__skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
}

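/* Unordered counterpart of sctp_intl_retrieve_partial(), keyed by the
 * stream's mid_uo/fsn_uo partial delivery state.
 */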
static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sctp_stream_in *sin;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	int is_last = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, sin->mid_uo))
			continue;
		if (MID_lt(sin->mid_uo, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			goto out;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = cevent->fsn + 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn++;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag) {
				if (cevent->fsn == sin->fsn_uo) {
					first_frag = pos;
					last_frag = pos;
					next_fsn = 0;
					is_last = 1;
				}
			} else if (cevent->fsn == next_fsn) {
				last_frag = pos;
				next_fsn = 0;
				is_last = 1;
			}
			goto out;
		default:
			goto out;
		}
	}

out:
	if (!first_frag)
		return NULL;

	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		if (is_last) {
			retval->msg_flags |= MSG_EOR;
			sin->pd_mode_uo = 0;
		}
	}

	return retval;
}

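/* Unordered counterpart of sctp_intl_retrieve_reassembled().  Note that
 * partial delivery may start here on any message whose first fragment is
 * queued (not just the next expected MID), since unordered messages need
 * not be delivered in MID order.
 */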
static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
						struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_association *asoc = ulpq->asoc;
	struct sk_buff *pos, *first_frag = NULL;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	struct sctp_stream_in *sin;
	__u32 next_fsn = 0;
	__u32 pd_point = 0;
	__u32 pd_len = 0;
	__u32 mid = 0;

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream < event->stream)
			continue;
		if (cevent->stream > event->stream)
			break;

		if (MID_lt(cevent->mid, event->mid))
			continue;
		if (MID_lt(event->mid, cevent->mid))
			break;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!sin->pd_mode_uo) {
				sin->mid_uo = cevent->mid;
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			}

			first_frag = pos;
			next_fsn = 0;
			mid = cevent->mid;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else {
				first_frag = NULL;
			}
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && cevent->mid == mid &&
			    cevent->fsn == next_fsn)
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	if (!pd_first)
		goto out;

	pd_point = sctp_sk(asoc->base.sk)->pd_point;
	if (pd_point && pd_point <= pd_len) {
		retval = sctp_make_reassembled_event(asoc->base.net,
						     &ulpq->reasm_uo,
						     pd_first, pd_last);
		if (retval) {
			sin->fsn_uo = next_fsn;
			sin->pd_mode_uo = 1;
		}
	}
	goto out;

found:
	retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
					     first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;

out:
	return retval;
}

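/* Reassembly entry point for unordered I-DATA events. */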
static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
						struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;
	struct sctp_stream_in *sin;

	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_intl_store_reasm_uo(ulpq, event);

	sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
	if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
	    event->fsn == sin->fsn_uo)
		retval = sctp_intl_retrieve_partial_uo(ulpq, event);

	if (!retval)
		retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);

	return retval;
}

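/* Force partial delivery on the unordered reassembly queue: find the
 * first run of fragments that starts with a FIRST_FRAG on a stream not
 * already in unordered partial delivery mode, and deliver it.
 */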
static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm_uo, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode_uo)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			first_frag = pos;
			last_frag = pos;
			next_fsn = 0;
			sin = csin;
			sid = cevent->stream;
			sin->mid_uo = cevent->mid;
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid_uo &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm_uo, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn_uo = next_fsn;
		sin->pd_mode_uo = 1;
	}

	return retval;
}

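/* Main ingress path for an I-DATA chunk: build a ulpevent, run it
 * through reassembly (and, for ordered data, through MID ordering), then
 * enqueue whatever became deliverable.  Returns 1 if a complete message
 * (MSG_EOR) was delivered, 0 otherwise, or -ENOMEM if the event could
 * not be allocated.
 */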
static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
			       struct sctp_chunk *chunk, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;
	int event_eor = 0;

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event->mid = ntohl(chunk->subh.idata_hdr->mid);
	if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
		event->ppid = chunk->subh.idata_hdr->ppid;
	else
		event->fsn = ntohl(chunk->subh.idata_hdr->fsn);

	if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
		event = sctp_intl_reasm(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			if (event->msg_flags & MSG_EOR)
				event = sctp_intl_order(ulpq, event);
		}
	} else {
		event = sctp_intl_reasm_uo(ulpq, event);
		if (event) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		}
	}

	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_enqueue_event(ulpq, &temp);
	}

	return event_eor;
}

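/* Ordered counterpart of sctp_intl_retrieve_first_uo(): the run of
 * fragments must additionally start at the stream's next expected MID.
 */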
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

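/* Kick partial delivery on both reassembly queues, draining as many
 * leading fragment runs as can be delivered.
 */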
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sk_buff_head temp;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event) {
				skb_queue_head_init(&temp);
				__skb_queue_tail(&temp, sctp_event2skb(event));
				sctp_enqueue_event(ulpq, &temp);
			}
		} while (event);
	}
}

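/* Renege: if the receive queue is empty, evict data from the lobby and
 * the reassembly queues to free up enough memory for the newly arrived
 * chunk, then process it; fall back to partial delivery if no complete
 * message could be delivered.
 */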
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

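/* Send an SCTP_PARTIAL_DELIVERY_ABORTED notification for one stream, if
 * the user has subscribed to partial delivery events.
 */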
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
					SCTP_PARTIAL_DELIVERY_EVENT))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		struct sctp_sock *sp = sctp_sk(sk);

		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sp->data_ready_signalled) {
			sp->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

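/* After a stream's expected MID has been force-advanced, pull every lobby
 * message on that stream whose MID has now been passed, plus the one
 * matching the new expected MID if present, and deliver them together
 * with any messages that follow in order.
 */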
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, &temp);
	}
}

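/* Abort partial delivery on every stream, for both ordered and unordered
 * delivery, then flush all pending queues.  Used only when all remaining
 * data needs to be cleaned up, hence the final flush.
 */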
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = SCTP_SI(stream, sid);
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

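/* Find the slot for a (stream, flags) pair in the skip list: the index
 * of an existing entry, or nskips if a new entry must be appended.
 */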
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

#define SCTP_FTSN_U_BIT	0x1
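/* Build an I-FORWARD-TSN chunk: free abandoned chunks that are already
 * covered by the cumulative TSN ack, advance adv_peer_ack_point across
 * the abandoned TSNs that directly follow it while recording at most ten
 * (stream, flags, mid) skip entries, and queue the resulting chunk on
 * the control chunk list if the ack point moved.
 */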
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
	}
}

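/* Iterate over the skip entries of an I-FORWARD-TSN chunk, bounded by the
 * chunk length so the walk cannot read past the last complete entry.
 */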
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos <= (void *)chunk->subh.ifwdtsn_hdr->skip + (end) - \
			    sizeof(struct sctp_ifwdtsn_skip); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
					sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge the fragmentation queue. */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in-progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* Purge the fragmentation queues. */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* Abort partial delivery only when the skip covers all data. */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

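/* Process one I-FORWARD-TSN skip entry: abort any partial delivery that
 * the skip overtakes (unordered if the U bit is set), then for ordered
 * streams advance the expected MID and reap the lobby.
 */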
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
	struct sctp_stream *stream  = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_ulpq_tail_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len		= sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag		= sctp_make_datafrag_empty,
	.assign_number		= sctp_chunk_assign_ssn,
	.validate_data		= sctp_validate_data,
	.ulpevent_data		= sctp_ulpq_tail_data,
	.enqueue_event		= do_ulpq_tail_event,
	.renege_events		= sctp_ulpq_renege,
	.start_pd		= sctp_ulpq_partial_delivery,
	.abort_pd		= sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_fwdtsn,
	.validate_ftsn		= sctp_validate_fwdtsn,
	.report_ftsn		= sctp_report_fwdtsn,
	.handle_ftsn		= sctp_handle_fwdtsn,
};

static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
				 struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	return sctp_enqueue_event(ulpq, &temp);
}

static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len		= sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len		= sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag		= sctp_make_idatafrag_empty,
	.assign_number		= sctp_chunk_assign_mid,
	.validate_data		= sctp_validate_idata,
	.ulpevent_data		= sctp_ulpevent_idata,
	.enqueue_event		= do_sctp_enqueue_event,
	.renege_events		= sctp_renege_events,
	.start_pd		= sctp_intl_start_pd,
	.abort_pd		= sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn		= sctp_generate_iftsn,
	.validate_ftsn		= sctp_validate_iftsn,
	.report_ftsn		= sctp_report_iftsn,
	.handle_ftsn		= sctp_handle_iftsn,
};

void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
					     : &sctp_stream_interleave_0;
}