Lines Matching defs:ulpq
124 static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
130 pos = skb_peek_tail(&ulpq->reasm);
132 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
143 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
150 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
155 skb_queue_walk(&ulpq->reasm, pos) {
175 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
177 __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
181 struct sctp_ulpq *ulpq,
192 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
194 skb_queue_walk(&ulpq->reasm, pos) {
244 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
258 struct sctp_ulpq *ulpq,
261 struct sctp_association *asoc = ulpq->asoc;
272 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
274 skb_queue_walk(&ulpq->reasm, pos) {
329 &ulpq->reasm,
339 retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
348 static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
359 sctp_intl_store_reasm(ulpq, event);
361 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
364 retval = sctp_intl_retrieve_partial(ulpq, event);
367 retval = sctp_intl_retrieve_reassembled(ulpq, event);
372 static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
378 pos = skb_peek_tail(&ulpq->lobby);
380 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
387 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
392 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
397 skb_queue_walk(&ulpq->lobby, pos) {
412 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
414 __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
417 static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
425 stream = &ulpq->asoc->stream;
428 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
442 __skb_unlink(pos, &ulpq->lobby);
448 static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
454 stream = &ulpq->asoc->stream;
458 sctp_intl_store_ordered(ulpq, event);
464 sctp_intl_retrieve_ordered(ulpq, event);
469 static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
472 struct sock *sk = ulpq->asoc->base.sk;
490 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
515 static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
521 pos = skb_peek_tail(&ulpq->reasm_uo);
523 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
534 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
541 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
545 skb_queue_walk(&ulpq->reasm_uo, pos) {
561 __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
565 struct sctp_ulpq *ulpq,
576 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
578 skb_queue_walk(&ulpq->reasm_uo, pos) {
631 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
632 &ulpq->reasm_uo, first_frag,
646 struct sctp_ulpq *ulpq,
649 struct sctp_association *asoc = ulpq->asoc;
660 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
662 skb_queue_walk(&ulpq->reasm_uo, pos) {
718 &ulpq->reasm_uo,
728 retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
737 static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
748 sctp_intl_store_reasm_uo(ulpq, event);
750 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
753 retval = sctp_intl_retrieve_partial_uo(ulpq, event);
756 retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
761 static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
771 skb_queue_walk(&ulpq->reasm_uo, pos) {
774 csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
814 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
815 &ulpq->reasm_uo, first_frag,
825 static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
843 event = sctp_intl_reasm(ulpq, event);
849 event = sctp_intl_order(ulpq, event);
852 event = sctp_intl_reasm_uo(ulpq, event);
861 sctp_enqueue_event(ulpq, &temp);
867 static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
877 skb_queue_walk(&ulpq->reasm, pos) {
880 csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
921 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
922 &ulpq->reasm, first_frag,
932 static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
937 if (!skb_queue_empty(&ulpq->reasm)) {
939 event = sctp_intl_retrieve_first(ulpq);
943 sctp_enqueue_event(ulpq, &temp);
948 if (!skb_queue_empty(&ulpq->reasm_uo)) {
950 event = sctp_intl_retrieve_first_uo(ulpq);
954 sctp_enqueue_event(ulpq, &temp);
960 static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
963 struct sctp_association *asoc = ulpq->asoc;
971 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
973 freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
976 freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
980 if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
981 sctp_intl_start_pd(ulpq, gfp);
986 static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
989 struct sock *sk = ulpq->asoc->base.sk;
992 if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
996 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
1010 static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
1012 struct sctp_stream *stream = &ulpq->asoc->stream;
1014 struct sk_buff_head *lobby = &ulpq->lobby;
1056 sctp_intl_retrieve_ordered(ulpq, event);
1057 sctp_enqueue_event(ulpq, &temp);
1061 static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1063 struct sctp_stream *stream = &ulpq->asoc->stream;
1074 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
1081 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
1084 sctp_intl_reap_ordered(ulpq, sid);
1089 sctp_ulpq_flush(ulpq);
1204 static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1207 sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1209 sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
1211 sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
1214 static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1218 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
1223 __skb_unlink(pos, &ulpq->reasm);
1228 skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
1233 __skb_unlink(pos, &ulpq->reasm_uo);
1239 static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1242 sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1244 sctp_intl_reasm_flushtsn(ulpq, ftsn);
1246 if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
1247 sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
1250 static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1256 sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
1259 static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
1262 struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
1263 struct sctp_stream *stream = &ulpq->asoc->stream;
1268 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
1279 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
1284 sctp_intl_reap_ordered(ulpq, sid);
1287 static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1293 sctp_intl_skip(ulpq, ntohs(skip->stream),
1297 static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
1303 return sctp_ulpq_tail_event(ulpq, &temp);
1325 static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
1332 return sctp_enqueue_event(ulpq, &temp);
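
The lines above repeat one insertion pattern on ulpq->reasm, ulpq->reasm_uo and ulpq->lobby: peek the queue tail, append the new event if it sorts after everything already queued, otherwise walk the queue and insert the event before the first entry that sorts after it (skb_peek_tail, __skb_queue_tail, skb_queue_walk, __skb_queue_before). Below is a minimal user-space sketch of that queue discipline; struct frag, struct frag_queue and the sid/mid/fsn fields are illustrative stand-ins for the kernel's sk_buff queues and sctp_ulpevent, not the real definitions.

/*
 * Sketch of the sorted-insert pattern seen in the listing above.
 * The types and field names are stand-ins, not kernel definitions.
 */
#include <stdio.h>

struct frag {
	unsigned int sid;		/* stream id */
	unsigned int mid;		/* message id */
	unsigned int fsn;		/* fragment sequence number */
	struct frag *prev, *next;
};

struct frag_queue {
	struct frag head;		/* sentinel; head.next is the first entry */
};

static void fq_init(struct frag_queue *q)
{
	q->head.prev = q->head.next = &q->head;
}

static struct frag *fq_peek_tail(struct frag_queue *q)
{
	return q->head.prev == &q->head ? NULL : q->head.prev;
}

/* insert n just before pos (counterpart of __skb_queue_before) */
static void fq_insert_before(struct frag *pos, struct frag *n)
{
	n->prev = pos->prev;
	n->next = pos;
	pos->prev->next = n;
	pos->prev = n;
}

/* append at the tail (counterpart of __skb_queue_tail) */
static void fq_queue_tail(struct frag_queue *q, struct frag *n)
{
	fq_insert_before(&q->head, n);
}

/* ordering: stream id, then message id, then fragment number */
static int frag_before(const struct frag *a, const struct frag *b)
{
	if (a->sid != b->sid)
		return a->sid < b->sid;
	if (a->mid != b->mid)
		return a->mid < b->mid;
	return a->fsn < b->fsn;
}

/* store_reasm-style insert: keep the queue sorted at all times */
static void fq_store(struct frag_queue *q, struct frag *n)
{
	struct frag *pos, *tail = fq_peek_tail(q);

	/* common case: the new fragment sorts after everything queued */
	if (!tail || frag_before(tail, n)) {
		fq_queue_tail(q, n);
		return;
	}

	/* otherwise walk from the front and insert before the first
	 * entry that sorts after the new fragment
	 */
	for (pos = q->head.next; pos != &q->head; pos = pos->next) {
		if (frag_before(n, pos)) {
			fq_insert_before(pos, n);
			return;
		}
	}
	fq_queue_tail(q, n);
}

int main(void)
{
	struct frag f[3] = {
		{ .sid = 1, .mid = 7, .fsn = 2 },
		{ .sid = 1, .mid = 7, .fsn = 0 },
		{ .sid = 1, .mid = 7, .fsn = 1 },
	};
	struct frag_queue q;
	struct frag *pos;
	int i;

	fq_init(&q);
	for (i = 0; i < 3; i++)
		fq_store(&q, &f[i]);

	/* fragments come back out in fsn order: 0, 1, 2 */
	for (pos = q.head.next; pos != &q.head; pos = pos->next)
		printf("sid %u mid %u fsn %u\n", pos->sid, pos->mid, pos->fsn);

	return 0;
}

The real code additionally special-cases first fragments and uses wraparound-safe serial-number comparisons for MID/FSN; the sketch uses plain integer comparison only to show how the queues referenced by ulpq stay sorted on insert.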