Lines matching defs:ulpq (definitions and uses of the ulpq symbol in net/sctp/ulpqueue.c, the SCTP upper-layer-protocol queue)
32 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
36 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
41 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
44 memset(ulpq, 0, sizeof(struct sctp_ulpq));
46 ulpq->asoc = asoc;
47 skb_queue_head_init(&ulpq->reasm);
48 skb_queue_head_init(&ulpq->reasm_uo);
49 skb_queue_head_init(&ulpq->lobby);
50 ulpq->pd_mode = 0;
52 return ulpq;
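
sctp_ulpq_init() zeroes the structure, records the owning association, and readies the three queues: reasm for ordered fragments, reasm_uo for unordered fragments, and lobby for events waiting on stream sequence numbers. A minimal call-site sketch, assuming an association that embeds its ulpq the way struct sctp_association does:

	/* Sketch: attach the ULP queue when bringing up an association.
	 * sctp_ulpq_init() always returns the ulpq it was handed. */
	sctp_ulpq_init(&asoc->ulpq, asoc);
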
57 void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
62 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
67 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
72 while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
79 void sctp_ulpq_free(struct sctp_ulpq *ulpq)
81 sctp_ulpq_flush(ulpq);
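
All three flush loops at lines 62-72 share one drain pattern: pop an skb, recover the embedded event, and free it. A sketch of one loop, using the sctp_skb2event()/sctp_ulpevent_free() helpers this file relies on:

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		struct sctp_ulpevent *event = sctp_skb2event(skb);

		/* Freeing the event also releases the skb carrying it. */
		sctp_ulpevent_free(event);
	}
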
85 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
101 event = sctp_ulpq_reasm(ulpq, event);
110 event = sctp_ulpq_order(ulpq, event);
118 sctp_ulpq_tail_event(ulpq, &temp);
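
Lines 85-118 outline the DATA receive path: build an event from the chunk, push it through reassembly, then through per-stream ordering, and deliver whatever survives. A condensed sketch of that flow (error handling and statistics elided; sctp_ulpevent_make_rcvmsg() is the constructor the real function uses):

	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	event = sctp_ulpq_reasm(ulpq, event);	/* may hold the fragment back */
	if (event) {
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));
		if (event->msg_flags & MSG_EOR)	/* complete messages only */
			event = sctp_ulpq_order(ulpq, event);
	}
	if (event)
		sctp_ulpq_tail_event(ulpq, &temp);	/* hand to the socket */
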
165 /* Set the pd_mode on the socket and ulpq */
166 static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
168 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
171 ulpq->pd_mode = 1;
175 static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
177 ulpq->pd_mode = 0;
178 sctp_ulpq_reasm_drain(ulpq);
179 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
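
Partial delivery (PD) lets an oversized, still-incomplete message start flowing to the user under memory pressure. Entering PD mode flags both the socket and the ulpq; leaving it drains anything that became reassemblable and clears the socket-side state. A sketch of the pairing, consolidating the set side (lines 166-171) and clear side (lines 175-179):

	/* enter: sp is sctp_sk(ulpq->asoc->base.sk) */
	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;

	/* leave: reset, drain, and tell the socket layer */
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
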
182 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
184 struct sock *sk = ulpq->asoc->base.sk;
207 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
218 if (ulpq->pd_mode) {
252 sctp_ulpq_clear_pd(ulpq);
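
sctp_ulpq_tail_event() drops events the user never subscribed to (line 207) and, while in PD mode (line 218), steers anything that is not part of the partially delivered message into the socket's pd_lobby so the in-progress message is not interleaved with other data. A simplified sketch of that routing decision, assuming the MSG_NOTIFICATION and SCTP_DATA_*_FRAG flag names used elsewhere in SCTP:

	queue = &sk->sk_receive_queue;
	if (ulpq->pd_mode) {
		if ((event->msg_flags & MSG_NOTIFICATION) ||
		    (SCTP_DATA_NOT_FRAG ==
		     (event->msg_flags & SCTP_DATA_FRAG_MASK)))
			queue = &sctp_sk(sk)->pd_lobby;	/* park it */
		else	/* part of the PD message; EOR ends PD mode */
			clear_pd = event->msg_flags & MSG_EOR;
	}
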
273 static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
283 pos = skb_peek_tail(&ulpq->reasm);
285 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
293 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
298 skb_queue_walk(&ulpq->reasm, pos) {
307 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
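
sctp_ulpq_store_reasm() keeps the reasm queue sorted by TSN. The fast path appends when the new event's TSN is higher than the tail's; otherwise it walks from the head and inserts before the first event with a larger TSN. A sketch of that insertion, assuming event->tsn carries the fragment's TSN as in the surrounding code:

	ctsn = event->tsn;
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos || TSN_lt(sctp_skb2event(pos)->tsn, ctsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}
	skb_queue_walk(&ulpq->reasm, pos) {
		if (TSN_lt(ctsn, sctp_skb2event(pos)->tsn))
			break;	/* insert before the first larger TSN */
	}
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
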
399 static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
431 skb_queue_walk(&ulpq->reasm, pos) {
441 if (skb_queue_is_first(&ulpq->reasm, pos)) {
475 asoc = ulpq->asoc;
490 &ulpq->reasm,
493 sctp_ulpq_set_pd(ulpq);
499 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
500 &ulpq->reasm, first_frag, pos);
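
The retrieval walk at lines 431-500 scans the sorted queue for a run of FIRST_FRAG, consecutive MIDDLE_FRAGs, and a LAST_FRAG with consecutive TSNs, then collapses that span into one event via sctp_make_reassembled_event(). If nothing completes but the run already exceeds the partial-delivery point, PD mode is entered instead (line 493). A simplified sketch of the per-fragment state machine, omitting the PD bookkeeping; first_frag and next_tsn match the locals the real function uses:

	switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
	case SCTP_DATA_FIRST_FRAG:
		first_frag = pos;
		next_tsn = ctsn + 1;		/* expect this TSN next */
		break;
	case SCTP_DATA_MIDDLE_FRAG:
		if (first_frag && ctsn == next_tsn)
			next_tsn++;
		else
			first_frag = NULL;	/* broken run, restart */
		break;
	case SCTP_DATA_LAST_FRAG:
		if (first_frag && ctsn == next_tsn)
			goto found;		/* complete message */
		first_frag = NULL;
		break;
	}
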
507 static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
520 if (skb_queue_empty(&ulpq->reasm))
528 skb_queue_walk(&ulpq->reasm, pos) {
565 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
577 static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
588 sctp_ulpq_store_reasm(ulpq, event);
589 if (!ulpq->pd_mode)
590 retval = sctp_ulpq_retrieve_reassembled(ulpq);
598 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
600 retval = sctp_ulpq_retrieve_partial(ulpq);
607 static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
619 if (skb_queue_empty(&ulpq->reasm))
626 skb_queue_walk(&ulpq->reasm, pos) {
666 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
685 void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
691 if (skb_queue_empty(&ulpq->reasm))
694 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
704 __skb_unlink(pos, &ulpq->reasm);
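
A FORWARD-TSN moves the cumulative TSN point past abandoned data, so sctp_ulpq_reasm_flushtsn() purges every queued fragment at or below fwd_tsn. The walk uses the safe iterator since entries are unlinked mid-walk, and can stop at the first survivor because the queue is TSN-sorted:

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);

		if (TSN_lte(event->tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else {
			break;	/* sorted queue: nothing further qualifies */
		}
	}
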
716 static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
720 if (skb_queue_empty(&ulpq->reasm))
723 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
731 event = sctp_ulpq_order(ulpq, event);
737 sctp_ulpq_tail_event(ulpq, &temp);
745 static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
755 stream = &ulpq->asoc->stream;
760 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
779 __skb_unlink(pos, &ulpq->lobby);
787 static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
795 pos = skb_peek_tail(&ulpq->lobby);
797 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
808 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
813 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
820 skb_queue_walk(&ulpq->lobby, pos) {
833 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
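
The lobby mirrors the reasm queue's insertion scheme but sorts on (stream id, stream sequence number): append when the new event follows the tail, otherwise walk and insert before the first entry with a larger sid, or the same sid and a larger ssn. A sketch of the comparison step, assuming event->stream and event->ssn as used throughout this file:

	skb_queue_walk(&ulpq->lobby, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		if (cevent->stream > sid)
			break;		/* later stream */
		if (cevent->stream == sid && SSN_lt(ssn, cevent->ssn))
			break;		/* same stream, later ssn */
	}
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
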
836 static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
849 stream = &ulpq->asoc->stream;
856 sctp_ulpq_store_ordered(ulpq, event);
866 sctp_ulpq_retrieve_ordered(ulpq, event);
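
sctp_ulpq_order() applies only to ordered delivery; unordered events pass straight through. For ordered ones it compares the event's ssn against the stream's expected value: a mismatch parks the event in the lobby (line 856), a match advances the counter and pulls any now-in-order successors back out (line 866). A sketch of the decision, assuming the sctp_ssn_peek()/sctp_ssn_next() stream helpers:

	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;			/* no ordering wanted */

	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		sctp_ulpq_store_ordered(ulpq, event);	/* too early, park it */
		return NULL;
	}
	sctp_ssn_next(stream, in, sid);		/* advance the expectation */
	sctp_ulpq_retrieve_ordered(ulpq, event);	/* release successors */
	return event;
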
874 static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
881 struct sk_buff_head *lobby = &ulpq->lobby;
884 stream = &ulpq->asoc->stream;
936 sctp_ulpq_retrieve_ordered(ulpq, event);
937 sctp_ulpq_tail_event(ulpq, &temp);
944 void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
949 stream = &ulpq->asoc->stream;
961 sctp_ulpq_reap_ordered(ulpq, sid);
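
sctp_ulpq_skip() is the ordered half of FORWARD-TSN handling: if the stream's expected ssn has not already passed the skipped one, it fast-forwards the counter and then reaps whatever the lobby can now deliver. A sketch, assuming sctp_ssn_skip() from the stream helpers:

	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;				/* stale SSN, nothing to skip */
	sctp_ssn_skip(stream, in, sid, ssn);	/* stop expecting <= ssn */
	sctp_ulpq_reap_ordered(ulpq, sid);	/* flush newly deliverable events */
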
964 __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
973 tsnmap = &ulpq->asoc->peer.tsn_map;
1013 static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1015 return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1019 static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1021 return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1025 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1034 asoc = ulpq->asoc;
1040 if (ulpq->pd_mode)
1046 skb = skb_peek(&asoc->ulpq.reasm);
1060 event = sctp_ulpq_retrieve_first(ulpq);
1067 sctp_ulpq_tail_event(ulpq, &temp);
1068 sctp_ulpq_set_pd(ulpq);
1075 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1078 struct sctp_association *asoc = ulpq->asoc;
1086 freed = sctp_ulpq_renege_order(ulpq, needed);
1088 freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1093 int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1099 sctp_ulpq_partial_delivery(ulpq, gfp);
1101 sctp_ulpq_reasm_drain(ulpq);
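
sctp_ulpq_renege() is the memory-pressure path: when a new chunk cannot be queued, it frees bytes first from the ordering lobby (line 1086), then from the reassembly queue (line 1088); each reneged event also rewinds the peer's TSN map so the data gets retransmitted. Once room exists the chunk is queued, and if it completed no message, partial delivery may be started. A condensed sketch of the sequencing that omits the sk_rmem_schedule() accounting check present in the real function:

	freed = sctp_ulpq_renege_order(ulpq, needed);	/* lobby first */
	if (freed < needed)
		freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	if (freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);

		if (retval <= 0)	/* nothing delivered yet */
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)	/* a full message went up */
			sctp_ulpq_reasm_drain(ulpq);
	}
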
1112 void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1118 if (!ulpq->pd_mode)
1121 sk = ulpq->asoc->base.sk;
1123 if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1125 ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1132 if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
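
sctp_ulpq_abort_pd() ends partial delivery abnormally: if the user subscribed to partial-delivery events it builds a SCTP_PARTIAL_DELIVERY_ABORTED notification (lines 1123-1125), then clears PD state and, when anything became readable, wakes the socket. A sketch of the tail of that sequence, with ev holding the optional notification:

	if (ev)		/* queue the PDAPI notification for the reader */
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);	/* wake the blocked reader */
	}
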