Lines Matching refs:asoc
44 struct sctp_association *asoc,
53 struct sctp_association **asoc,
64 static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
69 asoc->last_ecne_tsn = lowest_tsn;
70 asoc->need_ecne = 1;
85 static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
101 if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
107 transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
113 asoc->last_cwr_tsn = lowest_tsn;
119 repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
128 static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
134 asoc->need_ecne = 0;
138 static int sctp_gen_sack(struct sctp_association *asoc, int force,
141 struct sctp_transport *trans = asoc->peer.last_data_from;
147 (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
149 asoc->peer.sack_needed = 1;
151 ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
152 max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
165 asoc->peer.sack_needed = 1;
175 if (!asoc->peer.sack_needed) {
176 asoc->peer.sack_cnt++;
185 if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
186 asoc->peer.sack_needed = 1;
188 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
192 if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
193 asoc->peer.sack_needed = 1;
195 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
196 asoc->sackdelay;
203 __u32 old_a_rwnd = asoc->a_rwnd;
205 asoc->a_rwnd = asoc->rwnd;
206 sack = sctp_make_sack(asoc);
208 asoc->a_rwnd = old_a_rwnd;
212 asoc->peer.sack_needed = 0;
213 asoc->peer.sack_cnt = 0;
235 struct sctp_association *asoc = transport->asoc;
236 struct sock *sk = asoc->base.sk;
255 asoc->state,
256 asoc->ep, asoc,
270 static void sctp_generate_timeout_event(struct sctp_association *asoc,
273 struct sock *sk = asoc->base.sk;
283 if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
284 sctp_association_hold(asoc);
291 if (asoc->base.dead)
297 asoc->state, asoc->ep, asoc,
305 sctp_association_put(asoc);
310 struct sctp_association *asoc =
311 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
313 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
318 struct sctp_association *asoc =
319 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);
321 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
326 struct sctp_association *asoc =
327 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);
329 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
334 struct sctp_association *asoc =
335 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);
337 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
342 struct sctp_association *asoc =
343 from_timer(asoc, t,
346 sctp_generate_timeout_event(asoc,
353 struct sctp_association *asoc =
354 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);
356 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
365 struct sctp_association *asoc = transport->asoc;
366 struct sock *sk = asoc->base.sk;
393 asoc->state, asoc->ep, asoc,
411 struct sctp_association *asoc = transport->asoc;
412 struct sock *sk = asoc->base.sk;
429 if (asoc->base.dead)
434 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
446 struct sctp_association *asoc = transport->asoc;
447 struct sock *sk = asoc->base.sk;
462 if (!asoc->strreset_chunk)
467 asoc->state, asoc->ep, asoc,
481 struct sctp_association *asoc =
482 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);
484 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
519 struct sctp_association *asoc,
534 asoc->overall_error_count++;
539 asoc->overall_error_count++;
549 if (asoc->base.net->sctp.pf_enable &&
554 sctp_assoc_control_transport(asoc, transport,
565 __func__, asoc, &transport->ipaddr.sa);
567 sctp_assoc_control_transport(asoc, transport,
573 asoc->peer.primary_path == transport &&
574 asoc->peer.active_path != transport)
575 sctp_assoc_set_primary(asoc, asoc->peer.active_path);
587 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
588 sctp_max_rto(asoc, transport);
594 struct sctp_association *asoc,
599 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
611 asoc->outqueue.error = error;
617 struct sctp_association *asoc,
627 asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
630 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
634 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
641 if (asoc->overall_error_count >= asoc->max_retrans) {
642 abort = sctp_make_violation_max_retrans(asoc, chunk);
652 asoc->outqueue.error = error;
662 struct sctp_association *asoc,
674 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
684 struct sctp_association *asoc)
692 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
697 struct sctp_association *asoc)
703 list_for_each_entry(t, &asoc->peer.transport_addr_list,
712 struct sctp_association *asoc)
716 list_for_each_entry(t, &asoc->peer.transport_addr_list,
726 struct sctp_association *asoc,
747 if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
748 t->asoc->overall_error_count = 0;
760 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
765 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
790 if (was_unconfirmed && asoc->peer.transport_count == 1)
797 struct sctp_association *asoc,
802 if (sctp_outq_sack(&asoc->outqueue, chunk)) {
804 err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
806 asoc->state, asoc->ep, asoc, NULL,
817 struct sctp_association *asoc,
825 t = sctp_assoc_choose_alter_transport(asoc,
826 asoc->shutdown_last_sent_to);
829 asoc->shutdown_last_sent_to = t;
830 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
834 struct sctp_association *asoc,
837 struct net *net = asoc->base.net;
840 if (!sctp_assoc_update(asoc, new))
843 abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
857 struct sctp_association *asoc,
860 struct sock *sk = asoc->base.sk;
862 asoc->state = state;
864 pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);
870 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
874 if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
881 if (sctp_state(asoc, COOKIE_WAIT)) {
885 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
886 asoc->rto_initial;
887 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
888 asoc->rto_initial;
891 if (sctp_state(asoc, ESTABLISHED)) {
892 kfree(asoc->peer.cookie);
893 asoc->peer.cookie = NULL;
896 if (sctp_state(asoc, ESTABLISHED) ||
897 sctp_state(asoc, CLOSED) ||
898 sctp_state(asoc, SHUTDOWN_RECEIVED)) {
899 /* Wake up any processes waiting in the asoc's wait queue in
902 if (waitqueue_active(&asoc->wait))
903 wake_up_interruptible(&asoc->wait);
915 if (sctp_state(asoc, SHUTDOWN_PENDING) &&
916 !sctp_outq_is_empty(&asoc->outqueue))
917 sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
922 struct sctp_association *asoc)
924 struct sock *sk = asoc->base.sk;
931 (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
934 sctp_association_free(asoc);
944 struct sctp_association *asoc,
949 t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
950 asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
956 struct sctp_association *asoc,
965 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
970 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
987 if (asoc->peer.asconf_capable == 0)
990 asoc->peer.asconf_capable = 0;
1008 static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
1014 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1017 &asoc->peer.primary_addr)) {
1018 sctp_assoc_rm_peer(asoc, t);
1024 static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
1026 struct sock *sk = asoc->base.sk;
1034 struct sctp_association *asoc,
1039 ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
1040 asoc->c.sinit_num_ostreams,
1041 asoc->c.sinit_max_instreams,
1044 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1048 struct sctp_association *asoc)
1052 ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
1054 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1059 struct sctp_association *asoc)
1063 ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
1066 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1070 static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
1076 t = asoc->init_last_sent_to;
1077 asoc->init_err_counter++;
1079 if (t->init_sent_count > (asoc->init_cycle + 1)) {
1080 asoc->timeouts[timer] *= 2;
1081 if (asoc->timeouts[timer] > asoc->max_init_timeo) {
1082 asoc->timeouts[timer] = asoc->max_init_timeo;
1084 asoc->init_cycle++;
1088 asoc->init_err_counter, asoc->init_cycle,
1089 asoc->timeouts[timer]);
1098 static void sctp_cmd_send_msg(struct sctp_association *asoc,
1104 sctp_outq_tail(&asoc->outqueue, chunk, gfp);
1106 asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
1115 pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
1117 asoc, sctp_state_tbl[state], state_fn->name)
1120 pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
1124 pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
1125 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
1126 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
1136 struct sctp_endpoint *ep, struct sctp_association *asoc,
1157 status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
1161 ep, &asoc, event_arg, status,
1175 struct sctp_association **asoc,
1190 ep, *asoc,
1215 *asoc = NULL;
1264 struct sctp_association *asoc,
1301 sctp_outq_uncork(&asoc->outqueue, gfp);
1306 asoc = cmd->obj.asoc;
1307 BUG_ON(asoc->peer.primary_path == NULL);
1308 sctp_endpoint_add_asoc(ep, asoc);
1312 sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
1316 sctp_outq_teardown(&asoc->outqueue);
1321 sctp_outq_uncork(&asoc->outqueue, gfp);
1325 sctp_cmd_delete_tcb(commands, asoc);
1326 asoc = NULL;
1331 sctp_cmd_new_state(commands, asoc, cmd->obj.state);
1336 error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
1341 asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
1345 asoc->stream.si->handle_ftsn(&asoc->ulpq,
1356 error = sctp_gen_sack(asoc, force, commands);
1361 error = sctp_cmd_process_sack(commands, asoc,
1367 new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
1384 error = sctp_cmd_process_init(commands, asoc, chunk,
1390 new_obj = sctp_make_cookie_echo(asoc, chunk);
1409 asoc->init_last_sent_to = new_obj->transport;
1415 * command plays with asoc->peer.retran_path to
1424 if ((asoc->peer.retran_path !=
1425 asoc->peer.primary_path) &&
1426 (asoc->init_err_counter > 0)) {
1438 asoc->overall_error_count = 0;
1441 new_obj = sctp_make_shutdown(asoc, chunk);
1453 __func__, cmd->obj.chunk, &asoc->ulpq);
1455 asoc->stream.si->ulpevent_data(&asoc->ulpq,
1463 __func__, cmd->obj.ulpevent, &asoc->ulpq);
1465 asoc->stream.si->enqueue_event(&asoc->ulpq,
1471 if (!asoc->outqueue.cork) {
1472 sctp_outq_cork(&asoc->outqueue);
1476 sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
1488 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1494 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1500 sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
1505 new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
1514 sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
1518 sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
1522 timer = &asoc->timers[cmd->obj.to];
1529 timer = &asoc->timers[cmd->obj.to];
1530 timeout = asoc->timeouts[cmd->obj.to];
1542 sctp_association_hold(asoc);
1547 timer = &asoc->timers[cmd->obj.to];
1548 timeout = asoc->timeouts[cmd->obj.to];
1550 sctp_association_hold(asoc);
1554 timer = &asoc->timers[cmd->obj.to];
1556 sctp_association_put(asoc);
1561 t = sctp_assoc_choose_alter_transport(asoc,
1562 asoc->init_last_sent_to);
1563 asoc->init_last_sent_to = t;
1567 sctp_assoc_set_primary(asoc, t);
1577 sctp_cmd_t1_timer_update(asoc,
1592 sctp_cmd_t1_timer_update(asoc,
1599 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1601 sctp_retransmit_mark(&asoc->outqueue, t,
1611 sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
1615 sctp_cmd_assoc_failed(commands, asoc, event_type,
1620 asoc->init_err_counter++;
1624 asoc->init_err_counter = 0;
1625 asoc->init_cycle = 0;
1626 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1633 sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
1643 sctp_do_8_2_transport_strike(commands, asoc,
1654 sctp_do_8_2_transport_strike(commands, asoc,
1661 sctp_cmd_transport_on(commands, asoc, t, chunk);
1665 sctp_cmd_hb_timers_start(commands, asoc);
1674 sctp_cmd_hb_timers_stop(commands, asoc);
1684 sackh.a_rwnd = htonl(asoc->peer.rwnd +
1685 asoc->outqueue.outstanding_bytes);
1699 if (asoc) {
1700 sctp_outq_uncork(&asoc->outqueue, gfp);
1711 asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
1715 asoc->stream.si->renege_events(&asoc->ulpq,
1721 sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
1725 sctp_cmd_process_operr(commands, asoc, chunk);
1728 asoc->peer.i.init_tag = 0;
1731 sctp_cmd_del_non_primary(asoc);
1734 sctp_cmd_t3_rtx_timers_stop(commands, asoc);
1737 t = asoc->peer.retran_path;
1738 asoc->peer.retran_path = asoc->peer.primary_path;
1739 sctp_outq_uncork(&asoc->outqueue, gfp);
1741 asoc->peer.retran_path = t;
1744 sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1747 sctp_cmd_assoc_change(commands, asoc,
1751 sctp_cmd_adaptation_ind(commands, asoc);
1754 sctp_cmd_peer_no_auth(commands, asoc);
1758 error = sctp_auth_asoc_init_active_key(asoc,
1762 asoc->peer.i.init_tag = cmd->obj.u32;
1765 if (!asoc->outqueue.cork) {
1766 sctp_outq_cork(&asoc->outqueue);
1769 sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
1772 sctp_asconf_queue_teardown(asoc);
1776 if (asoc && local_cork) {
1777 sctp_outq_uncork(&asoc->outqueue, gfp);
1780 asoc = cmd->obj.asoc;
1804 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1806 sctp_outq_uncork(&asoc->outqueue, gfp);
1808 sctp_outq_uncork(&asoc->outqueue, gfp);
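
The matches at source lines 310–356 and 481–484 all follow the same per-timeout callback shape in what appears to be the SCTP side-effect/state-machine glue code: each timer callback recovers the owning sctp_association from the embedded timer_list element via from_timer() (a container_of() wrapper) and forwards to sctp_generate_timeout_event(). Below is a minimal userspace sketch of that pattern only; the demo_* struct, enum, and function names are invented for illustration, and the typeof-based macros assume GCC/Clang, as in the kernel.

```c
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(): recover a pointer
 * to the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* from_timer() in the kernel is a container_of() wrapper keyed on the
 * timer field name; timers[] is an array, so each callback names the
 * exact element it was armed with, as the matches above show. */
#define from_timer(var, callback_timer, timer_fieldname) \
	container_of(callback_timer, typeof(*var), timer_fieldname)

/* Hypothetical stand-ins for the kernel types referenced in the listing. */
struct timer_list { int pending; };

enum demo_timeout {
	DEMO_TIMEOUT_T1_INIT,
	DEMO_TIMEOUT_SACK,
	DEMO_NUM_TIMEOUTS,
};

struct demo_association {
	int id;
	struct timer_list timers[DEMO_NUM_TIMEOUTS];
};

static void demo_generate_timeout_event(struct demo_association *asoc,
					enum demo_timeout type)
{
	printf("asoc %d: timeout %d fired\n", asoc->id, type);
}

/* Mirrors the shape of sctp_generate_t1_init_event() and friends: a thin
 * per-timeout callback that recovers the association from the timer it
 * was armed with and funnels into the shared timeout handler. */
static void demo_generate_t1_init_event(struct timer_list *t)
{
	struct demo_association *asoc =
		from_timer(asoc, t, timers[DEMO_TIMEOUT_T1_INIT]);

	demo_generate_timeout_event(asoc, DEMO_TIMEOUT_T1_INIT);
}

int main(void)
{
	struct demo_association a = { .id = 1 };

	/* Simulate the T1-INIT timer expiring. */
	demo_generate_t1_init_event(&a.timers[DEMO_TIMEOUT_T1_INIT]);
	return 0;
}
```

One timer callback per timeout type keeps the timer bookkeeping (hold/put of the association, dead-association checks) in the shared handler, while the array-element from_timer() lets each callback identify which timeout it services without storing a back-pointer in every timer.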