// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions work with the state functions in sctp_sm_statefuns.c
 * to implement the state operations.  These functions implement the
 * steps which require modifying existing data structures.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Dajiang Zhang         <dajiang.zhang@nokia.com>
 *    Daisy Chang           <daisyc@us.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/stream_sched.h>

static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp);
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp);

/********************************************************************
 * Helper functions
 ********************************************************************/

/* A helper function for delayed processing of INET ECN CE bit. */
static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
				__u32 lowest_tsn)
{
	/* Save the TSN away for comparison when we receive CWR */

	asoc->last_ecne_tsn = lowest_tsn;
	asoc->need_ecne = 1;
}

/* Helper function for delayed processing of SCTP ECNE chunk.  */
/* RFC 2960 Appendix A
 *
 * RFC 2481 details a specific bit for a sender to send in
 * the header of its next outbound TCP segment to indicate to
 * its peer that it has reduced its congestion window.  This
 * is termed the CWR bit.  For SCTP the same indication is made
 * by including the CWR chunk.  This chunk contains one data
 * element, i.e. the TSN number that was sent in the ECNE chunk.
 * This element represents the lowest TSN number in the datagram
 * that was originally marked with the CE bit.
 */
static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
						__u32 lowest_tsn,
						struct sctp_chunk *chunk)
{
	struct sctp_chunk *repl;

	/* Our previously transmitted packet ran into some congestion
	 * so we should take action by reducing cwnd and ssthresh
	 * and then ACK our peer that we've done so by
	 * sending a CWR.
	 */

	/* First, try to determine if we want to actually lower
	 * our cwnd variables.  Only lower them if the ECNE looks more
	 * recent than the last response.
	 */
	if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
		struct sctp_transport *transport;

		/* Find which transport's congestion variables
		 * need to be adjusted.
		 */
		transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);

		/* Update the congestion variables. */
		if (transport)
			sctp_transport_lower_cwnd(transport,
						  SCTP_LOWER_CWND_ECNE);
		asoc->last_cwr_tsn = lowest_tsn;
	}

	/* Always try to quiet the other end.  In case of lost CWR,
	 * resend last_cwr_tsn.
	 */
	repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);

	/* If we run out of memory, it will look like a lost CWR.  We'll
	 * get back in sync eventually.
	 */
	return repl;
}

/* Helper function to do delayed processing of ECN CWR chunk.  */
static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
				 __u32 lowest_tsn)
{
	/* Turn off ECNE getting auto-prepended to every outgoing
	 * packet
	 */
	asoc->need_ecne = 0;
}

/* Generate SACK if necessary.  We call this at the end of a packet.  */
static int sctp_gen_sack(struct sctp_association *asoc, int force,
			 struct sctp_cmd_seq *commands)
{
	struct sctp_transport *trans = asoc->peer.last_data_from;
	__u32 ctsn, max_tsn_seen;
	struct sctp_chunk *sack;
	int error = 0;

	if (force ||
	    (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
	    (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
		asoc->peer.sack_needed = 1;

	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);

	/* From 12.2 Parameters necessary per association (i.e. the TCB):
	 *
	 * Ack State : This flag indicates if the next received packet
	 *	     : is to be responded to with a SACK. ...
	 *	     : When DATA chunks are out of order, SACKs are
	 *	     : not delayed (see Section 6).
	 *
	 * [This is actually not mentioned in Section 6, but we
	 * implement it here anyway. --piggy]
	 */
	if (max_tsn_seen != ctsn)
		asoc->peer.sack_needed = 1;

	/* From 6.2  Acknowledgement on Reception of DATA Chunks:
	 *
	 * Section 4.2 of [RFC2581] SHOULD be followed.  Specifically,
	 * an acknowledgement SHOULD be generated for at least every
	 * second packet (not every second DATA chunk) received, and
	 * SHOULD be generated within 200 ms of the arrival of any
	 * unacknowledged DATA chunk. ...
	 */
	if (!asoc->peer.sack_needed) {
		asoc->peer.sack_cnt++;

		/* Set the SACK delay timeout based on the
		 * SACK delay for the last transport
		 * data was received from, or the default
		 * for the association.
		 */
		if (trans) {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				trans->sackdelay;
		} else {
			/* We will need a SACK for the next packet.  */
			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
				asoc->peer.sack_needed = 1;

			asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
				asoc->sackdelay;
		}

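		/* Illustrative note, assuming the usual defaults of
		 * sackfreq == 2 and a 200 ms sackdelay: the first packet of
		 * a pair only bumps sack_cnt and (re)arms the delayed-SACK
		 * timer below; once sack_cnt reaches sackfreq - 1 the next
		 * packet finds sack_needed set, so the else branch further
		 * down emits a SACK immediately.
		 */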
		/* Restart the SACK timer. */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	} else {
		__u32 old_a_rwnd = asoc->a_rwnd;

		asoc->a_rwnd = asoc->rwnd;
		sack = sctp_make_sack(asoc);
		if (!sack) {
			asoc->a_rwnd = old_a_rwnd;
			goto nomem;
		}

		asoc->peer.sack_needed = 0;
		asoc->peer.sack_cnt = 0;

		sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));

		/* Stop the SACK timer.  */
		sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
				SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
	}

	return error;
nomem:
	error = -ENOMEM;
	return error;
}

/* When the T3-RTX timer expires, it calls this function to create the
 * relevant state machine event.
 */
void sctp_generate_t3_rtx_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, T3_rtx_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error;

	/* Check whether a task is in the sock.  */

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
			   asoc->state,
			   asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* This is an interface for producing timeout events.  It works
 * for timeouts which use the association as their parameter.
 */
static void sctp_generate_timeout_event(struct sctp_association *asoc,
					enum sctp_event_timeout timeout_type)
{
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy: timer %d\n", __func__,
			 timeout_type);

		/* Try again later.  */
		if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
			sctp_association_hold(asoc);
		goto out_unlock;
	}

	/* Is this association really dead and just waiting around for
	 * the timer to let go of the reference?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	/* Run through the state machine.  */
	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(timeout_type),
			   asoc->state, asoc->ep, asoc,
			   (void *)timeout_type, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_association_put(asoc);
}

static void sctp_generate_t1_cookie_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
}

static void sctp_generate_t1_init_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
}

static void sctp_generate_t2_shutdown_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
}

static void sctp_generate_t4_rto_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
}

static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t,
			   timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);

	sctp_generate_timeout_event(asoc,
				    SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);

} /* sctp_generate_t5_shutdown_guard_event() */

static void sctp_generate_autoclose_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
}

/* Generate a heartbeat event.  If the sock is busy, reschedule.  Make
 * sure that the transport is still valid.
 */
void sctp_generate_heartbeat_event(struct timer_list *t)
{
	struct sctp_transport *transport = from_timer(transport, t, hb_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	u32 elapsed, timeout;
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Check if we should still send the heartbeat or reschedule */
	elapsed = jiffies - transport->last_time_sent;
	timeout = sctp_transport_timeout(transport);
	if (elapsed < timeout) {
		elapsed = timeout - elapsed;
		if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

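/* Note on a pattern shared by the timer handlers in this file: each one
 * takes bh_lock_sock() and, if the socket is currently owned by a user
 * context, defers itself by re-arming its timer HZ/20 jiffies (~50 ms)
 * ahead rather than running the state machine under contention.  When
 * mod_timer() reports that the timer was not already pending (the normal
 * case inside an expired handler), an extra reference is taken on the
 * transport or association; the unconditional put on the way out then
 * drops the reference that was taken when the timer was originally armed.
 */
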
/* Handle the timeout of the ICMP protocol unreachable timer.  Trigger
 * the correct state machine transition that will close the association.
 */
void sctp_generate_proto_unreach_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, proto_unreach_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->proto_unreach_timer,
			       jiffies + (HZ/20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* Is this structure just waiting around for us to actually
	 * get destroyed?
	 */
	if (asoc->base.dead)
		goto out_unlock;

	sctp_do_sm(net, SCTP_EVENT_T_OTHER,
		   SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
		   asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Handle the timeout of the RE-CONFIG timer. */
void sctp_generate_reconf_event(struct timer_list *t)
{
	struct sctp_transport *transport =
		from_timer(transport, t, reconf_timer);
	struct sctp_association *asoc = transport->asoc;
	struct sock *sk = asoc->base.sk;
	struct net *net = sock_net(sk);
	int error = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later.  */
		if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
			sctp_transport_hold(transport);
		goto out_unlock;
	}

	/* This happens when the response arrives after the timer is triggered. */
	if (!asoc->strreset_chunk)
		goto out_unlock;

	error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
			   SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
			   asoc->state, asoc->ep, asoc,
			   transport, GFP_ATOMIC);

	if (error)
		sk->sk_err = -error;

out_unlock:
	bh_unlock_sock(sk);
	sctp_transport_put(transport);
}

/* Inject a SACK Timeout event into the state machine.  */
static void sctp_generate_sack_event(struct timer_list *t)
{
	struct sctp_association *asoc =
		from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);

	sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
}

sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
	[SCTP_EVENT_TIMEOUT_NONE] =		NULL,
	[SCTP_EVENT_TIMEOUT_T1_COOKIE] =	sctp_generate_t1_cookie_event,
	[SCTP_EVENT_TIMEOUT_T1_INIT] =		sctp_generate_t1_init_event,
	[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] =	sctp_generate_t2_shutdown_event,
	[SCTP_EVENT_TIMEOUT_T3_RTX] =		NULL,
	[SCTP_EVENT_TIMEOUT_T4_RTO] =		sctp_generate_t4_rto_event,
	[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
					sctp_generate_t5_shutdown_guard_event,
	[SCTP_EVENT_TIMEOUT_HEARTBEAT] =	NULL,
	[SCTP_EVENT_TIMEOUT_RECONF] =		NULL,
	[SCTP_EVENT_TIMEOUT_SACK] =		sctp_generate_sack_event,
	[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =	sctp_generate_autoclose_event,
};


/* RFC 2960 8.2 Path Failure Detection
 *
 * When its peer endpoint is multi-homed, an endpoint should keep an
 * error counter for each of the destination transport addresses of the
 * peer endpoint.
 *
 * Each time the T3-rtx timer expires on any address, or when a
 * HEARTBEAT sent to an idle address is not acknowledged within an RTO,
 * the error counter of that destination address will be incremented.
 * When the value in the error counter exceeds the protocol parameter
 * 'Path.Max.Retrans' of that destination address, the endpoint should
 * mark the destination transport address as inactive, and a
 * notification SHOULD be sent to the upper layer.
 *
 */
static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
					 struct sctp_association *asoc,
					 struct sctp_transport *transport,
					 int is_hb)
{
	/* The check for association's overall error counter exceeding the
	 * threshold is done in the state function.
	 */
	/* We are here due to a timer expiration.  If the timer was
	 * not a HEARTBEAT, then normal error tracking is done.
	 * If the timer was a heartbeat, we only increment error counts
	 * when we already have an outstanding HEARTBEAT that has not
	 * been acknowledged.
	 * Additionally, some transport states inhibit error increments.
	 */
	if (!is_hb) {
		asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	} else if (transport->hb_sent) {
		if (transport->state != SCTP_UNCONFIRMED)
			asoc->overall_error_count++;
		if (transport->state != SCTP_INACTIVE)
			transport->error_count++;
	}

	/* If the transport error count is greater than the pf_retrans
	 * threshold, and less than pathmaxrxt, and if the current state
	 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
	 * see SCTP Quick Failover Draft, section 5.1
	 */
	if (asoc->base.net->sctp.pf_enable &&
	    transport->state == SCTP_ACTIVE &&
	    transport->error_count < transport->pathmaxrxt &&
	    transport->error_count > transport->pf_retrans) {

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_PF,
					     0);

		/* Update the hb timer to resend a heartbeat every rto */
		sctp_transport_reset_hb_timer(transport);
	}

	if (transport->state != SCTP_INACTIVE &&
	    (transport->error_count > transport->pathmaxrxt)) {
		pr_debug("%s: association:%p transport addr:%pISpc failed\n",
			 __func__, asoc, &transport->ipaddr.sa);

		sctp_assoc_control_transport(asoc, transport,
					     SCTP_TRANSPORT_DOWN,
					     SCTP_FAILED_THRESHOLD);
	}

	if (transport->error_count > transport->ps_retrans &&
	    asoc->peer.primary_path == transport &&
	    asoc->peer.active_path != transport)
		sctp_assoc_set_primary(asoc, asoc->peer.active_path);

	/* E2) For the destination address for which the timer
	 * expires, set RTO <- RTO * 2 ("back off the timer").  The
	 * maximum value discussed in rule C7 above (RTO.max) may be
	 * used to provide an upper bound to this doubling operation.
	 *
	 * Special Case:  the first HB doesn't trigger exponential backoff.
	 * The first unacknowledged HB triggers it.  We do this with a flag
	 * that indicates that we have an outstanding HB.
	 */
	if (!is_hb || transport->hb_sent) {
		transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
		sctp_max_rto(asoc, transport);
	}
}

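/* Worked example of the E2 backoff above, assuming the RFC 4960 suggested
 * defaults of RTO.Initial = 3 s and RTO.Max = 60 s: repeated strikes with
 * no intervening RTT measurement double the transport's RTO as
 * 3 -> 6 -> 12 -> 24 -> 48 -> 60 -> 60 ... seconds, i.e. the doubling is
 * capped at rto_max by the min() above.
 */
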
/* Worker routine to handle INIT command failure.  */
static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 unsigned int error)
{
	struct sctp_ulpevent *event;

	event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);

	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Worker routine to handle SCTP_CMD_ASSOC_FAILED.  */
static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  enum sctp_event_type event_type,
				  union sctp_subtype subtype,
				  struct sctp_chunk *chunk,
				  unsigned int error)
{
	struct sctp_ulpevent *event;
	struct sctp_chunk *abort;

	/* Cancel any partial delivery in progress. */
	asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);

	if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, chunk,
						GFP_ATOMIC);
	else
		event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
						(__u16)error, 0, 0, NULL,
						GFP_ATOMIC);
	if (event)
		sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
				SCTP_ULPEVENT(event));

	if (asoc->overall_error_count >= asoc->max_retrans) {
		abort = sctp_make_violation_max_retrans(asoc, chunk);
		if (abort)
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(abort));
	}

	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
			SCTP_STATE(SCTP_STATE_CLOSED));

	/* SEND_FAILED sent later when cleaning up the association. */
	asoc->outqueue.error = error;
	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
}

/* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
 * inside the cookie.  In reality, this is only used for INIT-ACK processing
 * since all other cases use "temporary" associations and can do all
 * their work in statefuns directly.
 */
static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 struct sctp_init_chunk *peer_init,
				 gfp_t gfp)
{
	int error;

	/* We only process the init as a side effect in a single
	 * case.  This is when we process the INIT-ACK.  If we
	 * fail during INIT processing (due to malloc problems),
	 * just return the error and stop processing the stack.
	 */
	if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
		error = -ENOMEM;
	else
		error = 0;

	return error;
}

/* Helper function to break out starting up of heartbeat timers.  */
static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
				     struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Start a heartbeat timer for each transport on the association.
	 * Hold a reference on the transport to make sure none of
	 * the needed data structures go away.
	 */
	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
		sctp_transport_reset_hb_timer(t);
}

static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
				    struct sctp_association *asoc)
{
	struct sctp_transport *t;

	/* Stop all heartbeat timers. */

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->hb_timer))
			sctp_transport_put(t);
	}
}

/* Helper function to stop any pending T3-RTX timers */
static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
					struct sctp_association *asoc)
{
	struct sctp_transport *t;

	list_for_each_entry(t, &asoc->peer.transport_addr_list,
			    transports) {
		if (del_timer(&t->T3_rtx_timer))
			sctp_transport_put(t);
	}
}


/* Helper function to handle the reception of an HEARTBEAT ACK.  */
static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_transport *t,
				  struct sctp_chunk *chunk)
{
	struct sctp_sender_hb_info *hbinfo;
	int was_unconfirmed = 0;

	/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
	 * HEARTBEAT should clear the error counter of the destination
	 * transport address to which the HEARTBEAT was sent.
	 */
	t->error_count = 0;

	/*
	 * Although RFC4960 specifies that the overall error count must
	 * be cleared when a HEARTBEAT ACK is received, we make an
	 * exception while in SHUTDOWN PENDING.  If the peer keeps its
	 * window shut forever, we may never be able to transmit our
	 * outstanding data and rely on the retransmission limit being
	 * reached to shut down the association.
	 */
	if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
		t->asoc->overall_error_count = 0;

	/* Clear the hb_sent flag to signal that we had a good
	 * acknowledgement.
	 */
	t->hb_sent = 0;

	/* Mark the destination transport address as active if it is not so
	 * marked.
	 */
	if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
		was_unconfirmed = 1;
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);
	}

	if (t->state == SCTP_PF)
		sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
					     SCTP_HEARTBEAT_SUCCESS);

	/* HB-ACK was received for the proper HB.  Consider this
	 * forward progress.
	 */
	if (t->dst)
		sctp_transport_dst_confirm(t);

	/* The receiver of the HEARTBEAT ACK should also perform an
	 * RTT measurement for that destination transport address
	 * using the time value carried in the HEARTBEAT ACK chunk.
	 * If the transport's rto_pending variable has been cleared,
	 * it was most likely due to a retransmit.  However, we want
	 * to re-enable it to properly update the rto.
	 */
	if (t->rto_pending == 0)
		t->rto_pending = 1;

	hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
	sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));

	/* Update the heartbeat timer.  */
	sctp_transport_reset_hb_timer(t);

	if (was_unconfirmed && asoc->peer.transport_count == 1)
		sctp_transport_immediate_rtx(t);
}


/* Helper function to process the SACK command.  */
static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	int err = 0;

	if (sctp_outq_sack(&asoc->outqueue, chunk)) {
		/* There are no more TSNs awaiting SACK.  */
		err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER,
				 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
				 asoc->state, asoc->ep, asoc, NULL,
				 GFP_ATOMIC);
	}

	return err;
}

/* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
 * the transport for a shutdown chunk.
 */
static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	if (chunk->transport)
		t = chunk->transport;
	else {
		t = sctp_assoc_choose_alter_transport(asoc,
					      asoc->shutdown_last_sent_to);
		chunk->transport = t;
	}
	asoc->shutdown_last_sent_to = t;
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
}

static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
				  struct sctp_association *asoc,
				  struct sctp_association *new)
{
	struct net *net = asoc->base.net;
	struct sctp_chunk *abort;

	if (!sctp_assoc_update(asoc, new))
		return;

	abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
	if (abort) {
		sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
		sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
	}
	sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
	sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
			SCTP_PERR(SCTP_ERROR_RSRC_LOW));
	SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
	SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}

/* Helper function to change the state of an association. */
static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
			       struct sctp_association *asoc,
			       enum sctp_state state)
{
	struct sock *sk = asoc->base.sk;

	asoc->state = state;

	pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);

	if (sctp_style(sk, TCP)) {
		/* Change the sk->sk_state of a TCP-style socket that has
		 * successfully completed a connect() call.
		 */
		if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
			inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);

		/* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
		if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
		    sctp_sstate(sk, ESTABLISHED)) {
			inet_sk_set_state(sk, SCTP_SS_CLOSING);
			sk->sk_shutdown |= RCV_SHUTDOWN;
		}
	}

	if (sctp_state(asoc, COOKIE_WAIT)) {
		/* Reset init timeouts since they may have been
		 * increased due to timer expirations.
		 */
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
						asoc->rto_initial;
		asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
						asoc->rto_initial;
	}

	if (sctp_state(asoc, ESTABLISHED)) {
		kfree(asoc->peer.cookie);
		asoc->peer.cookie = NULL;
	}

	if (sctp_state(asoc, ESTABLISHED) ||
	    sctp_state(asoc, CLOSED) ||
	    sctp_state(asoc, SHUTDOWN_RECEIVED)) {
		/* Wake up any processes waiting in the asoc's wait queue in
		 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
		 */
		if (waitqueue_active(&asoc->wait))
			wake_up_interruptible(&asoc->wait);

		/* Wake up any processes waiting in the sk's sleep queue of
		 * a TCP-style or UDP-style peeled-off socket in
		 * sctp_wait_for_accept() or sctp_wait_for_packet().
		 * For a UDP-style socket, the waiters are woken up by the
		 * notifications.
		 */
		if (!sctp_style(sk, UDP))
			sk->sk_state_change(sk);
	}

	if (sctp_state(asoc, SHUTDOWN_PENDING) &&
	    !sctp_outq_is_empty(&asoc->outqueue))
		sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
}

/* Helper function to delete an association. */
static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
				struct sctp_association *asoc)
{
	struct sock *sk = asoc->base.sk;

	/* If it is a non-temporary association belonging to a TCP-style
	 * listening socket that is not closed, do not free it so that accept()
	 * can pick it up later.
	 */
	if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
		return;

	sctp_association_free(asoc);
}

/*
 * ADDIP Section 4.1 ASCONF Chunk Procedures
 * A4) Start a T-4 RTO timer, using the RTO value of the selected
 * destination address (we use active path instead of primary path just
 * because primary path may be inactive).
 */
static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
			      struct sctp_association *asoc,
			      struct sctp_chunk *chunk)
{
	struct sctp_transport *t;

	t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
	asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
	chunk->transport = t;
}

/* Process an incoming Operation Error Chunk. */
static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
				   struct sctp_association *asoc,
				   struct sctp_chunk *chunk)
{
	struct sctp_errhdr *err_hdr;
	struct sctp_ulpevent *ev;

	while (chunk->chunk_end > chunk->skb->data) {
		err_hdr = (struct sctp_errhdr *)(chunk->skb->data);

		ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
						     GFP_ATOMIC);
		if (!ev)
			return;

		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);

		switch (err_hdr->cause) {
		case SCTP_ERROR_UNKNOWN_CHUNK:
		{
			struct sctp_chunkhdr *unk_chunk_hdr;

			unk_chunk_hdr = (struct sctp_chunkhdr *)
							err_hdr->variable;
			switch (unk_chunk_hdr->type) {
			/* ADDIP 4.1 A9) If the peer responds to an ASCONF with
			 * an ERROR chunk reporting that it did not recognize
			 * the ASCONF chunk type, the sender of the ASCONF MUST
			 * NOT send any further ASCONF chunks and MUST stop its
			 * T-4 timer.
			 */
			case SCTP_CID_ASCONF:
				if (asoc->peer.asconf_capable == 0)
					break;

				asoc->peer.asconf_capable = 0;
				sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
				break;
			default:
				break;
			}
			break;
		}
		default:
			break;
		}
	}
}

/* Helper function to remove the association non-primary peer
 * transports.
 */
static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
{
	struct sctp_transport *t;
	struct list_head *temp;
	struct list_head *pos;

	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
		t = list_entry(pos, struct sctp_transport, transports);
		if (!sctp_cmp_addr_exact(&t->ipaddr,
					 &asoc->peer.primary_addr)) {
			sctp_assoc_rm_peer(asoc, t);
		}
	}
}

/* Helper function to set sk_err on a 1-1 style socket. */
static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
{
	struct sock *sk = asoc->base.sk;

	if (!sctp_style(sk, UDP))
		sk->sk_err = error;
}

/* Helper function to generate an association change event */
static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc,
				  u8 state)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
					     asoc->c.sinit_num_ostreams,
					     asoc->c.sinit_max_instreams,
					     NULL, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
				  struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}

/* Helper function to generate an adaptation indication event */
static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
				    struct sctp_association *asoc)
{
	struct sctp_ulpevent *ev;

	ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);

	if (ev)
		asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
}


static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
				     enum sctp_event_timeout timer,
				     char *name)
{
	struct sctp_transport *t;

	t = asoc->init_last_sent_to;
	asoc->init_err_counter++;

	if (t->init_sent_count > (asoc->init_cycle + 1)) {
		asoc->timeouts[timer] *= 2;
		if (asoc->timeouts[timer] > asoc->max_init_timeo) {
			asoc->timeouts[timer] = asoc->max_init_timeo;
		}
		asoc->init_cycle++;

		pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
			 " cycle:%d timeout:%ld\n", __func__, name,
			 asoc->init_err_counter, asoc->init_cycle,
			 asoc->timeouts[timer]);
	}

}

/* Send the whole message, chunk by chunk, to the outqueue.
 * This way the whole message is queued up and bundling is
 * encouraged for small fragments.
 */
static void sctp_cmd_send_msg(struct sctp_association *asoc,
			      struct sctp_datamsg *msg, gfp_t gfp)
{
	struct sctp_chunk *chunk;

	list_for_each_entry(chunk, &msg->chunks, frag_list)
		sctp_outq_tail(&asoc->outqueue, chunk, gfp);

	asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
}


/* These three macros allow us to pull the debugging code out of the
 * main flow of sctp_do_sm() to keep attention focused on the real
 * functionality there.
 */
#define debug_pre_sfn() \
	pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
		 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype),   \
		 asoc, sctp_state_tbl[state], state_fn->name)

#define debug_post_sfn() \
	pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
		 sctp_status_tbl[status])

#define debug_post_sfx() \
	pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
		 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
		 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])

/*
 * This is the master state machine processing function.
 *
 * If you want to understand all of lksctp, this is a
 * good place to start.
 */
int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
	       union sctp_subtype subtype, enum sctp_state state,
	       struct sctp_endpoint *ep, struct sctp_association *asoc,
	       void *event_arg, gfp_t gfp)
{
	typedef const char *(printfn_t)(union sctp_subtype);
	static printfn_t *table[] = {
		NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
	};
	printfn_t *debug_fn  __attribute__ ((unused)) = table[event_type];
	const struct sctp_sm_table_entry *state_fn;
	struct sctp_cmd_seq commands;
	enum sctp_disposition status;
	int error = 0;

	/* Look up the state function, run it, and then process the
	 * side effects.  These three steps are the heart of lksctp.
	 */
	state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);

	sctp_init_cmd_seq(&commands);

	debug_pre_sfn();
	status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
	debug_post_sfn();

	error = sctp_side_effects(event_type, subtype, state,
				  ep, &asoc, event_arg, status,
				  &commands, gfp);
	debug_post_sfx();

	return error;
}

/*****************************************************************
 * This is the master state function side effect processing function.
 *****************************************************************/
static int sctp_side_effects(enum sctp_event_type event_type,
			     union sctp_subtype subtype,
			     enum sctp_state state,
			     struct sctp_endpoint *ep,
			     struct sctp_association **asoc,
			     void *event_arg,
			     enum sctp_disposition status,
			     struct sctp_cmd_seq *commands,
			     gfp_t gfp)
{
	int error;

	/* FIXME - Most of the dispositions left today would be categorized
	 * as "exceptional" dispositions.  For those dispositions, it
	 * may not be proper to run through any of the commands at all.
	 * For example, the command interpreter might be run only with
	 * disposition SCTP_DISPOSITION_CONSUME.
	 */
	if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
					       ep, *asoc,
					       event_arg, status,
					       commands, gfp)))
		goto bail;

	switch (status) {
	case SCTP_DISPOSITION_DISCARD:
		pr_debug("%s: ignored sctp protocol event - state:%d, "
			 "event_type:%d, event_id:%d\n", __func__, state,
			 event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOMEM:
		/* We ran out of memory, so we need to discard this
		 * packet.
		 */
		/* BUG--we should now recover some memory, probably by
		 * reneging...
		 */
		error = -ENOMEM;
		break;

	case SCTP_DISPOSITION_DELETE_TCB:
	case SCTP_DISPOSITION_ABORT:
		/* This should now be a command. */
		*asoc = NULL;
		break;

	case SCTP_DISPOSITION_CONSUME:
		/*
		 * We should no longer have much work to do here as the
		 * real work has been done as explicit commands above.
		 */
		break;

	case SCTP_DISPOSITION_VIOLATION:
		net_err_ratelimited("protocol violation state %d chunkid %d\n",
				    state, subtype.chunk);
		break;

	case SCTP_DISPOSITION_NOT_IMPL:
		pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
			state, event_type, subtype.chunk);
		break;

	case SCTP_DISPOSITION_BUG:
		pr_err("bug in state %d, event_type %d, event_id %d\n",
		       state, event_type, subtype.chunk);
		BUG();
		break;

	default:
		pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
		       status, state, event_type, subtype.chunk);
		error = status;
		if (error >= 0)
			error = -EINVAL;
		WARN_ON_ONCE(1);
		break;
	}

bail:
	return error;
}

/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This is the side-effect interpreter.  */
static int sctp_cmd_interpreter(enum sctp_event_type event_type,
				union sctp_subtype subtype,
				enum sctp_state state,
				struct sctp_endpoint *ep,
				struct sctp_association *asoc,
				void *event_arg,
				enum sctp_disposition status,
				struct sctp_cmd_seq *commands,
				gfp_t gfp)
{
	struct sctp_sock *sp = sctp_sk(ep->base.sk);
	struct sctp_chunk *chunk = NULL, *new_obj;
	struct sctp_packet *packet;
	struct sctp_sackhdr sackh;
	struct timer_list *timer;
	struct sctp_transport *t;
	unsigned long timeout;
	struct sctp_cmd *cmd;
	int local_cork = 0;
	int error = 0;
	int force;

	if (SCTP_EVENT_T_TIMEOUT != event_type)
		chunk = event_arg;

	/* Note:  This whole file is a huge candidate for rework.
	 * For example, each command could either have its own handler, so
	 * the loop would look like:
	 *     while (cmds)
	 *         cmd->handle(x, y, z)
	 * --jgrimm
	 */
	while (NULL != (cmd = sctp_next_cmd(commands))) {
		switch (cmd->verb) {
		case SCTP_CMD_NOP:
			/* Do nothing. */
			break;

		case SCTP_CMD_NEW_ASOC:
			/* Register a new association.  */
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}

			/* Register with the endpoint.  */
			asoc = cmd->obj.asoc;
			BUG_ON(asoc->peer.primary_path == NULL);
			sctp_endpoint_add_asoc(ep, asoc);
			break;

		case SCTP_CMD_UPDATE_ASSOC:
			sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
			break;

		case SCTP_CMD_PURGE_OUTQUEUE:
			sctp_outq_teardown(&asoc->outqueue);
			break;

		case SCTP_CMD_DELETE_TCB:
			if (local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			/* Delete the current association.  */
			sctp_cmd_delete_tcb(commands, asoc);
			asoc = NULL;
			break;

		case SCTP_CMD_NEW_STATE:
			/* Enter a new state.  */
			sctp_cmd_new_state(commands, asoc, cmd->obj.state);
			break;

		case SCTP_CMD_REPORT_TSN:
			/* Record the arrival of a TSN.  */
			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
						 cmd->obj.u32, NULL);
			break;

		case SCTP_CMD_REPORT_FWDTSN:
			asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
			break;

		case SCTP_CMD_PROCESS_FWDTSN:
			asoc->stream.si->handle_ftsn(&asoc->ulpq,
						     cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_SACK:
			/* Generate a Selective ACK.
			 * The argument tells us whether to just count
			 * the packet and MAYBE generate a SACK, or
			 * force a SACK out.
			 */
			force = cmd->obj.i32;
			error = sctp_gen_sack(asoc, force, commands);
			break;

		case SCTP_CMD_PROCESS_SACK:
			/* Process an inbound SACK.  */
			error = sctp_cmd_process_sack(commands, asoc,
						      cmd->obj.chunk);
			break;

		case SCTP_CMD_GEN_INIT_ACK:
			/* Generate an INIT ACK chunk.  */
			new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
						     0);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}

			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_PEER_INIT:
			/* Process a unified INIT from the peer.
			 * Note: Only used during INIT-ACK processing.  If
			 * there is an error just return to the outer
			 * layer which will bail.
			 */
			error = sctp_cmd_process_init(commands, asoc, chunk,
						      cmd->obj.init, gfp);
			break;

		case SCTP_CMD_GEN_COOKIE_ECHO:
			/* Generate a COOKIE ECHO chunk.  */
			new_obj = sctp_make_cookie_echo(asoc, chunk);
			if (!new_obj) {
				if (cmd->obj.chunk)
					sctp_chunk_free(cmd->obj.chunk);
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));

			/* If there is an ERROR chunk to be sent along with
			 * the COOKIE_ECHO, send it, too.
			 */
			if (cmd->obj.chunk)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(cmd->obj.chunk));

			if (new_obj->transport) {
				new_obj->transport->init_sent_count++;
				asoc->init_last_sent_to = new_obj->transport;
			}

			/* FIXME - Eventually come up with a cleaner way to
			 * enable COOKIE-ECHO + DATA bundling during
			 * multihoming stale cookie scenarios.  The following
			 * command plays with asoc->peer.retran_path to
			 * avoid the problem of sending the COOKIE-ECHO and
			 * DATA in different paths, which could result
			 * in the association being ABORTed if the DATA chunk
			 * is processed first by the server.  Checking the
			 * init error counter simply causes this command
			 * to be executed only during failed attempts of
			 * association establishment.
			 */
			if ((asoc->peer.retran_path !=
			     asoc->peer.primary_path) &&
			    (asoc->init_err_counter > 0)) {
				sctp_add_cmd_sf(commands,
						SCTP_CMD_FORCE_PRIM_RETRAN,
						SCTP_NULL());
			}

			break;

		case SCTP_CMD_GEN_SHUTDOWN:
			/* Generate SHUTDOWN when in SHUTDOWN_SENT state.
			 * Reset error counts.
			 */
			asoc->overall_error_count = 0;

			/* Generate a SHUTDOWN chunk.  */
			new_obj = sctp_make_shutdown(asoc, chunk);
			if (!new_obj) {
				error = -ENOMEM;
				break;
			}
			sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
					SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_CHUNK_ULP:
			/* Send a chunk to the sockets layer.  */
			pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.chunk, &asoc->ulpq);

			asoc->stream.si->ulpevent_data(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_EVENT_ULP:
			/* Send a notification to the sockets layer.  */
			pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
				 __func__, cmd->obj.ulpevent, &asoc->ulpq);

			asoc->stream.si->enqueue_event(&asoc->ulpq,
						       cmd->obj.ulpevent);
			break;

		case SCTP_CMD_REPLY:
			/* If a caller has not already corked, do cork. */
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			/* Send a chunk to our peer.  */
			sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
			break;

		case SCTP_CMD_SEND_PKT:
			/* Send a full packet to our peer.  */
			packet = cmd->obj.packet;
			sctp_packet_transmit(packet, gfp);
			sctp_ootb_pkt_free(packet);
			break;

		case SCTP_CMD_T1_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T1_RTX);
			break;

		case SCTP_CMD_RETRAN:
			/* Mark a transport for retransmission.  */
			sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
					SCTP_RTXR_T3_RTX);
			break;

		case SCTP_CMD_ECN_CE:
			/* Do delayed CE processing.  */
			sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_ECN_ECNE:
			/* Do delayed ECNE processing. */
			new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
							chunk);
			if (new_obj)
				sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
						SCTP_CHUNK(new_obj));
			break;

		case SCTP_CMD_ECN_CWR:
			/* Do delayed CWR processing.  */
			sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
			break;

		case SCTP_CMD_SETUP_T2:
			sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_TIMER_START_ONCE:
			timer = &asoc->timers[cmd->obj.to];

			if (timer_pending(timer))
				break;
			fallthrough;

		case SCTP_CMD_TIMER_START:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			BUG_ON(!timeout);

			/* SCTP has a hard time with timer starts.  Because we
			 * process timer starts as side effects, it can be hard
			 * to tell whether we have already started a timer,
			 * which leads to BUG halts when we call add_timer().
			 * So here, instead of just starting the timer, if it
			 * is already pending we simply reduce it to the
			 * shorter of the two expiration times.
			 */
			if (!timer_pending(timer))
				sctp_association_hold(asoc);
			timer_reduce(timer, jiffies + timeout);
			break;

		case SCTP_CMD_TIMER_RESTART:
			timer = &asoc->timers[cmd->obj.to];
			timeout = asoc->timeouts[cmd->obj.to];
			if (!mod_timer(timer, jiffies + timeout))
				sctp_association_hold(asoc);
			break;

		case SCTP_CMD_TIMER_STOP:
			timer = &asoc->timers[cmd->obj.to];
			if (del_timer(timer))
				sctp_association_put(asoc);
			break;

		case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
			chunk = cmd->obj.chunk;
			t = sctp_assoc_choose_alter_transport(asoc,
						asoc->init_last_sent_to);
			asoc->init_last_sent_to = t;
			chunk->transport = t;
			t->init_sent_count++;
			/* Set the new transport as primary */
			sctp_assoc_set_primary(asoc, t);
			break;

		case SCTP_CMD_INIT_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.  Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_INIT,
						 "INIT");

			sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
			break;

		case SCTP_CMD_COOKIEECHO_RESTART:
			/* Do the needed accounting and updates
			 * associated with restarting an initialization
			 * timer.  Only multiply the timeout by two if
			 * all transports have been tried at the current
			 * timeout.
			 */
			sctp_cmd_t1_timer_update(asoc,
						 SCTP_EVENT_TIMEOUT_T1_COOKIE,
						 "COOKIE");

			/* If we've sent any data bundled with
			 * COOKIE-ECHO we need to resend.
			 */
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				sctp_retransmit_mark(&asoc->outqueue, t,
						     SCTP_RTXR_T1_RTX);
			}

			sctp_add_cmd_sf(commands,
					SCTP_CMD_TIMER_RESTART,
					SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
			break;

		case SCTP_CMD_INIT_FAILED:
			sctp_cmd_init_failed(commands, asoc, cmd->obj.u16);
			break;

		case SCTP_CMD_ASSOC_FAILED:
			sctp_cmd_assoc_failed(commands, asoc, event_type,
					      subtype, chunk, cmd->obj.u16);
			break;

		case SCTP_CMD_INIT_COUNTER_INC:
			asoc->init_err_counter++;
			break;

		case SCTP_CMD_INIT_COUNTER_RESET:
			asoc->init_err_counter = 0;
			asoc->init_cycle = 0;
			list_for_each_entry(t, &asoc->peer.transport_addr_list,
					    transports) {
				t->init_sent_count = 0;
			}
			break;

		case SCTP_CMD_REPORT_DUP:
			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
					     cmd->obj.u32);
			break;

		case SCTP_CMD_REPORT_BAD_TAG:
			pr_debug("%s: vtag mismatch!\n", __func__);
			break;

		case SCTP_CMD_STRIKE:
			/* Mark one strike against a transport.  */
			sctp_do_8_2_transport_strike(commands, asoc,
						     cmd->obj.transport, 0);
			break;

		case SCTP_CMD_TRANSPORT_IDLE:
			t = cmd->obj.transport;
			sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
			break;

		case SCTP_CMD_TRANSPORT_HB_SENT:
			t = cmd->obj.transport;
			sctp_do_8_2_transport_strike(commands, asoc,
						     t, 1);
			t->hb_sent = 1;
			break;

		case SCTP_CMD_TRANSPORT_ON:
			t = cmd->obj.transport;
			sctp_cmd_transport_on(commands, asoc, t, chunk);
			break;

		case SCTP_CMD_HB_TIMERS_START:
			sctp_cmd_hb_timers_start(commands, asoc);
			break;

		case SCTP_CMD_HB_TIMER_UPDATE:
			t = cmd->obj.transport;
			sctp_transport_reset_hb_timer(t);
			break;

		case SCTP_CMD_HB_TIMERS_STOP:
			sctp_cmd_hb_timers_stop(commands, asoc);
			break;

		case SCTP_CMD_REPORT_ERROR:
			error = cmd->obj.error;
			break;

		case SCTP_CMD_PROCESS_CTSN:
			/* Dummy up a SACK for processing. */
			sackh.cum_tsn_ack = cmd->obj.be32;
			sackh.a_rwnd = htonl(asoc->peer.rwnd +
					     asoc->outqueue.outstanding_bytes);
			sackh.num_gap_ack_blocks = 0;
			sackh.num_dup_tsns = 0;
			chunk->subh.sack_hdr = &sackh;
			sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
					SCTP_CHUNK(chunk));
			break;

		case SCTP_CMD_DISCARD_PACKET:
			/* We need to discard the whole packet.
			 * Uncork the queue since there might be
			 * responses pending
			 */
			chunk->pdiscard = 1;
			if (asoc) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			break;

		case SCTP_CMD_RTO_PENDING:
			t = cmd->obj.transport;
			t->rto_pending = 1;
			break;

		case SCTP_CMD_PART_DELIVER:
			asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
			break;

		case SCTP_CMD_RENEGE:
			asoc->stream.si->renege_events(&asoc->ulpq,
						       cmd->obj.chunk,
						       GFP_ATOMIC);
			break;

		case SCTP_CMD_SETUP_T4:
			sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
			break;

		case SCTP_CMD_PROCESS_OPERR:
			sctp_cmd_process_operr(commands, asoc, chunk);
			break;
		case SCTP_CMD_CLEAR_INIT_TAG:
			asoc->peer.i.init_tag = 0;
			break;
		case SCTP_CMD_DEL_NON_PRIMARY:
			sctp_cmd_del_non_primary(asoc);
			break;
		case SCTP_CMD_T3_RTX_TIMERS_STOP:
			sctp_cmd_t3_rtx_timers_stop(commands, asoc);
			break;
		case SCTP_CMD_FORCE_PRIM_RETRAN:
			t = asoc->peer.retran_path;
			asoc->peer.retran_path = asoc->peer.primary_path;
			sctp_outq_uncork(&asoc->outqueue, gfp);
			local_cork = 0;
			asoc->peer.retran_path = t;
			break;
		case SCTP_CMD_SET_SK_ERR:
			sctp_cmd_set_sk_err(asoc, cmd->obj.error);
			break;
		case SCTP_CMD_ASSOC_CHANGE:
			sctp_cmd_assoc_change(commands, asoc,
					      cmd->obj.u8);
			break;
		case SCTP_CMD_ADAPTATION_IND:
			sctp_cmd_adaptation_ind(commands, asoc);
			break;
		case SCTP_CMD_PEER_NO_AUTH:
			sctp_cmd_peer_no_auth(commands, asoc);
			break;

		case SCTP_CMD_ASSOC_SHKEY:
			error = sctp_auth_asoc_init_active_key(asoc,
						GFP_ATOMIC);
			break;
		case SCTP_CMD_UPDATE_INITTAG:
			asoc->peer.i.init_tag = cmd->obj.u32;
			break;
		case SCTP_CMD_SEND_MSG:
			if (!asoc->outqueue.cork) {
				sctp_outq_cork(&asoc->outqueue);
				local_cork = 1;
			}
			sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
			break;
		case SCTP_CMD_PURGE_ASCONF_QUEUE:
			sctp_asconf_queue_teardown(asoc);
			break;

		case SCTP_CMD_SET_ASOC:
			if (asoc && local_cork) {
				sctp_outq_uncork(&asoc->outqueue, gfp);
				local_cork = 0;
			}
			asoc = cmd->obj.asoc;
			break;

		default:
			pr_warn("Impossible command: %u\n",
				cmd->verb);
			break;
		}

		if (error) {
			cmd = sctp_next_cmd(commands);
			while (cmd) {
				if (cmd->verb == SCTP_CMD_REPLY)
					sctp_chunk_free(cmd->obj.chunk);
				cmd = sctp_next_cmd(commands);
			}
			break;
		}
	}

	/* If this is in response to a received chunk, wait until
	 * we are done with the packet to open the queue so that we don't
	 * send multiple packets in response to a single request.
	 */
	if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
		if (chunk->end_of_packet || chunk->singleton)
			sctp_outq_uncork(&asoc->outqueue, gfp);
	} else if (local_cork)
		sctp_outq_uncork(&asoc->outqueue, gfp);

	if (sp->data_ready_signalled)
		sp->data_ready_signalled = 0;

	return error;
}