// SPDX-License-Identifier: GPL-2.0-or-later
/* connection-level event handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Retransmit terminal ACK or ABORT of the previous call.
 */
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
				       struct sk_buff *skb,
				       unsigned int channel)
{
	struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
	struct rxrpc_channel *chan;
	struct msghdr msg;
	struct kvec iov[3];
	struct {
		struct rxrpc_wire_header whdr;
		union {
			__be32 abort_code;
			struct rxrpc_ackpacket ack;
		};
	} __attribute__((packed)) pkt;
	struct rxrpc_ackinfo ack_info;
	size_t len;
	int ret, ioc;
	u32 serial, mtu, call_id, padding;

	_enter("%d", conn->debug_id);

	if (sp && sp->hdr.type == RXRPC_PACKET_TYPE_ACK) {
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &pkt.ack, sizeof(pkt.ack)) < 0)
			return;
		if (pkt.ack.reason == RXRPC_ACK_PING_RESPONSE)
			return;
	}

	chan = &conn->channels[channel];

	/* If the last call got moved on whilst we were waiting to run, just
	 * ignore this packet.
	 */
	call_id = READ_ONCE(chan->last_call);
	/* Sync with __rxrpc_disconnect_call() */
	smp_rmb();
	if (skb && call_id != sp->hdr.callNumber)
		return;

	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	iov[0].iov_base = &pkt;
	iov[0].iov_len = sizeof(pkt.whdr);
	iov[1].iov_base = &padding;
	iov[1].iov_len = 3;
	iov[2].iov_base = &ack_info;
	iov[2].iov_len = sizeof(ack_info);

	pkt.whdr.epoch = htonl(conn->proto.epoch);
	pkt.whdr.cid = htonl(conn->proto.cid | channel);
	pkt.whdr.callNumber = htonl(call_id);
	pkt.whdr.seq = 0;
	pkt.whdr.type = chan->last_type;
	pkt.whdr.flags = conn->out_clientflag;
	pkt.whdr.userStatus = 0;
	pkt.whdr.securityIndex = conn->security_ix;
	pkt.whdr._rsvd = 0;
	pkt.whdr.serviceId = htons(conn->service_id);

	len = sizeof(pkt.whdr);
	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		pkt.abort_code = htonl(chan->last_abort);
		iov[0].iov_len += sizeof(pkt.abort_code);
		len += sizeof(pkt.abort_code);
		ioc = 1;
		break;

	case RXRPC_PACKET_TYPE_ACK:
		mtu = conn->params.peer->if_mtu;
		mtu -= conn->params.peer->hdrsize;
		pkt.ack.bufferSpace = 0;
		pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
		pkt.ack.firstPacket = htonl(chan->last_seq + 1);
		pkt.ack.previousPacket = htonl(chan->last_seq);
		pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
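		/* If we're responding to a received packet, report the ACK
		 * reason as DUPLICATE; a timer-driven resend (no skb) is
		 * reported as IDLE.
		 */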
		pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
		pkt.ack.nAcks = 0;
		ack_info.rxMTU = htonl(rxrpc_rx_mtu);
		ack_info.maxMTU = htonl(mtu);
		ack_info.rwind = htonl(rxrpc_rx_window_size);
		ack_info.jumbo_max = htonl(rxrpc_rx_jumbo_max);
		pkt.whdr.flags |= RXRPC_SLOW_START_OK;
		padding = 0;
		iov[0].iov_len += sizeof(pkt.ack);
		len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
		ioc = 3;
		break;

	default:
		return;
	}

	/* Resync with __rxrpc_disconnect_call() and check that the last call
	 * didn't get advanced whilst we were filling out the packets.
	 */
	smp_rmb();
	if (READ_ONCE(chan->last_call) != call_id)
		return;

	serial = atomic_inc_return(&conn->serial);
	pkt.whdr.serial = htonl(serial);

	switch (chan->last_type) {
	case RXRPC_PACKET_TYPE_ABORT:
		_proto("Tx ABORT %%%u { %d } [re]", serial, conn->abort_code);
		break;
	case RXRPC_PACKET_TYPE_ACK:
		trace_rxrpc_tx_ack(chan->call_debug_id, serial,
				   ntohl(pkt.ack.firstPacket),
				   ntohl(pkt.ack.serial),
				   pkt.ack.reason, 0);
		_proto("Tx ACK %%%u [re]", serial);
		break;
	}

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, ioc, len);
	conn->params.peer->last_tx_at = ktime_get_seconds();
	if (ret < 0)
		trace_rxrpc_tx_fail(chan->call_debug_id, serial, ret,
				    rxrpc_tx_point_call_final_resend);
	else
		trace_rxrpc_tx_packet(chan->call_debug_id, &pkt.whdr,
				      rxrpc_tx_point_call_final_resend);

	_leave("");
}

/*
 * pass a connection-level abort onto all calls on that connection
 */
static void rxrpc_abort_calls(struct rxrpc_connection *conn,
			      enum rxrpc_call_completion compl,
			      rxrpc_serial_t serial)
{
	struct rxrpc_call *call;
	int i;

	_enter("{%d},%x", conn->debug_id, conn->abort_code);

	spin_lock(&conn->bundle->channel_lock);

	for (i = 0; i < RXRPC_MAXCALLS; i++) {
		call = rcu_dereference_protected(
			conn->channels[i].call,
			lockdep_is_held(&conn->bundle->channel_lock));
		if (call) {
			if (compl == RXRPC_CALL_LOCALLY_ABORTED)
				trace_rxrpc_abort(call->debug_id,
						  "CON", call->cid,
						  call->call_id, 0,
						  conn->abort_code,
						  conn->error);
			else
				trace_rxrpc_rx_abort(call, serial,
						     conn->abort_code);
			rxrpc_set_call_completion(call, compl,
						  conn->abort_code,
						  conn->error);
		}
	}

	spin_unlock(&conn->bundle->channel_lock);
	_leave("");
}

/*
 * generate a connection-level abort
 */
static int rxrpc_abort_connection(struct rxrpc_connection *conn,
				  int error, u32 abort_code)
{
	struct rxrpc_wire_header whdr;
	struct msghdr msg;
	struct kvec iov[2];
	__be32 word;
	size_t len;
	u32 serial;
	int ret;

	_enter("%d,,%u,%u", conn->debug_id, error, abort_code);

	/* generate a connection-level abort */
	spin_lock_bh(&conn->state_lock);
	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		spin_unlock_bh(&conn->state_lock);
		_leave(" = 0 [already dead]");
		return 0;
	}

	conn->error = error;
	conn->abort_code = abort_code;
	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	spin_unlock_bh(&conn->state_lock);

	msg.msg_name = &conn->params.peer->srx.transport;
	msg.msg_namelen = conn->params.peer->srx.transport_len;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

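	/* Assemble the wire header.  A connection-level ABORT carries a call
	 * number of 0 as it isn't directed at any particular call.
	 */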
	whdr.epoch = htonl(conn->proto.epoch);
	whdr.cid = htonl(conn->proto.cid);
	whdr.callNumber = 0;
	whdr.seq = 0;
	whdr.type = RXRPC_PACKET_TYPE_ABORT;
	whdr.flags = conn->out_clientflag;
	whdr.userStatus = 0;
	whdr.securityIndex = conn->security_ix;
	whdr._rsvd = 0;
	whdr.serviceId = htons(conn->service_id);

	word = htonl(conn->abort_code);

	iov[0].iov_base = &whdr;
	iov[0].iov_len = sizeof(whdr);
	iov[1].iov_base = &word;
	iov[1].iov_len = sizeof(word);

	len = iov[0].iov_len + iov[1].iov_len;

	serial = atomic_inc_return(&conn->serial);
	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
	whdr.serial = htonl(serial);
	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);

	ret = kernel_sendmsg(conn->params.local->socket, &msg, iov, 2, len);
	if (ret < 0) {
		trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
				    rxrpc_tx_point_conn_abort);
		_debug("sendmsg failed: %d", ret);
		return -EAGAIN;
	}

	trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);

	conn->params.peer->last_tx_at = ktime_get_seconds();

	_leave(" = 0");
	return 0;
}

/*
 * mark a call as being on a now-secured channel
 * - must be called with BH's disabled.
 */
static void rxrpc_call_is_secure(struct rxrpc_call *call)
{
	_enter("%p", call);
	if (call) {
		write_lock_bh(&call->state_lock);
		if (call->state == RXRPC_CALL_SERVER_SECURING) {
			call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
			rxrpc_notify_socket(call);
		}
		write_unlock_bh(&call->state_lock);
	}
}

/*
 * connection-level Rx packet processor
 */
static int rxrpc_process_event(struct rxrpc_connection *conn,
			       struct sk_buff *skb,
			       u32 *_abort_code)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code;
	int loop, ret;

	if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
		_leave(" = -ECONNABORTED [%u]", conn->state);
		return -ECONNABORTED;
	}

	_enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);

	switch (sp->hdr.type) {
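	/* DATA and ACK packets seen here belong to a call that has already
	 * completed on its channel, so the peer probably missed our terminal
	 * ACK or ABORT - retransmit it.
	 */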
	case RXRPC_PACKET_TYPE_DATA:
	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_conn_retransmit_call(conn, skb,
					   sp->hdr.cid & RXRPC_CHANNELMASK);
		return 0;

	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets for now. */
		return 0;

	case RXRPC_PACKET_TYPE_ABORT:
		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
				  &wtmp, sizeof(wtmp)) < 0) {
			trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
					      tracepoint_string("bad_abort"));
			return -EPROTO;
		}
		abort_code = ntohl(wtmp);
		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);

		conn->error = -ECONNABORTED;
		conn->abort_code = abort_code;
		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
		set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
		return -ECONNABORTED;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		return conn->security->respond_to_challenge(conn, skb,
							    _abort_code);

	case RXRPC_PACKET_TYPE_RESPONSE:
		ret = conn->security->verify_response(conn, skb, _abort_code);
		if (ret < 0)
			return ret;

		ret = conn->security->init_connection_security(conn);
		if (ret < 0)
			return ret;

		ret = conn->security->prime_packet_security(conn);
		if (ret < 0)
			return ret;

		spin_lock(&conn->bundle->channel_lock);
		spin_lock_bh(&conn->state_lock);

		if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
			conn->state = RXRPC_CONN_SERVICE;
			spin_unlock_bh(&conn->state_lock);
			for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
				rxrpc_call_is_secure(
					rcu_dereference_protected(
						conn->channels[loop].call,
						lockdep_is_held(&conn->bundle->channel_lock)));
		} else {
			spin_unlock_bh(&conn->state_lock);
		}

		spin_unlock(&conn->bundle->channel_lock);
		return 0;

	default:
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_conn_pkt"));
		return -EPROTO;
	}
}

/*
 * set up security and issue a challenge
 */
static void rxrpc_secure_connection(struct rxrpc_connection *conn)
{
	u32 abort_code;
	int ret;

	_enter("{%d}", conn->debug_id);

	ASSERT(conn->security_ix != 0);
	ASSERT(conn->server_key);

	if (conn->security->issue_challenge(conn) < 0) {
		abort_code = RX_CALL_DEAD;
		ret = -ENOMEM;
		goto abort;
	}

	_leave("");
	return;

abort:
	_debug("abort %d, %d", ret, abort_code);
	rxrpc_abort_connection(conn, ret, abort_code);
	_leave(" [aborted]");
}

/*
 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
 */
void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
	unsigned long j = jiffies, next_j;
	unsigned int channel;
	bool set;

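	/* Check each channel for a pending final ACK, resending any that are
	 * due (or all of them if forced), and note the earliest outstanding
	 * expiry so that the connection timer can be rearmed.
	 */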
again:
	next_j = j + LONG_MAX;
	set = false;
	for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
		struct rxrpc_channel *chan = &conn->channels[channel];
		unsigned long ack_at;

		if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
			continue;

		smp_rmb(); /* vs rxrpc_disconnect_client_call */
		ack_at = READ_ONCE(chan->final_ack_at);

		if (time_before(j, ack_at) && !force) {
			if (time_before(ack_at, next_j)) {
				next_j = ack_at;
				set = true;
			}
			continue;
		}

		if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
				       &conn->flags))
			rxrpc_conn_retransmit_call(conn, NULL, channel);
	}

	j = jiffies;
	if (time_before_eq(next_j, j))
		goto again;
	if (set)
		rxrpc_reduce_conn_timer(conn, next_j);
}

/*
 * connection-level event processor
 */
static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
{
	struct sk_buff *skb;
	u32 abort_code = RX_PROTOCOL_ERROR;
	int ret;

	if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
		rxrpc_secure_connection(conn);

	/* Process delayed ACKs whose time has come. */
	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, false);

	/* go through the conn-level event packets, releasing the ref on this
	 * connection that each one has when we've finished with it */
	while ((skb = skb_dequeue(&conn->rx_queue))) {
		rxrpc_see_skb(skb, rxrpc_skb_seen);
		ret = rxrpc_process_event(conn, skb, &abort_code);
		switch (ret) {
		case -EPROTO:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
			goto protocol_error;
		case -ENOMEM:
		case -EAGAIN:
			goto requeue_and_leave;
		case -ECONNABORTED:
		default:
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			break;
		}
	}

	return;

requeue_and_leave:
	skb_queue_head(&conn->rx_queue, skb);
	return;

protocol_error:
	if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
		goto requeue_and_leave;
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	return;
}

void rxrpc_process_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, processor);

	rxrpc_see_connection(conn);

	if (__rxrpc_use_local(conn->params.local)) {
		rxrpc_do_process_connection(conn);
		rxrpc_unuse_local(conn->params.local);
	}

	rxrpc_put_connection(conn);
	_leave("");
	return;
}