Lines matching refs:call (references to 'call' in the rxrpc packet-input code):

27 			      struct rxrpc_call *call, rxrpc_seq_t seq)
29 if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
30 set_bit(RXRPC_CALL_EV_ABORT, &call->events);
31 rxrpc_queue_call(call);
38 static void rxrpc_congestion_management(struct rxrpc_call *call,
44 unsigned int cumulative_acks = call->cong_cumul_acks;
45 unsigned int cwnd = call->cong_cwnd;
49 (call->tx_top - call->tx_hard_ack) - summary->nr_acks;
51 if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
53 call->cong_ssthresh = max_t(unsigned int,
56 if (cwnd >= call->cong_ssthresh &&
57 call->cong_mode == RXRPC_CALL_SLOW_START) {
58 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
59 call->cong_tstamp = skb->tstamp;
69 summary->mode = call->cong_mode;
70 summary->cwnd = call->cong_cwnd;
71 summary->ssthresh = call->cong_ssthresh;
73 summary->dup_acks = call->cong_dup_acks;
75 switch (call->cong_mode) {
81 if (cwnd >= call->cong_ssthresh) {
82 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
83 call->cong_tstamp = skb->tstamp;
94 if (call->peer->rtt_count == 0)
97 ktime_add_us(call->cong_tstamp,
98 call->peer->srtt_us >> 3)))
101 call->cong_tstamp = skb->tstamp;
112 call->cong_dup_acks = 1;
113 if (call->cong_extra > 1)
114 call->cong_extra = 1;
118 call->cong_dup_acks++;
119 if (call->cong_dup_acks < 3)
123 call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
124 call->cong_ssthresh = max_t(unsigned int,
126 cwnd = call->cong_ssthresh + 3;
127 call->cong_extra = 0;
128 call->cong_dup_acks = 0;
136 call->cong_dup_acks++;
137 if (call->cong_dup_acks == 2) {
139 call->cong_dup_acks = 0;
144 cwnd = call->cong_ssthresh;
157 call->cong_dup_acks = 0;
158 call->cong_extra = 0;
159 call->cong_tstamp = skb->tstamp;
160 if (cwnd < call->cong_ssthresh)
161 call->cong_mode = RXRPC_CALL_SLOW_START;
163 call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
169 call->cong_cwnd = cwnd;
170 call->cong_cumul_acks = cumulative_acks;
171 trace_rxrpc_congest(call, summary, acked_serial, change);
172 if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
173 rxrpc_queue_call(call);
178 call->cong_mode = RXRPC_CALL_PACKET_LOSS;
179 call->cong_dup_acks = 0;
186 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
188 summary->nr_acks != call->tx_top - call->tx_hard_ack) {
189 call->cong_extra++;
190 wake_up(&call->waitq);
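
The lines above are from rxrpc_congestion_management(), which keeps a TCP-like congestion state per call: slow start, congestion avoidance, fast retransmit after three duplicate ACKs, and a packet-loss mode. The following is a minimal userspace sketch of those mode transitions only; the enum, struct and function names are invented for illustration and are not the kernel's.

#include <stdio.h>

/* Hypothetical, simplified model of the per-call congestion state. */
enum cong_mode {
	SLOW_START,
	CONGEST_AVOIDANCE,
	PACKET_LOSS,
	FAST_RETRANSMIT,
};

struct cong_state {
	enum cong_mode mode;
	unsigned int cwnd;	/* congestion window, in packets */
	unsigned int ssthresh;	/* slow-start threshold */
	unsigned int dup_acks;	/* consecutive duplicate ACKs seen */
};

/* Grow the window on a new cumulative ACK. */
static void on_new_ack(struct cong_state *c, unsigned int acked)
{
	c->dup_acks = 0;
	if (c->mode == SLOW_START) {
		c->cwnd += acked;		/* exponential growth */
		if (c->cwnd >= c->ssthresh)
			c->mode = CONGEST_AVOIDANCE;
	} else if (c->mode == CONGEST_AVOIDANCE) {
		c->cwnd += 1;			/* roughly linear growth */
	} else {
		/* Leaving loss recovery: deflate to the threshold. */
		c->cwnd = c->ssthresh;
		c->mode = c->cwnd < c->ssthresh ? SLOW_START : CONGEST_AVOIDANCE;
	}
}

/* Count duplicate ACKs; three in a row triggers fast retransmit. */
static void on_dup_ack(struct cong_state *c)
{
	if (++c->dup_acks < 3)
		return;
	c->ssthresh = c->cwnd > 2 ? c->cwnd / 2 : 1;
	c->cwnd = c->ssthresh + 3;
	c->dup_acks = 0;
	c->mode = FAST_RETRANSMIT;
}

int main(void)
{
	struct cong_state c = { SLOW_START, 4, 8, 0 };

	on_new_ack(&c, 4);				/* reaches ssthresh */
	on_dup_ack(&c); on_dup_ack(&c); on_dup_ack(&c);	/* triple dup ACK */
	printf("mode=%d cwnd=%u ssthresh=%u\n", c.mode, c.cwnd, c.ssthresh);
	return 0;
}
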
198 static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
206 if (call->acks_lowest_nak == call->tx_hard_ack) {
207 call->acks_lowest_nak = to;
208 } else if (before_eq(call->acks_lowest_nak, to)) {
210 call->acks_lowest_nak = to;
213 spin_lock(&call->lock);
215 while (before(call->tx_hard_ack, to)) {
216 call->tx_hard_ack++;
217 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
218 skb = call->rxtx_buffer[ix];
219 annotation = call->rxtx_annotations[ix];
221 call->rxtx_buffer[ix] = NULL;
222 call->rxtx_annotations[ix] = 0;
227 set_bit(RXRPC_CALL_TX_LAST, &call->flags);
234 spin_unlock(&call->lock);
236 trace_rxrpc_transmit(call, (rot_last ?
239 wake_up(&call->waitq);
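
rxrpc_rotate_tx_window() above advances tx_hard_ack through a power-of-two ring of transmitted skbs, indexing each slot with seq & RXRPC_RXTX_BUFF_MASK and releasing it once the peer has hard-ACK'd it. A standalone sketch of that rotation, using a made-up 8-slot ring rather than the kernel's buffer, assuming no wrap between hard_ack and the target:

#include <stdio.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

/* Hypothetical stand-in for the per-call Tx ring: slot[seq & mask]
 * holds a packet (here just a non-zero tag) until it is hard-ACK'd. */
static unsigned int slot[RING_SIZE];

/* Advance the hard-ACK point to 'to', releasing each acked slot. */
static void rotate_tx_window(unsigned int *hard_ack, unsigned int to)
{
	while (*hard_ack != to) {
		unsigned int seq = ++(*hard_ack);
		unsigned int ix = seq & RING_MASK;

		printf("release seq %u from slot %u (tag %u)\n",
		       seq, ix, slot[ix]);
		slot[ix] = 0;		/* slot is now free for reuse */
	}
}

int main(void)
{
	unsigned int hard_ack = 4, top = 9, seq;

	for (seq = hard_ack + 1; seq <= top; seq++)
		slot[seq & RING_MASK] = seq;	/* "transmit" 5..9 */

	rotate_tx_window(&hard_ack, 7);		/* peer hard-ACKs up to 7 */
	return 0;
}
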
252 * End the transmission phase of a call.
257 static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
262 ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
264 write_lock(&call->state_lock);
266 state = call->state;
271 call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
273 call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
277 __rxrpc_call_completed(call);
278 state = call->state;
285 write_unlock(&call->state_lock);
287 trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
289 trace_rxrpc_transmit(call, rxrpc_transmit_end);
294 write_unlock(&call->state_lock);
295 kdebug("end_tx %s", rxrpc_call_states[call->state]);
296 rxrpc_proto_abort(abort_why, call, call->tx_top);
301 * Begin the reply reception phase of a call.
303 static bool rxrpc_receiving_reply(struct rxrpc_call *call)
307 rxrpc_seq_t top = READ_ONCE(call->tx_top);
309 if (call->ackr_reason) {
310 spin_lock_bh(&call->lock);
311 call->ackr_reason = 0;
312 spin_unlock_bh(&call->lock);
315 WRITE_ONCE(call->resend_at, timo);
316 WRITE_ONCE(call->ack_at, timo);
317 trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
320 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
321 if (!rxrpc_rotate_tx_window(call, top, &summary)) {
322 rxrpc_proto_abort("TXL", call, top);
326 if (!rxrpc_end_tx_phase(call, true, "ETD"))
328 call->tx_phase = false;
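
rxrpc_end_tx_phase() above moves a client call out of the request-sending state: to CLIENT_RECV_REPLY if the reply has already begun arriving, otherwise to CLIENT_AWAIT_REPLY. A tiny sketch of just that transition, with an illustrative enum rather than the kernel's call-state enum:

#include <stdio.h>

/* Hypothetical subset of client-call states mirroring the transition
 * visible above; the names are illustrative only. */
enum call_state {
	CLIENT_SEND_REQUEST,
	CLIENT_AWAIT_REPLY,
	CLIENT_RECV_REPLY,
	CALL_COMPLETE,
};

/* End the transmit phase: if the reply has already started arriving we
 * go straight to receiving it, otherwise we wait for it. */
static enum call_state end_tx_phase(enum call_state state, int reply_begun)
{
	if (state != CLIENT_SEND_REQUEST)
		return state;		/* nothing to do in other states */
	return reply_begun ? CLIENT_RECV_REPLY : CLIENT_AWAIT_REPLY;
}

int main(void)
{
	printf("%d %d\n",
	       end_tx_phase(CLIENT_SEND_REQUEST, 0),	/* -> AWAIT_REPLY */
	       end_tx_phase(CLIENT_SEND_REQUEST, 1));	/* -> RECV_REPLY  */
	return 0;
}
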
384 * space until the call times out.
387 * call. After that, we tell the other side we're no longer accepting jumbos
390 static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
399 * jumbos for this call.
402 call->nr_jumbo_bad++;
411 static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
422 call->rx_hard_ack, call->rx_top, skb->len, seq0);
427 state = READ_ONCE(call->state);
434 unsigned long timo = READ_ONCE(call->next_req_timo);
440 WRITE_ONCE(call->expect_req_by, expect_req_by);
441 rxrpc_reduce_call_timer(call, expect_req_by, now,
446 spin_lock(&call->input_lock);
453 !rxrpc_receiving_reply(call))
456 hard_ack = READ_ONCE(call->rx_hard_ack);
460 if (call->nr_jumbo_bad > 3) {
479 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
480 seq != call->rx_top) {
481 rxrpc_proto_abort("LSN", call, seq);
485 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
486 after_eq(seq, call->rx_top)) {
487 rxrpc_proto_abort("LSA", call, seq);
499 trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
507 if (call->rxtx_buffer[ix]) {
508 rxrpc_input_dup_data(call, seq, nr_subpackets > 1,
518 if (after(seq, hard_ack + call->rx_winsize)) {
523 call->nr_jumbo_bad++;
536 if (after(seq0, call->ackr_highest_seq))
537 call->ackr_highest_seq = seq0;
549 call->rxtx_annotations[ix] = annotation;
551 call->rxtx_buffer[ix] = skb;
552 if (after(seq, call->rx_top)) {
553 smp_store_release(&call->rx_top, seq);
554 } else if (before(seq, call->rx_top)) {
575 set_bit(RXRPC_CALL_RX_LAST, &call->flags);
580 trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
582 trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
585 if (after_eq(seq, call->rx_expect_next)) {
586 if (after(seq, call->rx_expect_next)) {
587 _net("OOS %u > %u", seq, call->rx_expect_next);
591 call->rx_expect_next = seq + 1;
598 if (atomic_add_return(nr_unacked, &call->ackr_nr_unacked) > 2 && !ack)
602 rxrpc_propose_ACK(call, ack, ack_serial,
606 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, serial,
610 trace_rxrpc_notify_socket(call->debug_id, serial);
611 rxrpc_notify_socket(call);
614 spin_unlock(&call->input_lock);
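
The data-receive path above relies on wrap-safe sequence comparisons (before(), after(), after_eq()) rather than plain '<' or '>'. Such helpers are conventionally built on a signed difference of the 32-bit sequence numbers; the sketch below shows that idiom in isolation and is not taken from the kernel's headers.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

typedef uint32_t seq_t;

/* Wrap-safe comparisons: the signed difference says which value is
 * "earlier" even after the 32-bit counter has wrapped around. */
static bool seq_before(seq_t a, seq_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(seq_t a, seq_t b)  { return (int32_t)(a - b) > 0; }

int main(void)
{
	seq_t near_wrap = 0xfffffffe, post_wrap = 3;

	/* A plain '<' says post_wrap is earlier; the wrap-safe tests
	 * correctly treat it as five packets later. */
	printf("naive <: %d  seq_before: %d  seq_after: %d\n",
	       post_wrap < near_wrap,
	       seq_before(post_wrap, near_wrap),
	       seq_after(post_wrap, near_wrap));
	return 0;
}
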
622 static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
634 avail = READ_ONCE(call->rtt_avail);
637 for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
641 sent_at = call->rtt_sent_at[i];
642 orig_serial = call->rtt_serial[i];
645 clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
647 set_bit(i, &call->rtt_avail);
649 rxrpc_peer_add_rtt(call, type, i, acked_serial, ack_serial,
652 trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_cancel, i,
661 trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
663 clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
665 set_bit(i, &call->rtt_avail);
670 trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0);
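
The RTT-probe lines above feed samples into the peer's smoothed RTT; the srtt_us >> 3 seen earlier in the congestion code suggests the usual RFC 6298 convention of storing SRTT scaled by 8 so the 1/8 gain stays in integer arithmetic. Below is a sketch of that update rule only, with invented field names and no claim to match the kernel's estimator.

#include <stdio.h>

/* Hypothetical RTT estimator keeping srtt scaled by 8 (RFC 6298 style),
 * so srtt8 >> 3 yields the smoothed RTT in microseconds. */
struct rtt_est {
	unsigned int srtt8;	/* smoothed RTT * 8, in usec */
	unsigned int rttvar;	/* mean deviation, in usec */
	unsigned int samples;
};

static void rtt_sample(struct rtt_est *e, unsigned int rtt_us)
{
	if (!e->samples) {
		e->srtt8 = rtt_us * 8;
		e->rttvar = rtt_us / 2;
	} else {
		unsigned int srtt = e->srtt8 >> 3;
		unsigned int err = srtt > rtt_us ? srtt - rtt_us : rtt_us - srtt;

		/* srtt += (sample - srtt) / 8, done in the scaled domain. */
		e->srtt8 += rtt_us;
		e->srtt8 -= srtt;
		/* rttvar += (|err| - rttvar) / 4, kept unsigned-safe. */
		if (err >= e->rttvar)
			e->rttvar += (err - e->rttvar) / 4;
		else
			e->rttvar -= (e->rttvar - err) / 4;
	}
	e->samples++;
}

int main(void)
{
	struct rtt_est e = { 0, 0, 0 };
	unsigned int samples[] = { 1000, 1200, 900, 1100 };

	for (unsigned int i = 0; i < 4; i++)
		rtt_sample(&e, samples[i]);
	printf("srtt=%uus rttvar=%uus\n", e.srtt8 >> 3, e.rttvar);
	return 0;
}
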
680 static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
685 spin_lock_bh(&call->lock);
687 bottom = call->tx_hard_ack + 1;
688 top = call->acks_lost_top;
692 u8 annotation = call->rxtx_annotations[ix];
699 call->rxtx_annotations[ix] = annotation;
704 spin_unlock_bh(&call->lock);
706 if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
707 rxrpc_queue_call(call);
713 static void rxrpc_input_ping_response(struct rxrpc_call *call,
718 if (acked_serial == call->acks_lost_ping)
719 rxrpc_input_check_for_lost_ack(call);
725 static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
741 if (call->tx_winsize != rwind) {
742 if (rwind > call->tx_winsize)
744 trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
745 call->tx_winsize = rwind;
748 if (call->cong_ssthresh > rwind)
749 call->cong_ssthresh = rwind;
753 peer = call->peer;
763 wake_up(&call->waitq);
775 static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
784 annotation = call->rxtx_annotations[ix];
793 call->rxtx_annotations[ix] =
798 call->acks_lowest_nak != seq) {
799 call->acks_lowest_nak = seq;
808 call->rxtx_annotations[ix] =
812 return rxrpc_proto_abort("SFT", call, 0);
821 static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
824 rxrpc_seq_t base = READ_ONCE(call->acks_first_seq);
832 if (after_eq(prev_pkt, call->acks_prev_seq))
836 if (after_eq(prev_pkt, base + call->tx_winsize))
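
rxrpc_is_ack_valid() above discards ACKs whose firstPacket/previousPacket information has regressed behind, or jumped implausibly far ahead of, what the call last recorded. The sketch below is a simplified check in the same spirit, not the kernel's exact rules; the struct and helper names are invented.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint32_t seq_t;

static bool seq_before(seq_t a, seq_t b)   { return (int32_t)(a - b) < 0; }
static bool seq_after_eq(seq_t a, seq_t b) { return (int32_t)(a - b) >= 0; }

/* Hypothetical record of what the last accepted ACK told us. */
struct ack_state {
	seq_t first_seq;	/* last accepted firstPacket */
	seq_t prev_seq;		/* last accepted previousPacket */
	unsigned int tx_winsize;
};

/* Accept an ACK only if it neither regresses behind the recorded state
 * nor references packets far beyond the send window. */
static bool ack_is_plausible(const struct ack_state *s,
			     seq_t first_soft_ack, seq_t prev_pkt)
{
	if (seq_before(first_soft_ack, s->first_seq))
		return false;			/* firstPacket regressed */
	if (seq_before(prev_pkt, s->prev_seq))
		return false;			/* previousPacket regressed */
	if (seq_after_eq(prev_pkt, s->first_seq + s->tx_winsize))
		return false;			/* too far ahead of window */
	return true;
}

int main(void)
{
	struct ack_state s = { .first_seq = 100, .prev_seq = 99, .tx_winsize = 64 };

	printf("%d %d\n",
	       ack_is_plausible(&s, 101, 100),	/* plausible: accepted */
	       ack_is_plausible(&s, 90, 100));	/* regressed: rejected */
	return 0;
}
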
851 static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
869 return rxrpc_proto_abort("XAK", call, 0);
882 trace_rxrpc_rx_ack(call, ack_serial, acked_serial,
888 rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
890 rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
894 rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
899 rxrpc_complete_rtt_probe(call, skb->tstamp, acked_serial, ack_serial,
906 rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
910 rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
916 if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
917 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
918 first_soft_ack, call->acks_first_seq,
919 prev_pkt, call->acks_prev_seq);
927 return rxrpc_proto_abort("XAI", call, 0);
929 spin_lock(&call->input_lock);
932 if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
933 trace_rxrpc_rx_discard_ack(call->debug_id, ack_serial,
934 first_soft_ack, call->acks_first_seq,
935 prev_pkt, call->acks_prev_seq);
938 call->acks_latest_ts = skb->tstamp;
940 call->acks_first_seq = first_soft_ack;
941 call->acks_prev_seq = prev_pkt;
945 rxrpc_input_ackinfo(call, skb, &buf.info);
948 rxrpc_proto_abort("AK0", call, 0);
953 switch (READ_ONCE(call->state)) {
963 if (before(hard_ack, call->tx_hard_ack) ||
964 after(hard_ack, call->tx_top)) {
965 rxrpc_proto_abort("AKW", call, 0);
968 if (nr_acks > call->tx_top - hard_ack) {
969 rxrpc_proto_abort("AKN", call, 0);
973 if (after(hard_ack, call->tx_hard_ack)) {
974 if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
975 rxrpc_end_tx_phase(call, false, "ETA");
982 rxrpc_proto_abort("XSA", call, 0);
985 rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
989 if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
991 summary.nr_acks == call->tx_top - hard_ack &&
992 rxrpc_is_client_call(call))
993 rxrpc_propose_ACK(call, RXRPC_ACK_PING, ack_serial,
997 rxrpc_congestion_management(call, skb, &summary, acked_serial);
999 spin_unlock(&call->input_lock);
1005 static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
1012 spin_lock(&call->input_lock);
1014 if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
1015 rxrpc_end_tx_phase(call, false, "ETL");
1017 spin_unlock(&call->input_lock);
1021 * Process an ABORT packet directed at a call.
1023 static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
1036 trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);
1040 rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
1045 * Process an incoming call packet.
1047 static void rxrpc_input_call_packet(struct rxrpc_call *call,
1053 _enter("%p,%p", call, skb);
1055 timo = READ_ONCE(call->next_rx_timo);
1060 WRITE_ONCE(call->expect_rx_by, expect_rx_by);
1061 rxrpc_reduce_call_timer(call, expect_rx_by, now,
1067 rxrpc_input_data(call, skb);
1071 rxrpc_input_ack(call, skb);
1084 rxrpc_input_abort(call, skb);
1088 rxrpc_input_ackall(call, skb);
1101 * Handle a new service call on a channel implicitly completing the preceding
1102 * call on that channel. This does not apply to client conns.
1108 struct rxrpc_call *call)
1110 switch (READ_ONCE(call->state)) {
1112 rxrpc_call_completed(call);
1117 if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
1118 set_bit(RXRPC_CALL_EV_ABORT, &call->events);
1119 rxrpc_queue_call(call);
1121 trace_rxrpc_improper_term(call);
1126 __rxrpc_disconnect_call(conn, call);
1132 * - this includes challenges, responses, some aborts and call terminal packet
1219 struct rxrpc_call *call = NULL;
1330 * that would begin a call are explicitly rejected and the rest
1380 if (chan->call ||
1384 /* For the previous service call, if completed
1403 call = rcu_dereference(chan->call);
1408 if (call)
1409 rxrpc_input_implicit_end_call(rx, conn, call);
1410 call = NULL;
1413 if (call) {
1414 if (sp->hdr.serviceId != call->service_id)
1415 call->service_id = sp->hdr.serviceId;
1416 if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
1417 call->rx_serial = sp->hdr.serial;
1418 if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
1419 set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
1423 if (!call || refcount_read(&call->ref) == 0) {
1429 call = rxrpc_new_incoming_call(local, rx, skb);
1430 if (!call)
1434 /* Process a call packet; this either discards or passes on the ref
1437 rxrpc_input_call_packet(call, skb);
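
The final group of lines shows the per-connection dispatch: an incoming packet is matched against the call attached to its channel, a packet belonging to a newer call implicitly terminates the old one, and otherwise the call's service ID and last-seen serial are refreshed before rxrpc_input_call_packet() runs. The toy model below illustrates only that channel bookkeeping; the structures, ownership, and the free() in place of the kernel's disconnect/ref handling are all simplifications invented for this sketch.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Toy stand-ins: a channel carries at most one call at a time. */
struct toy_call {
	uint32_t call_id;
	uint16_t service_id;
	uint32_t last_serial;
};

struct toy_channel {
	uint32_t call_id;		/* ID of the call on this channel */
	struct toy_call *call;
};

/* Deliver a packet (call_id, serial, service_id) to the right call,
 * implicitly ending any older call still attached to the channel. */
static struct toy_call *dispatch(struct toy_channel *chan, uint32_t call_id,
				 uint32_t serial, uint16_t service_id)
{
	struct toy_call *call = chan->call;

	if (call && call_id != chan->call_id) {
		/* A newer call on this channel: the previous one is over. */
		free(call);
		call = chan->call = NULL;
	}

	if (!call) {
		call = calloc(1, sizeof(*call));
		call->call_id = call_id;
		chan->call = call;
		chan->call_id = call_id;
	}

	call->service_id = service_id;
	if ((int32_t)(serial - call->last_serial) > 0)
		call->last_serial = serial;	/* track the newest serial seen */
	return call;
}

int main(void)
{
	struct toy_channel chan = { 0, NULL };
	struct toy_call *c;

	c = dispatch(&chan, 1, 10, 42);
	c = dispatch(&chan, 1, 11, 42);
	c = dispatch(&chan, 2, 12, 42);		/* implicitly ends call 1 */
	printf("call_id=%u serial=%u\n", c->call_id, c->last_serial);
	free(c);
	return 0;
}
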