Lines Matching defs:call

2 /* incoming call handling
25 static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
31 * Preallocate a single service call, connection and peer and, if possible,
42 struct rxrpc_call *call, *xcall;
100 call = rxrpc_alloc_call(rx, gfp, debug_id);
101 if (!call)
103 call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
104 call->state = RXRPC_CALL_SERVER_PREALLOC;
106 trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
107 refcount_read(&call->ref),
126 call->user_call_ID = user_call_ID;
127 call->notify_rx = notify_rx;
129 rxrpc_get_call(call, rxrpc_call_got_kernel);
130 user_attach_call(call, user_call_ID);
133 rxrpc_get_call(call, rxrpc_call_got_userid);
134 rb_link_node(&call->sock_node, parent, pp);
135 rb_insert_color(&call->sock_node, &rx->calls);
136 set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
138 list_add(&call->sock_link, &rx->sock_calls);
142 rxnet = call->rxnet;
144 list_add_tail(&call->link, &rxnet->calls);
147 b->call_backlog[call_head] = call;
149 _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
154 rxrpc_cleanup_call(call);
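
The fragments above are the producer side of the prealloc pool: rxrpc_service_prealloc_one() allocates a call, marks it as a service call in RXRPC_CALL_SERVER_PREALLOC state, attaches the user call ID, and finally stores it at b->call_backlog[call_head]. A minimal userspace sketch of that publish step, using C11 atomics in place of the kernel's smp_store_release()/smp_load_acquire(); struct prealloc_ring and publish_call are illustrative names, not kernel API:

#include <stdatomic.h>
#include <stdbool.h>

#define BACKLOG_MAX 32			/* ring size; must be a power of two */

struct call;				/* stands in for struct rxrpc_call */

struct prealloc_ring {
	struct call *slots[BACKLOG_MAX];
	atomic_uint head;		/* advanced by the preallocating task */
	atomic_uint tail;		/* advanced by the consumer */
};

/* Publish one preallocated call; returns false if the backlog is full.
 * The release store on head publishes the slot contents to the consumer,
 * mirroring the kernel's release store on the backlog head index.
 */
static bool publish_call(struct prealloc_ring *r, struct call *call)
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_relaxed);
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (head - tail >= BACKLOG_MAX)
		return false;		/* fully charged */

	r->slots[head & (BACKLOG_MAX - 1)] = call;
	atomic_store_explicit(&r->head, head + 1, memory_order_release);
	return true;
}
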
222 struct rxrpc_call *call = b->call_backlog[tail];
223 rcu_assign_pointer(call->socket, rx);
225 _debug("discard %lx", call->user_call_ID);
226 rx->discard_new_call(call, call->user_call_ID);
227 if (call->notify_rx)
228 call->notify_rx = rxrpc_dummy_notify;
229 rxrpc_put_call(call, rxrpc_call_put_kernel);
231 rxrpc_call_completed(call);
232 rxrpc_release_call(rx, call);
233 rxrpc_put_call(call, rxrpc_call_put);
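
When the socket is torn down, rxrpc_discard_prealloc() walks the same ring from the tail: each queued call is handed to the kernel service's discard_new_call hook if one is set, then completed, released and put. The drain in the same hedged userspace model (discard_prealloc and discard_fn are illustrative names):

/* Drain the backlog at socket teardown; runs single-threaded once the
 * producer has stopped.  discard_fn models the service's discard_new_call
 * hook and is hypothetical.
 */
static void discard_prealloc(struct prealloc_ring *r,
			     void (*discard_fn)(struct call *))
{
	unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	while (tail != head) {
		struct call *call = r->slots[tail & (BACKLOG_MAX - 1)];

		if (discard_fn)
			discard_fn(call);	/* let the service drop its ref */
		/* then complete, release and put the call (not shown) */
		tail++;
	}
	atomic_store_explicit(&r->tail, tail, memory_order_release);
}
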
244 static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
249 if (call->peer->rtt_count < 3 ||
250 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
251 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
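
The ping heuristic above fires only while the peer has fewer than three RTT samples, or when the last probe is more than a second old, so busy calls are not flooded with probes. The same test as a hedged standalone sketch (peer_rtt and want_rtt_probe are made-up names):

#include <stdbool.h>
#include <stdint.h>

struct peer_rtt {
	unsigned int rtt_count;		/* RTT samples gathered so far */
	int64_t rtt_last_req_ns;	/* time the last RTT probe was sent */
};

/* Probe freely until three samples exist, then at most once per second,
 * matching ktime_before(ktime_add_ms(rtt_last_req, 1000), now).
 */
static bool want_rtt_probe(const struct peer_rtt *p, int64_t now_ns)
{
	return p->rtt_count < 3 ||
	       p->rtt_last_req_ns + 1000000000LL < now_ns;
}
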
257 * Allocate a new incoming call from the prealloc pool, along with a connection
269 struct rxrpc_call *call;
318 /* And now we can allocate and set up a new call */
319 call = b->call_backlog[call_tail];
324 rxrpc_see_call(call);
325 call->conn = conn;
326 call->security = conn->security;
327 call->security_ix = conn->security_ix;
328 call->peer = rxrpc_get_peer(conn->params.peer);
329 call->cong_cwnd = call->peer->cong_cwnd;
330 return call;
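
rxrpc_alloc_incoming_call() is the consumer side of the ring: it lifts the call at call_tail and wires in the connection's security, peer and initial congestion window. The take step, continuing the userspace sketch above (take_call is an illustrative name):

/* Take the oldest preallocated call, or NULL if the backlog is empty.
 * The acquire load on head pairs with the producer's release store, so
 * the slot is fully initialised once the index comparison passes.
 */
static struct call *take_call(struct prealloc_ring *r)
{
	unsigned int tail = atomic_load_explicit(&r->tail, memory_order_relaxed);
	unsigned int head = atomic_load_explicit(&r->head, memory_order_acquire);
	struct call *call;

	if (tail == head)
		return NULL;		/* nothing charged */

	call = r->slots[tail & (BACKLOG_MAX - 1)];
	atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
	return call;
}
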
334 * Set up a new incoming call. Called in BH context with the RCU read lock
337 * If this is for a kernel service, when we allocate the call, it will have
346 * The call is returned with the user access mutex held.
356 struct rxrpc_call *call = NULL;
371 /* The peer, connection and call may all have sprung into existence due
381 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
383 if (!call) {
388 trace_rxrpc_receive(call, rxrpc_receive_incoming,
391 /* Make the call live. */
392 rxrpc_incoming_call(rx, call, skb);
393 conn = call->conn;
396 rx->notify_new_call(&rx->sk, call, call->user_call_ID);
402 set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
403 rxrpc_queue_conn(call->conn);
407 write_lock(&call->state_lock);
408 if (call->state < RXRPC_CALL_COMPLETE)
409 call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
410 write_unlock(&call->state_lock);
414 rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
418 rxrpc_abort_call("CON", call, sp->hdr.seq,
427 rxrpc_send_ping(call, skb);
432 * service to prevent the call from being deallocated too early.
434 rxrpc_put_call(call, rxrpc_call_put);
436 _leave(" = %p{%d}", call, call->debug_id);
437 return call;
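
rxrpc_new_incoming_call() promotes the call to RXRPC_CALL_SERVER_RECV_REQUEST under write_lock(&call->state_lock), and only if the call is not already complete, because a connection-level abort can race with acceptance. A hedged sketch of just that guarded transition, with a pthread rwlock standing in for the kernel lock (lite_call and begin_recv_request are illustrative):

#include <pthread.h>

enum call_state {			/* abbreviated state set */
	CALL_SERVER_PREALLOC,
	CALL_SERVER_RECV_REQUEST,
	CALL_COMPLETE,			/* terminal: abort or normal end */
};

struct lite_call {
	pthread_rwlock_t state_lock;	/* stands in for call->state_lock */
	enum call_state state;
};

/* Advance a freshly accepted call into the receive state unless a racing
 * abort has already completed it.
 */
static void begin_recv_request(struct lite_call *call)
{
	pthread_rwlock_wrlock(&call->state_lock);
	if (call->state < CALL_COMPLETE)
		call->state = CALL_SERVER_RECV_REQUEST;
	pthread_rwlock_unlock(&call->state_lock);
}
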
446 * Charge up socket with preallocated calls, attaching user call IDs.
463 * @notify_rx: Event notification function for the call
464 * @user_attach_call: Func to attach call to user_call_ID
465 * @user_call_ID: The tag to attach to the preallocated call
471 * The user is given a ref to hold on the call.
473 * Note that the call may become connected before this function returns.
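
On the charging side, a kernel service typically loops on rxrpc_kernel_charge_accept() until the backlog reports full, allocating one service-side call record per preallocated slot (AFS follows this pattern from its preallocation worker). A hedged sketch of that loop; every my_* identifier is a hypothetical stand-in for the service's own helpers:

#include <net/af_rxrpc.h>	/* rxrpc_kernel_charge_accept() */

struct my_call {		/* hypothetical service-side call record */
	unsigned int debug_id;
	/* ... */
};

struct my_call *my_alloc_call(gfp_t gfp);		/* hypothetical */
void my_free_call(struct my_call *call);		/* hypothetical */
void my_wake_up(struct sock *sk, struct rxrpc_call *rxcall,
		unsigned long call_id);			/* rxrpc_notify_rx_t shape */
void my_attach(struct rxrpc_call *rxcall,
	       unsigned long call_id);			/* rxrpc_user_attach_call_t shape */

/* Keep the rxrpc socket charged with preallocated incoming calls until
 * the backlog ring is full or allocation fails.
 */
static void charge_preallocation(struct socket *rxsock)
{
	struct my_call *call;

	for (;;) {
		call = my_alloc_call(GFP_KERNEL);
		if (!call)
			break;
		if (rxrpc_kernel_charge_accept(rxsock, my_wake_up, my_attach,
					       (unsigned long)call,
					       GFP_KERNEL,
					       call->debug_id) < 0) {
			my_free_call(call);	/* backlog full for now */
			break;
		}
		/* rxrpc now owns the tag; per the note above, the call may
		 * become connected at any moment after this point.
		 */
	}
}
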