Lines Matching defs:call

2 /* incoming call handling
25 static void rxrpc_dummy_notify(struct sock *sk, struct rxrpc_call *call,
31 * Preallocate a single service call, connection and peer and, if possible,
41 struct rxrpc_call *call, *xcall;
98 call = rxrpc_alloc_call(rx, gfp, debug_id);
99 if (!call)
101 call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
102 rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
103 __set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
105 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
124 call->user_call_ID = user_call_ID;
125 call->notify_rx = notify_rx;
127 rxrpc_get_call(call, rxrpc_call_get_kernel_service);
128 user_attach_call(call, user_call_ID);
131 rxrpc_get_call(call, rxrpc_call_get_userid);
132 rb_link_node(&call->sock_node, parent, pp);
133 rb_insert_color(&call->sock_node, &rx->calls);
134 set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
136 list_add(&call->sock_link, &rx->sock_calls);
140 rxnet = call->rxnet;
142 list_add_tail_rcu(&call->link, &rxnet->calls);
145 b->call_backlog[call_head] = call;
147 _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
152 rxrpc_cleanup_call(call);
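The fragments from file lines 98-152 above are the preallocation path: a call is allocated, flagged as a service call, put into the RXRPC_CALL_SERVER_PREALLOC state, handed to a kernel service's user_attach_call() hook where one is provided, indexed by user_call_ID in the socket's calls rb-tree, and finally parked in b->call_backlog[call_head]. As a hedged illustration of the user_attach_call(call, user_call_ID) hook, an attach callback for a kernel service might look like the sketch below; struct my_svc_call and its field are illustrative assumptions, not part of the rxrpc API.

	#include <net/af_rxrpc.h>

	struct my_svc_call {
		struct rxrpc_call *rxcall;	/* rxrpc's handle for this call */
		/* ... per-call service state ... */
	};

	/* Hypothetical attach callback: rxrpc hands back the preallocated call
	 * together with the user_call_ID the service supplied when charging
	 * the backlog.  Treating that ID as a pointer to the service's own
	 * call record is assumed here purely for illustration. */
	static void my_svc_attach_call(struct rxrpc_call *rxcall,
				       unsigned long user_call_ID)
	{
		struct my_svc_call *call = (struct my_svc_call *)user_call_ID;

		call->rxcall = rxcall;
	}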
220 struct rxrpc_call *call = b->call_backlog[tail];
221 rcu_assign_pointer(call->socket, rx);
223 _debug("discard %lx", call->user_call_ID);
224 rx->discard_new_call(call, call->user_call_ID);
225 if (call->notify_rx)
226 call->notify_rx = rxrpc_dummy_notify;
227 rxrpc_put_call(call, rxrpc_call_put_kernel);
229 rxrpc_call_completed(call);
230 rxrpc_release_call(rx, call);
231 rxrpc_put_call(call, rxrpc_call_put_discard_prealloc);
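File lines 220-231 are the teardown side: when the listening socket is being discarded, each still-unused preallocated call has its socket pointer reassigned, the kernel service is told to drop its record via rx->discard_new_call(call, call->user_call_ID), any per-call notifier is redirected to rxrpc_dummy_notify() so late notifications go nowhere, and the call is then completed, released and put. A matching discard callback for the hypothetical service sketched above need be no more than this:

	#include <linux/slab.h>

	/* Hypothetical discard callback: invoked once per preallocated but
	 * never-used call at socket teardown, so the service frees whatever
	 * it attached as user_call_ID (here the my_svc_call record from the
	 * earlier sketch). */
	static void my_svc_discard_new_call(struct rxrpc_call *rxcall,
					    unsigned long user_call_ID)
	{
		kfree((struct my_svc_call *)user_call_ID);
	}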
239 * Allocate a new incoming call from the prealloc pool, along with a connection
251 struct rxrpc_call *call;
300 /* And now we can allocate and set up a new call */
301 call = b->call_backlog[call_tail];
306 rxrpc_see_call(call, rxrpc_call_see_accept);
307 call->local = rxrpc_get_local(conn->local, rxrpc_local_get_call);
308 call->conn = conn;
309 call->security = conn->security;
310 call->security_ix = conn->security_ix;
311 call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_accept);
312 call->dest_srx = peer->srx;
313 call->cong_ssthresh = call->peer->cong_ssthresh;
314 call->tx_last_sent = ktime_get_real();
315 return call;
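File lines 300-315 take a previously preallocated call off the backlog ring (b->call_backlog[call_tail]) and wire it to the accepted connection: the local endpoint, connection, security, peer, destination address and the peer's congestion threshold are copied across before the call is returned. Together with the b->call_backlog[call_head] = call store at file line 145, this implies a single-producer/single-consumer ring indexed by free-running head and tail counters. The model below is a minimal, self-contained userspace rendering of that pattern in C11 atomics, not the rxrpc implementation; the power-of-two size and the acquire/release placement are assumptions standing in for the kernel's own helpers.

	#include <stdatomic.h>
	#include <stddef.h>

	#define BACKLOG_SIZE 32			/* must be a power of two */

	struct backlog {
		void *slot[BACKLOG_SIZE];
		atomic_uint head;		/* advanced by the producer only */
		atomic_uint tail;		/* advanced by the consumer only */
	};

	/* Producer: park one preallocated object; -1 if the ring is full. */
	static int backlog_push(struct backlog *b, void *obj)
	{
		unsigned int head = atomic_load_explicit(&b->head, memory_order_relaxed);
		unsigned int tail = atomic_load_explicit(&b->tail, memory_order_acquire);

		if (head - tail >= BACKLOG_SIZE)
			return -1;
		b->slot[head & (BACKLOG_SIZE - 1)] = obj;
		/* Publish the slot contents before advancing head. */
		atomic_store_explicit(&b->head, head + 1, memory_order_release);
		return 0;
	}

	/* Consumer: take the oldest object, or NULL if the ring is empty. */
	static void *backlog_pop(struct backlog *b)
	{
		unsigned int tail = atomic_load_explicit(&b->tail, memory_order_relaxed);
		unsigned int head = atomic_load_explicit(&b->head, memory_order_acquire);
		void *obj;

		if (tail == head)
			return NULL;
		obj = b->slot[tail & (BACKLOG_SIZE - 1)];
		/* Free the slot for reuse only after it has been read. */
		atomic_store_explicit(&b->tail, tail + 1, memory_order_release);
		return obj;
	}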
319 * Set up a new incoming call. Called from the I/O thread.
321 * If this is for a kernel service, when we allocate the call, it will have
337 struct rxrpc_call *call = NULL;
342 /* Don't set up a call for anything other than a DATA packet. */
349 * begin a call are explicitly rejected and the rest are just
376 call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, peer_srx,
378 if (!call) {
383 trace_rxrpc_receive(call, rxrpc_receive_incoming,
386 /* Make the call live. */
387 rxrpc_incoming_call(rx, call, skb);
388 conn = call->conn;
391 rx->notify_new_call(&rx->sk, call, call->user_call_ID);
396 set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
397 rxrpc_queue_conn(call->conn, rxrpc_conn_queue_challenge);
404 if (hlist_unhashed(&call->error_link)) {
405 spin_lock(&call->peer->lock);
406 hlist_add_head(&call->error_link, &call->peer->error_targets);
407 spin_unlock(&call->peer->lock);
410 _leave(" = %p{%d}", call, call->debug_id);
411 rxrpc_input_call_event(call, skb);
412 rxrpc_put_call(call, rxrpc_call_put_input);
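File lines 319-412 are the I/O-thread path that services the first DATA packet of a new call: a call is taken from the prealloc pool by rxrpc_alloc_incoming_call(), made live with rxrpc_incoming_call(), announced to the owning socket through rx->notify_new_call(&rx->sk, call, call->user_call_ID), given a security challenge on the connection where needed, hooked into the peer's error_targets list, and then the triggering packet is fed to rxrpc_input_call_event() before the input reference is dropped. Because that notification runs in the I/O thread, a kernel service's handler should not block; a hedged sketch matching the (sock, call, user_call_ID) shape visible at file line 391 follows, with struct my_svc and its work item as illustrative assumptions.

	#include <linux/workqueue.h>
	#include <net/af_rxrpc.h>

	struct my_svc {
		struct work_struct charge_work;	/* worker that recharges the backlog */
	};

	static struct my_svc my_service;	/* single service instance, assumed */

	/* Hypothetical new-call notification: runs in rxrpc's I/O thread, so
	 * it only schedules work to preallocate a replacement for the backlog
	 * entry this incoming call has just consumed. */
	static void my_svc_notify_new_call(struct sock *sk, struct rxrpc_call *rxcall,
					   unsigned long user_call_ID)
	{
		schedule_work(&my_service.charge_work);
	}

In current kernels this handler and the discard callback sketched earlier would be registered on the listening socket with rxrpc_kernel_new_call_notification(); that registration is not part of this listing and is mentioned here only as an assumption.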
434 * Charge up socket with preallocated calls, attaching user call IDs.
451 * @notify_rx: Event notification function for the call
452 * @user_attach_call: Func to attach call to user_call_ID
453 * @user_call_ID: The tag to attach to the preallocated call
459 * The user is given a ref to hold on the call.
461 * Note that the call may become connected before this function returns.
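File lines 434-461 are the kernel-doc for the exported charge operation (rxrpc_kernel_charge_accept() in current kernels): it preallocates one service call on the socket, binds notify_rx and user_attach_call to it, tags it with user_call_ID and leaves the caller holding a ref. A hedged usage sketch follows; the exact parameter order shown (socket, notify_rx, user_attach_call, user_call_ID, gfp mask, trace debug ID) is an assumption, since only the callbacks and the ID are visible in the listing above.

	#include <linux/slab.h>
	#include <net/af_rxrpc.h>

	/* Per-call event notification, same parameter shape as the
	 * rxrpc_dummy_notify() stub at file line 25. */
	static void my_svc_notify_rx(struct sock *sk, struct rxrpc_call *rxcall,
				     unsigned long user_call_ID)
	{
		/* Data or a state change is available on the call; wake a waiter. */
	}

	/* Charge one preallocated call onto the service's listening socket so
	 * an incoming call can be accepted without allocating in the I/O thread. */
	static int my_svc_charge_one(struct socket *rxrpc_sock)
	{
		struct my_svc_call *call;
		int ret;

		call = kzalloc(sizeof(*call), GFP_KERNEL);
		if (!call)
			return -ENOMEM;

		ret = rxrpc_kernel_charge_accept(rxrpc_sock, my_svc_notify_rx,
						 my_svc_attach_call,	/* sketched earlier */
						 (unsigned long)call,
						 GFP_KERNEL, 0);
		if (ret < 0)
			kfree(call);
		return ret;
	}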