Lines matching refs:sk in net/bluetooth/af_bluetooth.c (Bluetooth socket core)
71 void bt_sock_reclassify_lock(struct sock *sk, int proto)
73 BUG_ON(!sk);
74 BUG_ON(!sock_allow_reclassification(sk));
76 sock_lock_init_class_and_name(sk,
134 bt_sock_reclassify_lock(sock->sk, proto);
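
bt_sock_reclassify_lock() re-keys an already initialised socket lock so lockdep tracks each Bluetooth protocol's sockets as a distinct lock class; bt_sock_create() invokes it at line 134 once the per-protocol ->create() hook has settled the final proto. A minimal sketch of the same pattern, assuming hypothetical per-protocol key arrays (my_lock_keys, my_slock_keys and the name-string arrays are not part of this file):

static struct lock_class_key my_lock_keys[BT_MAX_PROTO];
static struct lock_class_key my_slock_keys[BT_MAX_PROTO];
static const char *const my_key_strings[BT_MAX_PROTO];
static const char *const my_slock_key_strings[BT_MAX_PROTO];

/* Must run before anyone else can take the lock, which is what the
 * sock_allow_reclassification() BUG_ON at line 74 enforces.
 */
static void my_reclassify(struct sock *sk, int proto)
{
	sock_lock_init_class_and_name(sk,
			my_slock_key_strings[proto], &my_slock_keys[proto],
			my_key_strings[proto], &my_lock_keys[proto]);
}
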
146 struct sock *sk;
148 sk = sk_alloc(net, PF_BLUETOOTH, prio, prot, kern);
149 if (!sk)
152 sock_init_data(sock, sk);
153 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
155 sock_reset_flag(sk, SOCK_ZAPPED);
157 sk->sk_protocol = proto;
158 sk->sk_state = BT_OPEN;
162 spin_lock(&sk->sk_peer_lock);
163 sk->sk_peer_pid = get_pid(task_tgid(current));
164 sk->sk_peer_cred = get_current_cred();
165 spin_unlock(&sk->sk_peer_lock);
168 return sk;
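
Lines 146-168 are the shared allocator bt_sock_alloc(): a thin wrapper around sk_alloc() that initialises the accept queue, clears SOCK_ZAPPED, marks the socket BT_OPEN and records the creating task's pid and credentials under sk_peer_lock (so SO_PEERCRED works on Bluetooth sockets). A sketch of a protocol ->create() hook built on top of it; my_proto and my_sock_ops are illustrative stand-ins:

static int my_sock_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	struct sock *sk;

	sock->ops = &my_sock_ops;

	sk = bt_sock_alloc(net, sock, &my_proto, protocol, GFP_KERNEL, kern);
	if (!sk)
		return -ENOMEM;

	/* sk_state is already BT_OPEN; nothing else is mandatory here */
	return 0;
}
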
172 void bt_sock_link(struct bt_sock_list *l, struct sock *sk)
175 sk_add_node(sk, &l->head);
180 void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
183 sk_del_node_init(sk);
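
bt_sock_link() and bt_sock_unlink() maintain a per-protocol bt_sock_list under its rwlock (the write_lock()/write_unlock() lines contain no sk reference, so they do not appear in this listing). sk_add_node() takes a reference on the socket and sk_del_node_init() drops it. Typical usage, with my_sk_list standing in for a protocol's real list:

static struct bt_sock_list my_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(my_sk_list.lock)
};

static void my_sock_added(struct sock *sk)
{
	bt_sock_link(&my_sk_list, sk);		/* holds a ref, visible to /proc */
}

static void my_sock_released(struct sock *sk)
{
	bt_sock_unlink(&my_sk_list, sk);	/* drops the list's ref */
}
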
188 void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
193 BT_DBG("parent %p, sk %p", parent, sk);
195 sock_hold(sk);
198 bh_lock_sock_nested(sk);
200 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
202 list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
203 bt_sk(sk)->parent = parent;
208 spin_lock(&sk->sk_peer_lock);
209 old_pid = sk->sk_peer_pid;
210 old_cred = sk->sk_peer_cred;
211 sk->sk_peer_pid = get_pid(parent->sk_peer_pid);
212 sk->sk_peer_cred = get_cred(parent->sk_peer_cred);
213 spin_unlock(&sk->sk_peer_lock);
219 bh_unlock_sock(sk);
221 release_sock(sk);
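
bt_accept_enqueue() pins the child with sock_hold(), chains it onto the parent's accept_q and copies the parent's pid/credentials over the child's, so SO_PEERCRED on an accepted socket reports the listener's owner rather than whatever context created the child. The bh flag picks bh_lock_sock_nested() for callers already running in softirq context and lock_sock_nested() otherwise. A sketch of a connect-indication path in process context (my_connect_ind is hypothetical):

static void my_connect_ind(struct sock *parent, struct sock *child)
{
	bt_accept_enqueue(parent, child, false);

	/* wake anyone blocked in accept() or poll() on the listener */
	parent->sk_data_ready(parent);
}
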
227 /* Calling function must hold the sk lock.
228 * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list.
230 void bt_accept_unlink(struct sock *sk)
232 BT_DBG("sk %p state %d", sk, sk->sk_state);
234 list_del_init(&bt_sk(sk)->accept_q);
235 sk_acceptq_removed(bt_sk(sk)->parent);
236 bt_sk(sk)->parent = NULL;
237 sock_put(sk);
244 struct sock *sk;
250 sk = (struct sock *)s;
252 /* Prevent early freeing of sk due to unlink and sock_kill */
253 sock_hold(sk);
254 lock_sock(sk);
256 /* Check sk has not already been unlinked via
257 * bt_accept_unlink() due to serialisation caused by sk locking
259 if (!bt_sk(sk)->parent) {
260 BT_DBG("sk %p, already unlinked", sk);
261 release_sock(sk);
262 sock_put(sk);
264 /* Restart the loop as sk is no longer in the list
271 /* sk is safely in the parent list so reduce reference count */
272 sock_put(sk);
275 if (sk->sk_state == BT_CLOSED) {
276 bt_accept_unlink(sk);
277 release_sock(sk);
281 if (sk->sk_state == BT_CONNECTED || !newsock ||
283 bt_accept_unlink(sk);
285 sock_graft(sk, newsock);
287 release_sock(sk);
288 return sk;
291 release_sock(sk);
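
bt_accept_dequeue() walks the parent's accept_q, discards children that have gone BT_CLOSED, and grafts the first ready child onto newsock; a NULL newsock makes it dequeue unconditionally, which callers use to drain the queue on teardown. It returns NULL when nothing is ready, so protocol accept() paths wrap it in a wait loop. A sketch close to the shape RFCOMM and SCO use, called with the parent lock held:

static struct sock *my_wait_for_child(struct sock *parent,
				      struct socket *newsock, long timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *child;

	add_wait_queue_exclusive(sk_sleep(parent), &wait);
	for (;;) {
		child = bt_accept_dequeue(parent, newsock);
		if (child || !timeo || signal_pending(current) ||
		    parent->sk_state != BT_LISTEN)
			break;

		release_sock(parent);
		timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
		lock_sock(parent);
	}
	remove_wait_queue(sk_sleep(parent), &wait);
	return child;	/* NULL on timeout, signal or teardown */
}
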
301 struct sock *sk = sock->sk;
307 BT_DBG("sock %p sk %p len %zu", sock, sk, len);
312 skb = skb_recv_datagram(sk, flags, &err);
314 if (sk->sk_shutdown & RCV_SHUTDOWN)
330 sock_recv_cmsgs(msg, sk, skb);
332 if (msg->msg_name && bt_sk(sk)->skb_msg_name)
333 bt_sk(sk)->skb_msg_name(skb, msg->msg_name,
336 if (test_bit(BT_SK_PKT_STATUS, &bt_sk(sk)->flags)) {
344 skb_free_datagram(sk, skb);
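
The datagram receive path pulls exactly one skb per call: skb_recv_datagram() dequeues it (an empty queue after RCV_SHUTDOWN reads as 0, i.e. EOF, per line 314), the payload is copied up to len bytes with MSG_TRUNC set in msg_flags when the packet was larger, sock_recv_cmsgs() attaches timestamps and other cmsgs, and the protocol may fill msg_name via its bt_sk(sk)->skb_msg_name hook. A userspace sketch of that contract; fd is assumed to be a connected SOCK_SEQPACKET Bluetooth socket such as L2CAP:

#include <stdio.h>
#include <sys/socket.h>

static ssize_t read_one_datagram(int fd)
{
	char buf[672];	/* default L2CAP MTU */
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
	ssize_t n = recvmsg(fd, &msg, 0);

	if (n >= 0 && (msg.msg_flags & MSG_TRUNC))
		fprintf(stderr, "datagram truncated to %zd bytes\n", n);
	return n;	/* 0 means orderly shutdown, -1 sets errno */
}
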
353 static long bt_sock_data_wait(struct sock *sk, long timeo)
357 add_wait_queue(sk_sleep(sk), &wait);
361 if (!skb_queue_empty(&sk->sk_receive_queue))
364 if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN))
370 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
371 release_sock(sk);
373 lock_sock(sk);
374 sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
378 remove_wait_queue(sk_sleep(sk), &wait);
385 struct sock *sk = sock->sk;
393 BT_DBG("sk %p size %zu", sk, size);
395 lock_sock(sk);
397 target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
398 timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
404 skb = skb_dequeue(&sk->sk_receive_queue);
409 err = sock_error(sk);
412 if (sk->sk_shutdown & RCV_SHUTDOWN)
419 timeo = bt_sock_data_wait(sk, timeo);
430 skb_queue_head(&sk->sk_receive_queue, skb);
438 sock_recv_cmsgs(msg, sk, skb);
469 skb_queue_head(&sk->sk_receive_queue, skb);
476 skb_queue_head(&sk->sk_receive_queue, skb);
482 release_sock(sk);
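
The stream variant loops dequeueing skbs until target bytes have been copied (sock_rcvlowat() at line 397, which becomes the full size under MSG_WAITALL), sleeping in bt_sock_data_wait() above; that helper drops the socket lock around the schedule and flags SOCKWQ_ASYNC_WAITDATA for the duration, and on MSG_PEEK or a partial copy failure the skb is pushed back with skb_queue_head(). From userspace, MSG_WAITALL rides this loop directly; fd is assumed to be a connected SOCK_STREAM Bluetooth socket such as RFCOMM:

#include <sys/socket.h>

/* Block until all four header bytes have arrived (or error/EOF) */
static ssize_t read_header(int fd, unsigned char hdr[4])
{
	return recv(fd, hdr, 4, MSG_WAITALL);
}
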
490 struct sock *sk;
493 sk = (struct sock *)s;
494 if (sk->sk_state == BT_CONNECTED ||
496 sk->sk_state == BT_CONNECT2))
506 struct sock *sk = sock->sk;
509 poll_wait(file, sk_sleep(sk), wait);
511 if (sk->sk_state == BT_LISTEN)
512 return bt_accept_poll(sk);
514 if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
516 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
518 if (sk->sk_shutdown & RCV_SHUTDOWN)
521 if (sk->sk_shutdown == SHUTDOWN_MASK)
524 if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
527 if (sk->sk_state == BT_CLOSED)
530 if (sk->sk_state == BT_CONNECT ||
531 sk->sk_state == BT_CONNECT2 ||
532 sk->sk_state == BT_CONFIG)
535 if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk))
538 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
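
bt_sock_poll() maps socket state onto epoll bits: listeners are readable when bt_accept_poll() above finds a connected child (or a BT_CONNECT2 one with deferred setup), EPOLLRDHUP/EPOLLHUP mirror sk_shutdown, BT_CLOSED adds EPOLLHUP, the connecting states suppress writability, and a socket is writable only while the protocol has not set BT_SK_SUSPEND. Userspace sketch; fd is assumed to be any Bluetooth socket:

#include <poll.h>

/* Wait up to 5 s; this lands in bt_sock_poll() above.
 * POLLIN: data queued, or a ready child if listening.
 * POLLHUP/POLLERR: shutdown, or sk_err raised by the protocol.
 */
static short wait_for_io(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };

	return poll(&pfd, 1, 5000) > 0 ? pfd.revents : 0;
}
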
546 struct sock *sk = sock->sk;
551 BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg);
555 if (sk->sk_state == BT_LISTEN)
558 amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
565 if (sk->sk_state == BT_LISTEN)
568 spin_lock(&sk->sk_receive_queue.lock);
569 skb = skb_peek(&sk->sk_receive_queue);
571 spin_unlock(&sk->sk_receive_queue.lock);
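
Two ioctls are answered generically here, and both reject listening sockets: TIOCOUTQ reports the remaining send-buffer space (sk_sndbuf minus allocated wmem, line 558) rather than the queued-byte count TCP gives, and TIOCINQ returns the length of the first skb in the receive queue, i.e. the size of the next datagram rather than the total backlog. Userspace sketch; fd is assumed to be a connected (non-listening) Bluetooth socket:

#include <stdio.h>
#include <sys/ioctl.h>

/* Size the next read without consuming it */
static void probe_queues(int fd)
{
	int next_pkt = 0, snd_space = 0;

	if (ioctl(fd, TIOCINQ, &next_pkt) == 0)
		printf("next datagram: %d bytes\n", next_pkt);
	if (ioctl(fd, TIOCOUTQ, &snd_space) == 0)
		printf("free send buffer: %d bytes\n", snd_space);
}
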
585 /* This function expects the sk lock to be held when called */
586 int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
591 BT_DBG("sk %p", sk);
593 add_wait_queue(sk_sleep(sk), &wait);
595 while (sk->sk_state != state) {
606 release_sock(sk);
608 lock_sock(sk);
611 err = sock_error(sk);
616 remove_wait_queue(sk_sleep(sk), &wait);
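
bt_sock_wait_state() spins on sk_state with the socket lock held by the caller, releasing it around each schedule and giving up on timeout, signal, or a pending sock_error(). Protocols use it to implement blocking connect(); a sketch of that shape, where my_start_connect() is a hypothetical helper that kicks off the actual link setup:

static int my_sock_connect_blocking(struct sock *sk, long timeo)
{
	int err;

	lock_sock(sk);

	err = my_start_connect(sk);		/* hypothetical */
	if (!err && sk->sk_state != BT_CONNECTED)
		err = bt_sock_wait_state(sk, BT_CONNECTED, timeo);

	release_sock(sk);
	return err;
}
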
621 /* This function expects the sk lock to be held when called */
622 int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags)
628 BT_DBG("sk %p", sk);
630 timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT));
632 add_wait_queue(sk_sleep(sk), &wait);
634 while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) {
645 release_sock(sk);
647 lock_sock(sk);
650 err = sock_error(sk);
655 remove_wait_queue(sk_sleep(sk), &wait);
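
bt_sock_wait_ready() is the sendmsg-side twin: it sleeps, honouring MSG_DONTWAIT and the socket's send timeout (line 630), for as long as the protocol keeps the socket flow-controlled via BT_SK_SUSPEND. Sendmsg paths call it before queueing data; a sketch, with my_queue_data() hypothetical:

static int my_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			   size_t len)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);

	/* -EAGAIN under MSG_DONTWAIT, -ERESTARTSYS on signal */
	err = bt_sock_wait_ready(sk, msg->msg_flags);
	if (!err)
		err = my_queue_data(sk, msg, len);	/* hypothetical */

	release_sock(sk);
	return err ? err : (int)len;
}
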
691 seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent");
700 struct sock *sk = sk_entry(v);
701 struct bt_sock *bt = bt_sk(sk);
705 sk,
706 refcount_read(&sk->sk_refcnt),
707 sk_rmem_alloc_get(sk),
708 sk_wmem_alloc_get(sk),
709 from_kuid(seq_user_ns(seq), sock_i_uid(sk)),
710 sock_i_ino(sk),
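
bt_seq_show() prints one row per socket in the protocol's bt_sock_list, matching the header emitted at line 691. A protocol exposes this under /proc/net by registering its list at init time via this file's bt_procfs_init() helper; a sketch reusing the illustrative my_sk_list from above (a NULL show callback keeps just the default columns):

static int __init my_proto_init(void)
{
	/* creates /proc/net/my_proto listing every socket in my_sk_list */
	return bt_procfs_init(&init_net, "my_proto", &my_sk_list, NULL);
}
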