Lines Matching refs:sk
54 #define __iucv_sock_wait(sk, condition, timeo, ret) \
59 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
69 release_sock(sk); \
71 lock_sock(sk); \
72 ret = sock_error(sk); \
76 finish_wait(sk_sleep(sk), &__wait); \
79 #define iucv_sock_wait(sk, condition, timeo) \
83 __iucv_sock_wait(sk, condition, timeo, __ret); \
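The matches from source lines 54-83 are the socket wait helpers. The visible fragments already show the canonical kernel pattern: prepare_to_wait() on sk_sleep(sk), release_sock()/lock_sock() bracketing the sleep so the delivery path can make progress, sock_error() rechecked once the lock is retaken, and finish_wait() on exit. Below is a minimal sketch of that pattern as an open-coded helper; the helper name, errno choices and timeout handling are assumptions, since the real code expresses this as the __iucv_sock_wait()/iucv_sock_wait() macros so an arbitrary condition can be passed in.

/*
 * Sketch only: wait until the socket reaches one of two states, following
 * the prepare_to_wait()/schedule_timeout()/finish_wait() shape visible in
 * the matched fragments.  Hypothetical helper name.
 */
#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <net/sock.h>

static int iucv_wait_for_states(struct sock *sk, int state, int state2,
                                long timeo)
{
        DEFINE_WAIT(__wait);
        int ret = 0;

        while (sk->sk_state != state && sk->sk_state != state2) {
                prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);
                if (!timeo) {                           /* timed out */
                        ret = -EAGAIN;                  /* assumed errno */
                        break;
                }
                if (signal_pending(current)) {          /* interrupted */
                        ret = sock_intr_errno(timeo);
                        break;
                }
                release_sock(sk);                       /* let RX/softirq run */
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
                ret = sock_error(sk);                   /* async socket error? */
                if (ret)
                        break;
        }
        finish_wait(sk_sleep(sk), &__wait);
        return ret;
}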
89 static void iucv_sock_kill(struct sock *sk);
90 static void iucv_sock_close(struct sock *sk);
159 * @sk: sock structure
160 * @state: first iucv sk state
161 * @state2: second iucv sk state
165 static int iucv_sock_in_state(struct sock *sk, int state, int state2)
167 return (sk->sk_state == state || sk->sk_state == state2);
172 * @sk: sock structure
178 static inline int iucv_below_msglim(struct sock *sk)
180 struct iucv_sock *iucv = iucv_sk(sk);
182 if (sk->sk_state != IUCV_CONNECTED)
194 static void iucv_sock_wake_msglim(struct sock *sk)
199 wq = rcu_dereference(sk->sk_wq);
202 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
289 struct sock *sk;
291 sk_for_each(sk, &iucv_sk_list.head)
292 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
293 return sk;
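Source lines 289-293 are the lookup of a socket by its bound IUCV name: a plain sk_for_each() walk over iucv_sk_list.head with an 8-byte memcmp() on src_name. Reassembled as a self-contained sketch below; the function name is hypothetical and the iucv_sk_list locking around the walk is assumed, since it does not reference sk and therefore is not part of the matches.

/*
 * Sketch of the lookup shown above: return the socket bound to the given
 * 8-byte IUCV name, or NULL.  The caller is assumed to hold the
 * iucv_sk_list lock.
 */
static struct sock *iucv_lookup_by_name(char *nm)
{
        struct sock *sk;

        sk_for_each(sk, &iucv_sk_list.head)
                if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
                        return sk;

        return NULL;
}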
298 static void iucv_sock_destruct(struct sock *sk)
300 skb_queue_purge(&sk->sk_receive_queue);
301 skb_queue_purge(&sk->sk_error_queue);
303 sk_mem_reclaim(sk);
305 if (!sock_flag(sk, SOCK_DEAD)) {
306 pr_err("Attempt to release alive iucv socket %p\n", sk);
310 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
311 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
312 WARN_ON(sk->sk_wmem_queued);
313 WARN_ON(sk->sk_forward_alloc);
319 struct sock *sk;
322 while ((sk = iucv_accept_dequeue(parent, NULL))) {
323 iucv_sock_close(sk);
324 iucv_sock_kill(sk);
330 static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
333 sk_add_node(sk, &l->head);
337 static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
340 sk_del_node_init(sk);
345 static void iucv_sock_kill(struct sock *sk)
347 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
350 iucv_sock_unlink(&iucv_sk_list, sk);
351 sock_set_flag(sk, SOCK_DEAD);
352 sock_put(sk);
356 static void iucv_sever_path(struct sock *sk, int with_user_data)
359 struct iucv_sock *iucv = iucv_sk(sk);
376 static int iucv_send_ctrl(struct sock *sk, u8 flags)
378 struct iucv_sock *iucv = iucv_sk(sk);
386 if (sk->sk_shutdown & SEND_SHUTDOWN) {
388 shutdown = sk->sk_shutdown;
389 sk->sk_shutdown &= RCV_SHUTDOWN;
391 skb = sock_alloc_send_skb(sk, blen, 1, &err);
394 err = afiucv_hs_send(NULL, sk, skb, flags);
397 sk->sk_shutdown = shutdown;
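Source lines 376-397 are iucv_send_ctrl(): control flags (SYN, FIN, WIN, SHT) must still go out after a local shutdown(SHUT_WR), so the shutdown bits are saved, temporarily reduced to RCV_SHUTDOWN, and restored once the skb has been handed to afiucv_hs_send(). A hedged reconstruction of that shape follows; the skb length calculation and the skb_reserve() call are assumptions that do not appear in the matches.

/*
 * Sketch: send a zero-payload control skb that carries only AF_IUCV flags.
 * Only the shutdown save/restore, sock_alloc_send_skb() and
 * afiucv_hs_send() calls are visible above; the header sizing is assumed.
 */
static int iucv_send_ctrl_sketch(struct sock *sk, u8 flags)
{
        struct sk_buff *skb;
        u8 shutdown = 0;
        int err = 0, blen;

        blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;     /* assumed */
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                /* control flags are sent even after shutdown(SHUT_WR) */
                shutdown = sk->sk_shutdown;
                sk->sk_shutdown &= RCV_SHUTDOWN;
        }
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (skb) {
                skb_reserve(skb, blen);                         /* assumed */
                err = afiucv_hs_send(NULL, sk, skb, flags);
        }
        if (shutdown)
                sk->sk_shutdown = shutdown;
        return err;
}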
402 static void iucv_sock_close(struct sock *sk)
404 struct iucv_sock *iucv = iucv_sk(sk);
408 lock_sock(sk);
410 switch (sk->sk_state) {
412 iucv_sock_cleanup_listen(sk);
417 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
418 sk->sk_state = IUCV_DISCONN;
419 sk->sk_state_change(sk);
424 sk->sk_state = IUCV_CLOSING;
425 sk->sk_state_change(sk);
428 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
429 timeo = sk->sk_lingertime;
432 iucv_sock_wait(sk,
433 iucv_sock_in_state(sk, IUCV_CLOSED, 0),
439 sk->sk_state = IUCV_CLOSED;
440 sk->sk_state_change(sk);
442 sk->sk_err = ECONNRESET;
443 sk->sk_state_change(sk);
450 iucv_sever_path(sk, 1);
456 sk->sk_bound_dev_if = 0;
460 sock_set_flag(sk, SOCK_ZAPPED);
462 release_sock(sk);
465 static void iucv_sock_init(struct sock *sk, struct sock *parent)
468 sk->sk_type = parent->sk_type;
469 security_sk_clone(parent, sk);
475 struct sock *sk;
478 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
479 if (!sk)
481 iucv = iucv_sk(sk);
483 sock_init_data(sock, sk);
504 sk->sk_destruct = iucv_sock_destruct;
505 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
507 sock_reset_flag(sk, SOCK_ZAPPED);
509 sk->sk_protocol = proto;
510 sk->sk_state = IUCV_OPEN;
512 iucv_sock_link(&iucv_sk_list, sk);
513 return sk;
516 static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
521 sock_hold(sk);
523 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
525 iucv_sk(sk)->parent = parent;
529 static void iucv_accept_unlink(struct sock *sk)
532 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
535 list_del_init(&iucv_sk(sk)->accept_q);
537 sk_acceptq_removed(iucv_sk(sk)->parent);
538 iucv_sk(sk)->parent = NULL;
539 sock_put(sk);
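Source lines 516-539 are the accept-queue bookkeeping pair: enqueue takes a reference on the child socket (sock_hold), links its accept_q node onto the parent's queue and records the parent; unlink reverses every step and drops the reference with sock_put. A sketch of the pair follows; the accept_q_lock spinlock and the sk_acceptq_added() counterpart to the sk_acceptq_removed() seen above are assumptions, as neither references sk and so neither shows up in the matches.

/*
 * Sketch of the accept-queue pair shown above.  Lock name and the
 * sk_acceptq_added() call are assumptions.
 */
static void accept_enqueue_sketch(struct sock *parent, struct sock *sk)
{
        struct iucv_sock *par = iucv_sk(parent);
        unsigned long flags;

        sock_hold(sk);                  /* the parent's list holds a ref */
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}

static void accept_unlink_sketch(struct sock *sk)
{
        struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
        unsigned long flags;

        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_del_init(&iucv_sk(sk)->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        sk_acceptq_removed(iucv_sk(sk)->parent);
        iucv_sk(sk)->parent = NULL;
        sock_put(sk);                   /* drop the enqueue reference */
}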
546 struct sock *sk;
549 sk = (struct sock *) isk;
550 lock_sock(sk);
552 if (sk->sk_state == IUCV_CLOSED) {
553 iucv_accept_unlink(sk);
554 release_sock(sk);
558 if (sk->sk_state == IUCV_CONNECTED ||
559 sk->sk_state == IUCV_DISCONN ||
561 iucv_accept_unlink(sk);
563 sock_graft(sk, newsock);
565 release_sock(sk);
566 return sk;
569 release_sock(sk);
592 struct sock *sk = sock->sk;
602 lock_sock(sk);
603 if (sk->sk_state != IUCV_OPEN) {
610 iucv = iucv_sk(sk);
635 sk->sk_bound_dev_if = dev->ifindex;
638 sk->sk_state = IUCV_BOUND;
652 sk->sk_state = IUCV_BOUND;
654 sk->sk_allocation |= GFP_DMA;
665 release_sock(sk);
670 static int iucv_sock_autobind(struct sock *sk)
672 struct iucv_sock *iucv = iucv_sk(sk);
680 sk->sk_allocation |= GFP_DMA;
695 struct sock *sk = sock->sk;
696 struct iucv_sock *iucv = iucv_sk(sk);
713 sk);
742 struct sock *sk = sock->sk;
743 struct iucv_sock *iucv = iucv_sk(sk);
749 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
752 if (sk->sk_state == IUCV_OPEN &&
756 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
759 if (sk->sk_state == IUCV_OPEN) {
760 err = iucv_sock_autobind(sk);
765 lock_sock(sk);
772 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
778 if (sk->sk_state != IUCV_CONNECTED)
779 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
781 sock_sndtimeo(sk, flags & O_NONBLOCK));
783 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
787 iucv_sever_path(sk, 0);
790 release_sock(sk);
797 struct sock *sk = sock->sk;
800 lock_sock(sk);
803 if (sk->sk_state != IUCV_BOUND)
809 sk->sk_max_ack_backlog = backlog;
810 sk->sk_ack_backlog = 0;
811 sk->sk_state = IUCV_LISTEN;
815 release_sock(sk);
824 struct sock *sk = sock->sk, *nsk;
828 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
830 if (sk->sk_state != IUCV_LISTEN) {
835 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
838 add_wait_queue_exclusive(sk_sleep(sk), &wait);
839 while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
846 release_sock(sk);
848 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
850 if (sk->sk_state != IUCV_LISTEN) {
862 remove_wait_queue(sk_sleep(sk), &wait);
870 release_sock(sk);
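Source lines 824-870 are the blocking accept() path: the listener is locked with lock_sock_nested(), an exclusive waiter is added to sk_sleep(sk), and the loop keeps calling iucv_accept_dequeue(), dropping the socket lock across schedule_timeout() and re-validating the IUCV_LISTEN state once the lock is retaken. A sketch of just that wait loop follows; the set_current_state() calls and the errno values are assumptions, only the loop structure is visible in the matches.

/*
 * Sketch of the accept wait loop shown above.  Assumes the caller already
 * holds the socket lock (lock_sock_nested) and has verified the listener
 * is in IUCV_LISTEN; errno choices are assumptions.
 */
static struct sock *accept_wait_sketch(struct sock *sk, struct socket *newsock,
                                       int flags, int *err)
{
        DECLARE_WAITQUEUE(wait, current);
        long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
        struct sock *nsk;

        *err = 0;
        add_wait_queue_exclusive(sk_sleep(sk), &wait);
        while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (!timeo) {
                        *err = -EAGAIN;
                        break;
                }
                release_sock(sk);               /* sleep without the lock */
                timeo = schedule_timeout(timeo);
                lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
                if (sk->sk_state != IUCV_LISTEN) {
                        *err = -EBADFD;         /* listener went away */
                        break;
                }
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        break;
                }
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(sk_sleep(sk), &wait);
        return nsk;                             /* NULL on error/timeout */
}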
878 struct sock *sk = sock->sk;
879 struct iucv_sock *iucv = iucv_sk(sk);
924 struct sock *sk = sock->sk;
925 struct iucv_sock *iucv = iucv_sk(sk);
938 err = sock_error(sk);
946 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
949 lock_sock(sk);
951 if (sk->sk_shutdown & SEND_SHUTDOWN) {
957 if (sk->sk_state != IUCV_CONNECTED) {
1020 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1034 timeo = sock_sndtimeo(sk, noblock);
1035 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1040 if (sk->sk_state != IUCV_CONNECTED) {
1051 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1117 release_sock(sk);
1123 release_sock(sk);
1159 static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1208 if (sk_filter(sk, skb)) {
1209 atomic_inc(&sk->sk_drops); /* skb rejected by filter */
1213 if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */
1214 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
1221 static void iucv_process_message_q(struct sock *sk)
1223 struct iucv_sock *iucv = iucv_sk(sk);
1231 iucv_process_message(sk, skb, p->path, &p->msg);
1243 struct sock *sk = sock->sk;
1244 struct iucv_sock *iucv = iucv_sk(sk);
1250 if ((sk->sk_state == IUCV_DISCONN) &&
1252 skb_queue_empty(&sk->sk_receive_queue) &&
1261 skb = skb_recv_datagram(sk, flags, noblock, &err);
1263 if (sk->sk_shutdown & RCV_SHUTDOWN)
1272 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1277 skb_queue_head(&sk->sk_receive_queue, skb);
1282 if (sk->sk_type == SOCK_SEQPACKET) {
1297 skb_queue_head(&sk->sk_receive_queue, skb);
1305 if (sk->sk_type == SOCK_STREAM) {
1308 skb_queue_head(&sk->sk_receive_queue, skb);
1318 iucv_sock_close(sk);
1328 if (__sock_queue_rcv_skb(sk, rskb)) {
1338 iucv_process_message_q(sk);
1341 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1343 sk->sk_state = IUCV_DISCONN;
1344 sk->sk_state_change(sk);
1353 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1362 struct sock *sk;
1365 sk = (struct sock *) isk;
1367 if (sk->sk_state == IUCV_CONNECTED)
1377 struct sock *sk = sock->sk;
1382 if (sk->sk_state == IUCV_LISTEN)
1383 return iucv_accept_poll(sk);
1385 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1387 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
1389 if (sk->sk_shutdown & RCV_SHUTDOWN)
1392 if (sk->sk_shutdown == SHUTDOWN_MASK)
1395 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1396 (sk->sk_shutdown & RCV_SHUTDOWN))
1399 if (sk->sk_state == IUCV_CLOSED)
1402 if (sk->sk_state == IUCV_DISCONN)
1405 if (sock_writeable(sk) && iucv_below_msglim(sk))
1408 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
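Source lines 1377-1408 are the poll() mask computation: a listening socket defers to iucv_accept_poll(); otherwise the mask is built up from the error queue, the shutdown bits, the receive queue, the socket state and writability, and SOCKWQ_ASYNC_NOSPACE is set whenever the socket is not currently writable. A sketch of that composition follows; apart from EPOLLPRI, which is visible in the matches, the specific EPOLL* bits are assumptions following the usual AF_* conventions.

/*
 * Sketch of the poll-mask composition shown above (sock_poll_wait() and
 * the file/poll_table plumbing omitted).  Mask bits other than EPOLLPRI
 * are assumptions.
 */
static __poll_t iucv_poll_mask_sketch(struct sock *sk)
{
        __poll_t mask = 0;

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP;
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= EPOLLHUP;
        if (sk->sk_state == IUCV_DISCONN)
                mask |= EPOLLIN;

        if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        return mask;
}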
1415 struct sock *sk = sock->sk;
1416 struct iucv_sock *iucv = iucv_sk(sk);
1425 lock_sock(sk);
1426 switch (sk->sk_state) {
1438 sk->sk_state == IUCV_CONNECTED) {
1458 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1461 sk->sk_shutdown |= how;
1468 /* skb_queue_purge(&sk->sk_receive_queue); */
1470 skb_queue_purge(&sk->sk_receive_queue);
1474 sk->sk_state_change(sk);
1477 release_sock(sk);
1483 struct sock *sk = sock->sk;
1486 if (!sk)
1489 iucv_sock_close(sk);
1491 sock_orphan(sk);
1492 iucv_sock_kill(sk);
1500 struct sock *sk = sock->sk;
1501 struct iucv_sock *iucv = iucv_sk(sk);
1516 lock_sock(sk);
1525 switch (sk->sk_state) {
1542 release_sock(sk);
1550 struct sock *sk = sock->sk;
1551 struct iucv_sock *iucv = iucv_sk(sk);
1571 lock_sock(sk);
1574 release_sock(sk);
1577 if (sk->sk_state == IUCV_OPEN)
1603 struct sock *sk, *nsk;
1612 sk = NULL;
1613 sk_for_each(sk, &iucv_sk_list.head)
1614 if (sk->sk_state == IUCV_LISTEN &&
1615 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1620 iucv = iucv_sk(sk);
1628 bh_lock_sock(sk);
1634 if (sk->sk_state != IUCV_LISTEN) {
1641 if (sk_acceptq_is_full(sk)) {
1648 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
1656 iucv_sock_init(nsk, sk);
1683 iucv_accept_enqueue(sk, nsk);
1687 sk->sk_data_ready(sk);
1690 bh_unlock_sock(sk);
1696 struct sock *sk = path->private;
1698 sk->sk_state = IUCV_CONNECTED;
1699 sk->sk_state_change(sk);
1704 struct sock *sk = path->private;
1705 struct iucv_sock *iucv = iucv_sk(sk);
1710 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1721 len = atomic_read(&sk->sk_rmem_alloc);
1723 if (len > sk->sk_rcvbuf)
1730 iucv_process_message(sk, skb, path, msg);
1749 struct sock *sk = path->private;
1751 struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q;
1755 bh_lock_sock(sk);
1771 iucv_sock_wake_msglim(sk);
1774 if (sk->sk_state == IUCV_CLOSING) {
1775 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
1776 sk->sk_state = IUCV_CLOSED;
1777 sk->sk_state_change(sk);
1780 bh_unlock_sock(sk);
1786 struct sock *sk = path->private;
1788 if (sk->sk_state == IUCV_CLOSED)
1791 bh_lock_sock(sk);
1792 iucv_sever_path(sk, 1);
1793 sk->sk_state = IUCV_DISCONN;
1795 sk->sk_state_change(sk);
1796 bh_unlock_sock(sk);
1804 struct sock *sk = path->private;
1806 bh_lock_sock(sk);
1807 if (sk->sk_state != IUCV_CLOSED) {
1808 sk->sk_shutdown |= SEND_SHUTDOWN;
1809 sk->sk_state_change(sk);
1811 bh_unlock_sock(sk);
1838 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1845 iucv = iucv_sk(sk);
1854 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
1855 bh_lock_sock(sk);
1856 if ((sk->sk_state != IUCV_LISTEN) ||
1857 sk_acceptq_is_full(sk) ||
1864 bh_unlock_sock(sk);
1869 iucv_sock_init(nsk, sk);
1880 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1889 iucv_accept_enqueue(sk, nsk);
1891 sk->sk_data_ready(sk);
1894 bh_unlock_sock(sk);
1903 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1905 struct iucv_sock *iucv = iucv_sk(sk);
1909 if (sk->sk_state != IUCV_BOUND)
1911 bh_lock_sock(sk);
1913 sk->sk_state = IUCV_CONNECTED;
1914 sk->sk_state_change(sk);
1915 bh_unlock_sock(sk);
1924 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
1926 struct iucv_sock *iucv = iucv_sk(sk);
1930 if (sk->sk_state != IUCV_BOUND)
1932 bh_lock_sock(sk);
1933 sk->sk_state = IUCV_DISCONN;
1934 sk->sk_state_change(sk);
1935 bh_unlock_sock(sk);
1944 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
1946 struct iucv_sock *iucv = iucv_sk(sk);
1951 bh_lock_sock(sk);
1952 if (sk->sk_state == IUCV_CONNECTED) {
1953 sk->sk_state = IUCV_DISCONN;
1954 sk->sk_state_change(sk);
1956 bh_unlock_sock(sk);
1965 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
1967 struct iucv_sock *iucv = iucv_sk(sk);
1972 if (sk->sk_state != IUCV_CONNECTED)
1976 iucv_sock_wake_msglim(sk);
1983 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
1985 struct iucv_sock *iucv = iucv_sk(sk);
1992 if (sk->sk_state != IUCV_CONNECTED) {
1997 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2007 if (sk_filter(sk, skb)) {
2008 atomic_inc(&sk->sk_drops); /* skb rejected by filter */
2015 if (__sock_queue_rcv_skb(sk, skb))
2019 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2032 struct sock *sk;
2050 sk = NULL;
2052 sk_for_each(sk, &iucv_sk_list.head) {
2054 if ((!memcmp(&iucv_sk(sk)->src_name,
2056 (!memcmp(&iucv_sk(sk)->src_user_id,
2058 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2059 (!memcmp(&iucv_sk(sk)->dst_user_id,
2061 iucv = iucv_sk(sk);
2065 if ((!memcmp(&iucv_sk(sk)->src_name,
2067 (!memcmp(&iucv_sk(sk)->src_user_id,
2069 (!memcmp(&iucv_sk(sk)->dst_name,
2071 (!memcmp(&iucv_sk(sk)->dst_user_id,
2073 iucv = iucv_sk(sk);
2080 sk = NULL;
2094 err = afiucv_hs_callback_syn(sk, skb);
2098 err = afiucv_hs_callback_synack(sk, skb);
2102 err = afiucv_hs_callback_synfin(sk, skb);
2106 err = afiucv_hs_callback_fin(sk, skb);
2109 err = afiucv_hs_callback_win(sk, skb);
2121 err = afiucv_hs_callback_rx(sk, skb);
2137 struct sock *isk = skb->sk;
2138 struct sock *sk = NULL;
2146 sk_for_each(sk, &iucv_sk_list.head)
2147 if (sk == isk) {
2148 iucv = iucv_sk(sk);
2153 if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2164 iucv_sock_wake_msglim(sk);
2173 iucv_sock_wake_msglim(sk);
2183 if (sk->sk_state == IUCV_CONNECTED) {
2184 sk->sk_state = IUCV_DISCONN;
2185 sk->sk_state_change(sk);
2194 if (sk->sk_state == IUCV_CLOSING) {
2195 if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) {
2196 sk->sk_state = IUCV_CLOSED;
2197 sk->sk_state_change(sk);
2210 struct sock *sk;
2216 sk_for_each(sk, &iucv_sk_list.head) {
2217 iucv = iucv_sk(sk);
2219 (sk->sk_state == IUCV_CONNECTED)) {
2221 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2222 sk->sk_state = IUCV_DISCONN;
2223 sk->sk_state_change(sk);
2262 struct sock *sk;
2272 /* currently, proto ops can handle both sk types */
2279 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
2280 if (!sk)
2283 iucv_sock_init(sk, NULL);