Lines matching references to sk in the AF_IUCV socket implementation (net/iucv/af_iucv.c); the leading number on each line is the line number in that file.
56 #define __iucv_sock_wait(sk, condition, timeo, ret) \
61 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
71 release_sock(sk); \
73 lock_sock(sk); \
74 ret = sock_error(sk); \
78 finish_wait(sk_sleep(sk), &__wait); \
81 #define iucv_sock_wait(sk, condition, timeo) \
85 __iucv_sock_wait(sk, condition, timeo, __ret); \
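The matches at file lines 56-85 come from the socket wait helpers. For context, a condition-wait of this shape looks roughly like the sketch below, reconstructed from the matched fragments (the exact macro body in the file may differ): the socket lock is dropped while sleeping and sock_error() is re-checked after waking.

#define __iucv_sock_wait(sk, condition, timeo, ret)                     \
do {                                                                    \
        DEFINE_WAIT(__wait);                                            \
        long __timeo = timeo;                                           \
        ret = 0;                                                        \
        prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE);     \
        while (!(condition)) {                                          \
                if (!__timeo) {                                         \
                        ret = -EAGAIN;                                  \
                        break;                                          \
                }                                                       \
                if (signal_pending(current)) {                          \
                        ret = sock_intr_errno(__timeo);                 \
                        break;                                          \
                }                                                       \
                release_sock(sk);       /* sleep without the lock */    \
                __timeo = schedule_timeout(__timeo);                    \
                lock_sock(sk);                                          \
                ret = sock_error(sk);   /* bail out on socket errors */ \
                if (ret)                                                \
                        break;                                          \
        }                                                               \
        finish_wait(sk_sleep(sk), &__wait);                             \
} while (0)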
91 static void iucv_sock_kill(struct sock *sk);
92 static void iucv_sock_close(struct sock *sk);
94 static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify);
144 * @sk: sock structure
145 * @state: first iucv sk state
146 * @state2: second iucv sk state
150 static int iucv_sock_in_state(struct sock *sk, int state, int state2)
152 return (sk->sk_state == state || sk->sk_state == state2);
157 * @sk: sock structure
163 static inline int iucv_below_msglim(struct sock *sk)
165 struct iucv_sock *iucv = iucv_sk(sk);
167 if (sk->sk_state != IUCV_CONNECTED)
179 static void iucv_sock_wake_msglim(struct sock *sk)
184 wq = rcu_dereference(sk->sk_wq);
187 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
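The wake-up helper matched at lines 179-187 follows the usual RCU-protected socket wait-queue pattern. A rough sketch assembled from the fragments (the rcu_read_lock() bracket and skwq_has_sleeper() check are assumed from common practice rather than visible in the matches):

static void iucv_sock_wake_msglim(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);                /* matched at line 184 */
        if (skwq_has_sleeper(wq))
                wake_up_interruptible_all(&wq->wait);
        sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);   /* matched at line 187 */
        rcu_read_unlock();
}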
267 struct sock *sk;
269 sk_for_each(sk, &iucv_sk_list.head)
270 if (!memcmp(&iucv_sk(sk)->src_name, nm, 8))
271 return sk;
276 static void iucv_sock_destruct(struct sock *sk)
278 skb_queue_purge(&sk->sk_receive_queue);
279 skb_queue_purge(&sk->sk_error_queue);
281 if (!sock_flag(sk, SOCK_DEAD)) {
282 pr_err("Attempt to release alive iucv socket %p\n", sk);
286 WARN_ON(atomic_read(&sk->sk_rmem_alloc));
287 WARN_ON(refcount_read(&sk->sk_wmem_alloc));
288 WARN_ON(sk->sk_wmem_queued);
289 WARN_ON(sk->sk_forward_alloc);
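Read together, the matches at lines 276-289 form the socket destructor: purge any skbs still queued, refuse to tear down a socket that is not yet marked dead, and warn about leaked memory accounting. Assembled here for readability (comments added):

static void iucv_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
        skb_queue_purge(&sk->sk_error_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                pr_err("Attempt to release alive iucv socket %p\n", sk);
                return;
        }

        /* all accounting should be zero by the time the sock is freed */
        WARN_ON(atomic_read(&sk->sk_rmem_alloc));
        WARN_ON(refcount_read(&sk->sk_wmem_alloc));
        WARN_ON(sk->sk_wmem_queued);
        WARN_ON(sk->sk_forward_alloc);
}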
295 struct sock *sk;
298 while ((sk = iucv_accept_dequeue(parent, NULL))) {
299 iucv_sock_close(sk);
300 iucv_sock_kill(sk);
306 static void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk)
309 sk_add_node(sk, &l->head);
313 static void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk)
316 sk_del_node_init(sk);
321 static void iucv_sock_kill(struct sock *sk)
323 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
326 iucv_sock_unlink(&iucv_sk_list, sk);
327 sock_set_flag(sk, SOCK_DEAD);
328 sock_put(sk);
332 static void iucv_sever_path(struct sock *sk, int with_user_data)
335 struct iucv_sock *iucv = iucv_sk(sk);
352 static int iucv_send_ctrl(struct sock *sk, u8 flags)
354 struct iucv_sock *iucv = iucv_sk(sk);
362 if (sk->sk_shutdown & SEND_SHUTDOWN) {
364 shutdown = sk->sk_shutdown;
365 sk->sk_shutdown &= RCV_SHUTDOWN;
367 skb = sock_alloc_send_skb(sk, blen, 1, &err);
370 err = afiucv_hs_send(NULL, sk, skb, flags);
373 sk->sk_shutdown = shutdown;
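The matches at lines 352-373 show the control-message helper. The notable detail is that SEND_SHUTDOWN is temporarily cleared so control flags (SYN/FIN/WIN and friends) can still go out on a write-shutdown socket, then restored afterwards. A rough sketch under that reading; the headroom calculation assumes the HiperSockets device's link-layer reserve and may differ from the file:

static int iucv_send_ctrl(struct sock *sk, u8 flags)
{
        struct iucv_sock *iucv = iucv_sk(sk);
        struct sk_buff *skb;
        u8 shutdown = 0;
        int err = 0, blen;

        blen = sizeof(struct af_iucv_trans_hdr) +
               LL_RESERVED_SPACE(iucv->hs_dev);
        if (sk->sk_shutdown & SEND_SHUTDOWN) {
                /* controlling flags should be sent anyway */
                shutdown = sk->sk_shutdown;
                sk->sk_shutdown &= RCV_SHUTDOWN;
        }
        skb = sock_alloc_send_skb(sk, blen, 1, &err);
        if (skb) {
                skb_reserve(skb, blen);
                err = afiucv_hs_send(NULL, sk, skb, flags);
        }
        if (shutdown)
                sk->sk_shutdown = shutdown;
        return err;
}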
378 static void iucv_sock_close(struct sock *sk)
380 struct iucv_sock *iucv = iucv_sk(sk);
384 lock_sock(sk);
386 switch (sk->sk_state) {
388 iucv_sock_cleanup_listen(sk);
393 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
394 sk->sk_state = IUCV_DISCONN;
395 sk->sk_state_change(sk);
400 sk->sk_state = IUCV_CLOSING;
401 sk->sk_state_change(sk);
404 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
405 timeo = sk->sk_lingertime;
408 iucv_sock_wait(sk,
409 iucv_sock_in_state(sk, IUCV_CLOSED, 0),
415 sk->sk_state = IUCV_CLOSED;
416 sk->sk_state_change(sk);
418 sk->sk_err = ECONNRESET;
419 sk->sk_state_change(sk);
426 iucv_sever_path(sk, 1);
432 sk->sk_bound_dev_if = 0;
436 sock_set_flag(sk, SOCK_ZAPPED);
438 release_sock(sk);
441 static void iucv_sock_init(struct sock *sk, struct sock *parent)
444 sk->sk_type = parent->sk_type;
445 security_sk_clone(parent, sk);
451 struct sock *sk;
454 sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
455 if (!sk)
457 iucv = iucv_sk(sk);
459 sock_init_data(sock, sk);
481 sk->sk_destruct = iucv_sock_destruct;
482 sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
484 sock_reset_flag(sk, SOCK_ZAPPED);
486 sk->sk_protocol = proto;
487 sk->sk_state = IUCV_OPEN;
489 iucv_sock_link(&iucv_sk_list, sk);
490 return sk;
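Lines 451-490 are the allocator. The essential steps visible in the matches: allocate the sock against iucv_proto, let sock_init_data() wire it to the struct socket, install the destructor and defaults, and link the new sock into the global iucv_sk_list. A trimmed sketch; the per-iucv field initialisation between lines 459 and 481 is elided:

static struct sock *iucv_sock_alloc(struct socket *sock, int proto,
                                    gfp_t prio, int kern)
{
        struct iucv_sock *iucv;
        struct sock *sk;

        sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto, kern);
        if (!sk)
                return NULL;
        iucv = iucv_sk(sk);

        sock_init_data(sock, sk);
        /* ... iucv->accept_q, skb queues, msglimit and transport
         *     defaults are initialised in the elided section ... */

        sk->sk_destruct = iucv_sock_destruct;
        sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;

        sock_reset_flag(sk, SOCK_ZAPPED);

        sk->sk_protocol = proto;
        sk->sk_state = IUCV_OPEN;

        iucv_sock_link(&iucv_sk_list, sk);
        return sk;
}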
493 static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
498 sock_hold(sk);
500 list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
502 iucv_sk(sk)->parent = parent;
506 static void iucv_accept_unlink(struct sock *sk)
509 struct iucv_sock *par = iucv_sk(iucv_sk(sk)->parent);
512 list_del_init(&iucv_sk(sk)->accept_q);
514 sk_acceptq_removed(iucv_sk(sk)->parent);
515 iucv_sk(sk)->parent = NULL;
516 sock_put(sk);
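The enqueue/unlink pair at lines 493-516 keeps the per-listener accept queue consistent with the child sock's lifetime: a reference is taken while the child sits on the parent's accept_q and dropped again when it is unlinked. A rough sketch of the enqueue side; the spinlock around the list manipulation is assumed, since only sock_hold(), the list_add_tail() and the parent assignment appear in the matches:

static void iucv_accept_enqueue(struct sock *parent, struct sock *sk)
{
        struct iucv_sock *par = iucv_sk(parent);
        unsigned long flags;

        sock_hold(sk);                  /* child pinned while queued */
        spin_lock_irqsave(&par->accept_q_lock, flags);
        list_add_tail(&iucv_sk(sk)->accept_q, &par->accept_q);
        spin_unlock_irqrestore(&par->accept_q_lock, flags);
        iucv_sk(sk)->parent = parent;
        sk_acceptq_added(parent);
}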
523 struct sock *sk;
526 sk = (struct sock *) isk;
527 lock_sock(sk);
529 if (sk->sk_state == IUCV_CLOSED) {
530 iucv_accept_unlink(sk);
531 release_sock(sk);
535 if (sk->sk_state == IUCV_CONNECTED ||
536 sk->sk_state == IUCV_DISCONN ||
538 iucv_accept_unlink(sk);
540 sock_graft(sk, newsock);
542 release_sock(sk);
543 return sk;
546 release_sock(sk);
569 struct sock *sk = sock->sk;
579 lock_sock(sk);
580 if (sk->sk_state != IUCV_OPEN) {
587 iucv = iucv_sk(sk);
612 sk->sk_bound_dev_if = dev->ifindex;
615 sk->sk_state = IUCV_BOUND;
629 sk->sk_state = IUCV_BOUND;
631 sk->sk_allocation |= GFP_DMA;
642 release_sock(sk);
647 static int iucv_sock_autobind(struct sock *sk)
649 struct iucv_sock *iucv = iucv_sk(sk);
657 sk->sk_allocation |= GFP_DMA;
672 struct sock *sk = sock->sk;
673 struct iucv_sock *iucv = iucv_sk(sk);
690 sk);
719 struct sock *sk = sock->sk;
720 struct iucv_sock *iucv = iucv_sk(sk);
726 if (sk->sk_state != IUCV_OPEN && sk->sk_state != IUCV_BOUND)
729 if (sk->sk_state == IUCV_OPEN &&
733 if (sk->sk_type != SOCK_STREAM && sk->sk_type != SOCK_SEQPACKET)
736 if (sk->sk_state == IUCV_OPEN) {
737 err = iucv_sock_autobind(sk);
742 lock_sock(sk);
749 err = iucv_send_ctrl(sock->sk, AF_IUCV_FLAG_SYN);
755 if (sk->sk_state != IUCV_CONNECTED)
756 err = iucv_sock_wait(sk, iucv_sock_in_state(sk, IUCV_CONNECTED,
758 sock_sndtimeo(sk, flags & O_NONBLOCK));
760 if (sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_CLOSED)
764 iucv_sever_path(sk, 0);
767 release_sock(sk);
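For orientation, the bind/connect paths above back the usual userspace flow for this family. A minimal, hypothetical client sketch follows; the AF_IUCV constant and the sockaddr_iucv layout are declared locally here for illustration and should be checked against the system headers, and LNXGUEST/SRVNAME are made-up peer names.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV 32                      /* assumption: matches <sys/socket.h> */
#endif

/* assumed to mirror the kernel's sockaddr_iucv; verify before use */
struct sockaddr_iucv {
        sa_family_t     siucv_family;
        unsigned short  siucv_port;      /* reserved */
        unsigned int    siucv_addr;      /* reserved */
        char            siucv_nodeid[8]; /* reserved */
        char            siucv_user_id[8];/* z/VM user id, blank padded */
        char            siucv_name[8];   /* application name, blank padded */
};

int main(void)
{
        struct sockaddr_iucv peer;
        int fd = socket(AF_IUCV, SOCK_STREAM, 0);

        if (fd < 0) {
                perror("socket(AF_IUCV)");
                return 1;
        }
        memset(&peer, 0, sizeof(peer));
        peer.siucv_family = AF_IUCV;
        memcpy(peer.siucv_user_id, "LNXGUEST", 8);      /* hypothetical peer */
        memcpy(peer.siucv_name,    "SRVNAME ", 8);      /* hypothetical service */
        if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
                perror("connect");
        close(fd);
        return 0;
}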
774 struct sock *sk = sock->sk;
777 lock_sock(sk);
780 if (sk->sk_state != IUCV_BOUND)
786 sk->sk_max_ack_backlog = backlog;
787 sk->sk_ack_backlog = 0;
788 sk->sk_state = IUCV_LISTEN;
792 release_sock(sk);
801 struct sock *sk = sock->sk, *nsk;
805 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
807 if (sk->sk_state != IUCV_LISTEN) {
812 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
815 add_wait_queue_exclusive(sk_sleep(sk), &wait);
816 while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
823 release_sock(sk);
825 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
827 if (sk->sk_state != IUCV_LISTEN) {
839 remove_wait_queue(sk_sleep(sk), &wait);
847 release_sock(sk);
855 struct sock *sk = sock->sk;
856 struct iucv_sock *iucv = iucv_sk(sk);
901 struct sock *sk = sock->sk;
902 struct iucv_sock *iucv = iucv_sk(sk);
915 err = sock_error(sk);
923 if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR))
926 lock_sock(sk);
928 if (sk->sk_shutdown & SEND_SHUTDOWN) {
934 if (sk->sk_state != IUCV_CONNECTED) {
997 skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
1011 timeo = sock_sndtimeo(sk, noblock);
1012 err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
1017 if (sk->sk_state != IUCV_CONNECTED) {
1028 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1099 release_sock(sk);
1105 release_sock(sk);
1141 static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
1190 if (sk_filter(sk, skb)) {
1191 atomic_inc(&sk->sk_drops); /* skb rejected by filter */
1195 if (__sock_queue_rcv_skb(sk, skb)) /* handle rcv queue full */
1196 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
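The matches at lines 1190-1196 (and again at 2006-2018 for the HiperSockets transport) show the common delivery policy: run the socket filter first, then try the regular receive queue, and park the skb on a private backlog queue if that queue is full so recvmsg() can retry it later. Roughly, as the tail of the delivery path (the helper name below is hypothetical; in the file this logic sits at the end of iucv_process_message() and afiucv_hs_callback_rx()):

static void iucv_deliver_or_backlog(struct sock *sk, struct sk_buff *skb)
{
        if (sk_filter(sk, skb)) {
                atomic_inc(&sk->sk_drops);      /* skb rejected by filter */
                kfree_skb(skb);
                return;
        }
        if (__sock_queue_rcv_skb(sk, skb))      /* receive queue full */
                skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
}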
1203 static void iucv_process_message_q(struct sock *sk)
1205 struct iucv_sock *iucv = iucv_sk(sk);
1213 iucv_process_message(sk, skb, p->path, &p->msg);
1224 struct sock *sk = sock->sk;
1225 struct iucv_sock *iucv = iucv_sk(sk);
1231 if ((sk->sk_state == IUCV_DISCONN) &&
1233 skb_queue_empty(&sk->sk_receive_queue) &&
1242 skb = skb_recv_datagram(sk, flags, &err);
1244 if (sk->sk_shutdown & RCV_SHUTDOWN)
1253 sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;
1258 skb_queue_head(&sk->sk_receive_queue, skb);
1263 if (sk->sk_type == SOCK_SEQPACKET) {
1278 skb_queue_head(&sk->sk_receive_queue, skb);
1286 if (sk->sk_type == SOCK_STREAM) {
1289 skb_queue_head(&sk->sk_receive_queue, skb);
1299 iucv_sock_close(sk);
1309 if (__sock_queue_rcv_skb(sk, rskb)) {
1319 iucv_process_message_q(sk);
1322 err = iucv_send_ctrl(sk, AF_IUCV_FLAG_WIN);
1324 sk->sk_state = IUCV_DISCONN;
1325 sk->sk_state_change(sk);
1334 if (sk->sk_type == SOCK_SEQPACKET && (flags & MSG_TRUNC))
1343 struct sock *sk;
1346 sk = (struct sock *) isk;
1348 if (sk->sk_state == IUCV_CONNECTED)
1358 struct sock *sk = sock->sk;
1363 if (sk->sk_state == IUCV_LISTEN)
1364 return iucv_accept_poll(sk);
1366 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1368 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
1370 if (sk->sk_shutdown & RCV_SHUTDOWN)
1373 if (sk->sk_shutdown == SHUTDOWN_MASK)
1376 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1377 (sk->sk_shutdown & RCV_SHUTDOWN))
1380 if (sk->sk_state == IUCV_CLOSED)
1383 if (sk->sk_state == IUCV_DISCONN)
1386 if (sock_writeable(sk) && iucv_below_msglim(sk))
1389 sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
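Lines 1358-1389 compute the poll mask. The mask |= lines themselves do not appear in the listing because they do not mention sk; the EPOLL bits below are how this family usually fills them in, so treat the sketch as the approximate tail of iucv_sock_poll() rather than a verbatim copy. Writability additionally requires the message-limit check from iucv_below_msglim():

        __poll_t mask = 0;

        if (sk->sk_state == IUCV_LISTEN)
                return iucv_accept_poll(sk);

        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= EPOLLRDHUP;

        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;

        if (!skb_queue_empty(&sk->sk_receive_queue) ||
            (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;

        if (sk->sk_state == IUCV_CLOSED)
                mask |= EPOLLHUP;

        if (sk->sk_state == IUCV_DISCONN)
                mask |= EPOLLIN;

        if (sock_writeable(sk) && iucv_below_msglim(sk))
                mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
        else
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        return mask;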
1396 struct sock *sk = sock->sk;
1397 struct iucv_sock *iucv = iucv_sk(sk);
1406 lock_sock(sk);
1407 switch (sk->sk_state) {
1419 sk->sk_state == IUCV_CONNECTED) {
1439 iucv_send_ctrl(sk, AF_IUCV_FLAG_SHT);
1442 sk->sk_shutdown |= how;
1449 /* skb_queue_purge(&sk->sk_receive_queue); */
1451 skb_queue_purge(&sk->sk_receive_queue);
1455 sk->sk_state_change(sk);
1458 release_sock(sk);
1464 struct sock *sk = sock->sk;
1467 if (!sk)
1470 iucv_sock_close(sk);
1472 sock_orphan(sk);
1473 iucv_sock_kill(sk);
1481 struct sock *sk = sock->sk;
1482 struct iucv_sock *iucv = iucv_sk(sk);
1497 lock_sock(sk);
1506 switch (sk->sk_state) {
1523 release_sock(sk);
1531 struct sock *sk = sock->sk;
1532 struct iucv_sock *iucv = iucv_sk(sk);
1552 lock_sock(sk);
1555 release_sock(sk);
1558 if (sk->sk_state == IUCV_OPEN)
1584 struct sock *sk, *nsk;
1593 sk = NULL;
1594 sk_for_each(sk, &iucv_sk_list.head)
1595 if (sk->sk_state == IUCV_LISTEN &&
1596 !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) {
1601 iucv = iucv_sk(sk);
1609 bh_lock_sock(sk);
1615 if (sk->sk_state != IUCV_LISTEN) {
1622 if (sk_acceptq_is_full(sk)) {
1629 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
1637 iucv_sock_init(nsk, sk);
1664 iucv_accept_enqueue(sk, nsk);
1668 sk->sk_data_ready(sk);
1671 bh_unlock_sock(sk);
1677 struct sock *sk = path->private;
1679 sk->sk_state = IUCV_CONNECTED;
1680 sk->sk_state_change(sk);
1685 struct sock *sk = path->private;
1686 struct iucv_sock *iucv = iucv_sk(sk);
1691 if (sk->sk_shutdown & RCV_SHUTDOWN) {
1702 len = atomic_read(&sk->sk_rmem_alloc);
1704 if (len > sk->sk_rcvbuf)
1711 iucv_process_message(sk, skb, path, msg);
1730 struct sock *sk = path->private;
1737 iucv = iucv_sk(sk);
1740 bh_lock_sock(sk);
1759 iucv_sock_wake_msglim(sk);
1762 if (sk->sk_state == IUCV_CLOSING) {
1764 sk->sk_state = IUCV_CLOSED;
1765 sk->sk_state_change(sk);
1768 bh_unlock_sock(sk);
1774 struct sock *sk = path->private;
1776 if (sk->sk_state == IUCV_CLOSED)
1779 bh_lock_sock(sk);
1780 iucv_sever_path(sk, 1);
1781 sk->sk_state = IUCV_DISCONN;
1783 sk->sk_state_change(sk);
1784 bh_unlock_sock(sk);
1792 struct sock *sk = path->private;
1794 bh_lock_sock(sk);
1795 if (sk->sk_state != IUCV_CLOSED) {
1796 sk->sk_shutdown |= SEND_SHUTDOWN;
1797 sk->sk_state_change(sk);
1799 bh_unlock_sock(sk);
1835 static int afiucv_hs_callback_syn(struct sock *sk, struct sk_buff *skb)
1842 iucv = iucv_sk(sk);
1851 nsk = iucv_sock_alloc(NULL, sk->sk_protocol, GFP_ATOMIC, 0);
1852 bh_lock_sock(sk);
1853 if ((sk->sk_state != IUCV_LISTEN) ||
1854 sk_acceptq_is_full(sk) ||
1861 bh_unlock_sock(sk);
1866 iucv_sock_init(nsk, sk);
1877 nsk->sk_bound_dev_if = sk->sk_bound_dev_if;
1886 iucv_accept_enqueue(sk, nsk);
1888 sk->sk_data_ready(sk);
1891 bh_unlock_sock(sk);
1900 static int afiucv_hs_callback_synack(struct sock *sk, struct sk_buff *skb)
1902 struct iucv_sock *iucv = iucv_sk(sk);
1904 if (!iucv || sk->sk_state != IUCV_BOUND) {
1909 bh_lock_sock(sk);
1911 sk->sk_state = IUCV_CONNECTED;
1912 sk->sk_state_change(sk);
1913 bh_unlock_sock(sk);
1921 static int afiucv_hs_callback_synfin(struct sock *sk, struct sk_buff *skb)
1923 struct iucv_sock *iucv = iucv_sk(sk);
1925 if (!iucv || sk->sk_state != IUCV_BOUND) {
1930 bh_lock_sock(sk);
1931 sk->sk_state = IUCV_DISCONN;
1932 sk->sk_state_change(sk);
1933 bh_unlock_sock(sk);
1941 static int afiucv_hs_callback_fin(struct sock *sk, struct sk_buff *skb)
1943 struct iucv_sock *iucv = iucv_sk(sk);
1951 bh_lock_sock(sk);
1952 if (sk->sk_state == IUCV_CONNECTED) {
1953 sk->sk_state = IUCV_DISCONN;
1954 sk->sk_state_change(sk);
1956 bh_unlock_sock(sk);
1964 static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
1966 struct iucv_sock *iucv = iucv_sk(sk);
1971 if (sk->sk_state != IUCV_CONNECTED)
1975 iucv_sock_wake_msglim(sk);
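The WIN callback at lines 1964-1975 is the flow-control path for the HiperSockets transport: a confirmation from the peer reduces the count of in-flight data and re-wakes senders blocked on the message limit. A rough sketch; the msg_sent accounting field is inferred from the surrounding driver, not from the matches:

static int afiucv_hs_callback_win(struct sock *sk, struct sk_buff *skb)
{
        struct iucv_sock *iucv = iucv_sk(sk);

        if (!iucv)
                return NET_RX_SUCCESS;

        if (sk->sk_state != IUCV_CONNECTED)
                return NET_RX_SUCCESS;

        atomic_sub(skb->len, &iucv->msg_sent);  /* peer confirmed window space */
        iucv_sock_wake_msglim(sk);
        return NET_RX_SUCCESS;
}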
1982 static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
1984 struct iucv_sock *iucv = iucv_sk(sk);
1991 if (sk->sk_state != IUCV_CONNECTED) {
1996 if (sk->sk_shutdown & RCV_SHUTDOWN) {
2006 if (sk_filter(sk, skb)) {
2007 atomic_inc(&sk->sk_drops); /* skb rejected by filter */
2014 if (__sock_queue_rcv_skb(sk, skb))
2018 skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
2031 struct sock *sk;
2049 sk = NULL;
2051 sk_for_each(sk, &iucv_sk_list.head) {
2053 if ((!memcmp(&iucv_sk(sk)->src_name,
2055 (!memcmp(&iucv_sk(sk)->src_user_id,
2057 (!memcmp(&iucv_sk(sk)->dst_name, nullstring, 8)) &&
2058 (!memcmp(&iucv_sk(sk)->dst_user_id,
2060 iucv = iucv_sk(sk);
2064 if ((!memcmp(&iucv_sk(sk)->src_name,
2066 (!memcmp(&iucv_sk(sk)->src_user_id,
2068 (!memcmp(&iucv_sk(sk)->dst_name,
2070 (!memcmp(&iucv_sk(sk)->dst_user_id,
2072 iucv = iucv_sk(sk);
2079 sk = NULL;
2093 err = afiucv_hs_callback_syn(sk, skb);
2097 err = afiucv_hs_callback_synack(sk, skb);
2101 err = afiucv_hs_callback_synfin(sk, skb);
2105 err = afiucv_hs_callback_fin(sk, skb);
2108 err = afiucv_hs_callback_win(sk, skb);
2120 err = afiucv_hs_callback_rx(sk, skb);
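The receive path at lines 2031-2120 first looks up a matching bound or connected socket by src/dst name and user id, then dispatches on the transport-header flags, with plain data frames falling through to the rx handler. The dispatch reads roughly as below; the trans_hdr variable, the exact flag combinations, and the omitted SHT and WIN-with-data handling are assumptions based on the call sites visible in the listing:

        switch (trans_hdr->flags) {
        case AF_IUCV_FLAG_SYN:
                /* connect request */
                err = afiucv_hs_callback_syn(sk, skb);
                break;
        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_ACK):
                /* connect request confirmed */
                err = afiucv_hs_callback_synack(sk, skb);
                break;
        case (AF_IUCV_FLAG_SYN | AF_IUCV_FLAG_FIN):
                /* connect request refused */
                err = afiucv_hs_callback_synfin(sk, skb);
                break;
        case AF_IUCV_FLAG_FIN:
                /* close request */
                err = afiucv_hs_callback_fin(sk, skb);
                break;
        case AF_IUCV_FLAG_WIN:
                err = afiucv_hs_callback_win(sk, skb);
                break;
        default:
                /* plain data frame */
                err = afiucv_hs_callback_rx(sk, skb);
                break;
        }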
2133 static void afiucv_hs_callback_txnotify(struct sock *sk, enum iucv_tx_notify n)
2135 struct iucv_sock *iucv = iucv_sk(sk);
2137 if (sock_flag(sk, SOCK_ZAPPED))
2143 iucv_sock_wake_msglim(sk);
2151 iucv_sock_wake_msglim(sk);
2155 if (sk->sk_state == IUCV_CONNECTED) {
2156 sk->sk_state = IUCV_DISCONN;
2157 sk->sk_state_change(sk);
2161 if (sk->sk_state == IUCV_CLOSING) {
2163 sk->sk_state = IUCV_CLOSED;
2164 sk->sk_state_change(sk);
2176 struct sock *sk;
2182 sk_for_each(sk, &iucv_sk_list.head) {
2183 iucv = iucv_sk(sk);
2185 (sk->sk_state == IUCV_CONNECTED)) {
2187 iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN);
2188 sk->sk_state = IUCV_DISCONN;
2189 sk->sk_state_change(sk);
2228 struct sock *sk;
2238 /* currently, proto ops can handle both sk types */
2245 sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL, kern);
2246 if (!sk)
2249 iucv_sock_init(sk, NULL);