Lines matching defs:iucv — occurrences of "iucv" in the AF_IUCV socket implementation (net/iucv/af_iucv.c); the number at the start of each entry is the line number in that source file.

35 #include <net/iucv/af_iucv.h>
112 * iucv_msg_length() - Returns the length of an iucv message.
115 * The function returns the length of the specified iucv message @msg of data
129 * Use this function to allocate socket buffers to store iucv message data.
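Based on the comment lines above, the helper distinguishes data carried in the IUCV parameter list (PRMDATA) from data stored in a buffer. The following self-contained sketch shows what that presumably amounts to; struct iucv_message is a simplified stand-in for the kernel type, and the IUCV_IPRMDATA value and the 0xffffffff length encoding are assumptions for illustration, not taken from the listing.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define IUCV_IPRMDATA 0x08		/* assumed flag value, for illustration only */

struct iucv_message {			/* simplified stand-in for the kernel struct */
	uint32_t length;
	uint8_t  flags;
};

static size_t iucv_msg_length(const struct iucv_message *msg)
{
	if (msg->flags & IUCV_IPRMDATA) {
		/* parameter-list data: at most 8 bytes, length assumed to be
		 * encoded as 0xffffffff minus the payload size */
		size_t datalen = 0xffffffffU - msg->length;
		return datalen < 8 ? datalen : 8;
	}
	return msg->length;		/* buffered data: length field is the size */
}

int main(void)
{
	struct iucv_message m = { .length = 0xffffffffU - 5, .flags = IUCV_IPRMDATA };

	printf("PRMDATA payload: %zu bytes\n", iucv_msg_length(&m));	/* prints 5 */
	return 0;
}

The returned value is what the receive path would use to size the socket buffer for the message, as the comment above suggests.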
145 * @state: first iucv sk state
146 * @state2: second iucv sk state
160 * Always returns true if the socket is not connected (no iucv path for
165 struct iucv_sock *iucv = iucv_sk(sk);
169 if (iucv->transport == AF_IUCV_TRANS_IUCV)
170 return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
172 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
173 (atomic_read(&iucv->pendings) <= 0));
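Pieced together from the fragments above, the flow-control predicate behind these lines presumably reads as follows. The two transport branches are taken verbatim from the listing; the early return for unconnected sockets is inferred from the comment about sockets with no iucv path, so treat this as a sketch rather than the verified source.

static inline int iucv_below_msglim(struct sock *sk)
{
	struct iucv_sock *iucv = iucv_sk(sk);

	if (sk->sk_state != IUCV_CONNECTED)
		return 1;	/* no established path, so no flow control applies */
	if (iucv->transport == AF_IUCV_TRANS_IUCV)
		/* classic IUCV: limited by the path's negotiated msglim */
		return (atomic_read(&iucv->skbs_in_xmit) < iucv->path->msglim);
	else
		/* HiperSockets: limited by the peer's advertised window,
		 * and blocked while TX notifications are still pending */
		return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
			(atomic_read(&iucv->pendings) <= 0));
}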
197 struct iucv_sock *iucv = iucv_sk(sock);
209 phs_hdr->window = iucv->msglimit;
211 confirm_recv = atomic_read(&iucv->msg_recv);
216 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
217 memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
218 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
219 memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
227 skb->dev = iucv->hs_dev;
250 atomic_inc(&iucv->skbs_in_xmit);
253 atomic_dec(&iucv->skbs_in_xmit);
255 atomic_sub(confirm_recv, &iucv->msg_recv);
256 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
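The afiucv_hs_send() lines above suggest how receive confirmations travel with outgoing HiperSockets frames: the header's window field appears to carry either the local msglimit (connection setup) or the number of messages consumed since the last confirmation, and that count is subtracted from msg_recv once the frame is queued. Below is a minimal, self-contained model of the confirmation accounting only, using hypothetical stand-in types rather than the kernel's struct af_iucv_trans_hdr and socket state.

#include <stdatomic.h>
#include <stdio.h>

struct hs_hdr { int window; };			/* stand-in for the real transport header */

static atomic_int msg_recv;			/* messages received locally, not yet confirmed */

static void fill_and_send(struct hs_hdr *hdr)
{
	int confirm = atomic_load(&msg_recv);

	hdr->window = confirm;			/* piggyback the confirmation on the outgoing frame */
	/* ... here the kernel would hand the skb to the HiperSockets device ... */
	atomic_fetch_sub(&msg_recv, confirm);	/* those messages now count as confirmed */
}

int main(void)
{
	struct hs_hdr hdr;

	atomic_store(&msg_recv, 3);		/* pretend three messages were consumed locally */
	fill_and_send(&hdr);
	printf("confirmed %d, still unconfirmed %d\n",
	       hdr.window, atomic_load(&msg_recv));
	return 0;
}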
282 pr_err("Attempt to release alive iucv socket %p\n", sk);
335 struct iucv_sock *iucv = iucv_sk(sk);
336 struct iucv_path *path = iucv->path;
338 if (iucv->path) {
339 iucv->path = NULL;
341 low_nmcpy(user_data, iucv->src_name);
342 high_nmcpy(user_data, iucv->dst_name);
354 struct iucv_sock *iucv = iucv_sk(sk);
361 LL_RESERVED_SPACE(iucv->hs_dev);
380 struct iucv_sock *iucv = iucv_sk(sk);
392 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
403 if (!err && atomic_read(&iucv->skbs_in_xmit) > 0) {
421 skb_queue_purge(&iucv->send_skb_q);
422 skb_queue_purge(&iucv->backlog_skb_q);
429 if (iucv->hs_dev) {
430 dev_put(iucv->hs_dev);
431 iucv->hs_dev = NULL;
452 struct iucv_sock *iucv;
457 iucv = iucv_sk(sk);
460 INIT_LIST_HEAD(&iucv->accept_q);
461 spin_lock_init(&iucv->accept_q_lock);
462 skb_queue_head_init(&iucv->send_skb_q);
463 INIT_LIST_HEAD(&iucv->message_q.list);
464 spin_lock_init(&iucv->message_q.lock);
465 skb_queue_head_init(&iucv->backlog_skb_q);
466 iucv->send_tag = 0;
467 atomic_set(&iucv->pendings, 0);
468 iucv->flags = 0;
469 iucv->msglimit = 0;
470 atomic_set(&iucv->skbs_in_xmit, 0);
471 atomic_set(&iucv->msg_sent, 0);
472 atomic_set(&iucv->msg_recv, 0);
473 iucv->path = NULL;
474 iucv->sk_txnotify = afiucv_hs_callback_txnotify;
475 memset(&iucv->init, 0, sizeof(iucv->init));
477 iucv->transport = AF_IUCV_TRANS_IUCV;
479 iucv->transport = AF_IUCV_TRANS_HIPER;
551 static void __iucv_auto_name(struct iucv_sock *iucv)
560 memcpy(iucv->src_name, name, 8);
570 struct iucv_sock *iucv;
587 iucv = iucv_sk(sk);
592 if (iucv->path)
606 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
609 __iucv_auto_name(iucv);
611 memcpy(iucv->src_name, sa->siucv_name, 8);
613 iucv->hs_dev = dev;
616 iucv->transport = AF_IUCV_TRANS_HIPER;
617 if (!iucv->msglimit)
618 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
627 memcpy(iucv->src_name, sa->siucv_name, 8);
628 memcpy(iucv->src_user_id, iucv_userid, 8);
630 iucv->transport = AF_IUCV_TRANS_IUCV;
632 if (!iucv->msglimit)
633 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
649 struct iucv_sock *iucv = iucv_sk(sk);
655 memcpy(iucv->src_user_id, iucv_userid, 8);
656 iucv->transport = AF_IUCV_TRANS_IUCV;
660 __iucv_auto_name(iucv);
663 if (!iucv->msglimit)
664 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
673 struct iucv_sock *iucv = iucv_sk(sk);
678 low_nmcpy(user_data, iucv->src_name);
682 iucv->path = iucv_path_alloc(iucv->msglimit,
684 if (!iucv->path) {
688 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
692 iucv_path_free(iucv->path);
693 iucv->path = NULL;
720 struct iucv_sock *iucv = iucv_sk(sk);
730 iucv->transport == AF_IUCV_TRANS_HIPER)
745 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
746 memcpy(iucv->dst_name, sa->siucv_name, 8);
748 if (iucv->transport == AF_IUCV_TRANS_HIPER)
763 if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
856 struct iucv_sock *iucv = iucv_sk(sk);
861 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
862 memcpy(siucv->siucv_name, iucv->dst_name, 8);
864 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
865 memcpy(siucv->siucv_name, iucv->src_name, 8);
875 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
880 * Send the socket data in the parameter list in the iucv message
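Per the comment above, small messages can travel entirely in the 8-byte IUCV parameter list instead of a separate buffer. The following self-contained model shows the encoding this likely implies: up to 7 payload bytes in the 8-byte parameter list, with the last byte carrying 0xff minus the payload length (the complement of the length decoding sketched for iucv_msg_length() earlier). The helper name and exact layout are assumptions, not taken from the listing.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void build_prmdata(const uint8_t *data, size_t len, uint8_t prm[8])
{
	/* caller must ensure len <= 7 for parameter-list transmission */
	memset(prm, 0, 8);
	memcpy(prm, data, len);
	prm[7] = 0xff - (uint8_t)len;	/* length is encoded in the final byte */
}

int main(void)
{
	uint8_t prm[8];

	build_prmdata((const uint8_t *)"ping", 4, prm);
	printf("length byte: 0x%02x\n", prm[7]);	/* prints 0xfb */
	return 0;
}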
902 struct iucv_sock *iucv = iucv_sk(sk);
965 /* set iucv message target class */
977 /* allocate one skb for each iucv message:
981 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
983 LL_RESERVED_SPACE(iucv->hs_dev);
989 /* In nonlinear "classic" iucv skb,
1010 /* wait if outstanding messages for iucv path has reached */
1022 /* increment and save iucv message tag for msg_completion cbk */
1023 txmsg.tag = iucv->send_tag++;
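The comment and the send_tag increment above indicate that each outgoing message is stamped with a tag that the transmit-completion callback later uses to find and release the buffer queued on send_skb_q. A simplified, self-contained model of that matching, with hypothetical types standing in for the kernel's skb queue:

#include <stdint.h>
#include <stdio.h>

struct pending { uint32_t tag; int in_use; };	/* stand-in for a queued skb */

static uint32_t send_tag;
static struct pending queue[8];

static uint32_t stamp_and_queue(void)
{
	uint32_t tag = send_tag++;		/* mirrors txmsg.tag = iucv->send_tag++ */

	queue[tag % 8] = (struct pending){ .tag = tag, .in_use = 1 };
	return tag;
}

static void tx_done(uint32_t tag)		/* plays the role of the completion callback */
{
	for (int i = 0; i < 8; i++)
		if (queue[i].in_use && queue[i].tag == tag) {
			queue[i].in_use = 0;	/* the real code unlinks and frees the skb here */
			break;
		}
}

int main(void)
{
	uint32_t t = stamp_and_queue();

	tx_done(t);
	printf("completed tag %u\n", t);
	return 0;
}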
1026 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1027 atomic_inc(&iucv->msg_sent);
1030 atomic_dec(&iucv->msg_sent);
1034 skb_queue_tail(&iucv->send_skb_q, skb);
1035 atomic_inc(&iucv->skbs_in_xmit);
1037 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1039 err = iucv_send_iprm(iucv->path, &txmsg, skb);
1044 atomic_dec(&iucv->skbs_in_xmit);
1045 skb_unlink(skb, &iucv->send_skb_q);
1052 pr_iucv->path_sever(iucv->path, NULL);
1053 atomic_dec(&iucv->skbs_in_xmit);
1054 skb_unlink(skb, &iucv->send_skb_q);
1072 err = pr_iucv->message_send(iucv->path, &txmsg,
1076 err = pr_iucv->message_send(iucv->path, &txmsg,
1082 memcpy(user_id, iucv->dst_user_id, 8);
1084 memcpy(appl_id, iucv->dst_name, 8);
1093 atomic_dec(&iucv->skbs_in_xmit);
1094 skb_unlink(skb, &iucv->send_skb_q);
1125 "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1205 struct iucv_sock *iucv = iucv_sk(sk);
1209 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1216 if (!skb_queue_empty(&iucv->backlog_skb_q))
1225 struct iucv_sock *iucv = iucv_sk(sk);
1232 skb_queue_empty(&iucv->backlog_skb_q) &&
1234 list_empty(&iucv->message_q.list))
1266 /* each iucv message contains a complete record */
1270 /* create control message to store iucv msg target class:
1272 * fragmentation of original iucv message. */
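The control-message lines above put the per-message IUCV target class into ancillary data so it survives fragmentation of the original message. A userspace receiver could pick it up roughly as follows; SOL_IUCV and SCM_IUCV_TRGCLS are assumed to come from the kernel headers, and the fallback values below are for illustration only.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_IUCV
#define SOL_IUCV	277	/* assumed value */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS	0x0001	/* assumed value */
#endif

/* Receive one message and return the IUCV target class found in the
 * ancillary data (0 if none was present or recvmsg failed). */
static uint32_t recv_with_trgcls(int fd, void *buf, size_t len)
{
	char cbuf[CMSG_SPACE(sizeof(uint32_t))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	uint32_t trgcls = 0;

	if (recvmsg(fd, &msg, 0) < 0)
		return 0;
	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == SOL_IUCV && c->cmsg_type == SCM_IUCV_TRGCLS)
			memcpy(&trgcls, CMSG_DATA(c), sizeof(trgcls));
	return trgcls;
}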
1295 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1296 atomic_inc(&iucv->msg_recv);
1297 if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1305 spin_lock_bh(&iucv->message_q.lock);
1306 rskb = skb_dequeue(&iucv->backlog_skb_q);
1311 skb_queue_head(&iucv->backlog_skb_q,
1315 rskb = skb_dequeue(&iucv->backlog_skb_q);
1317 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1318 if (!list_empty(&iucv->message_q.list))
1320 if (atomic_read(&iucv->msg_recv) >=
1321 iucv->msglimit / 2) {
1329 spin_unlock_bh(&iucv->message_q.lock);
1397 struct iucv_sock *iucv = iucv_sk(sk);
1420 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1423 err = pr_iucv->message_send(iucv->path, &txmsg,
1444 if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1445 iucv->path) {
1446 err = pr_iucv->path_quiesce(iucv->path, NULL);
1482 struct iucv_sock *iucv = iucv_sk(sk);
1501 iucv->flags |= IUCV_IPRMDATA;
1503 iucv->flags &= ~IUCV_IPRMDATA;
1512 iucv->msglimit = val;
1532 struct iucv_sock *iucv = iucv_sk(sk);
1549 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1553 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1554 : iucv->msglimit; /* default */
1560 val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
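The getsockopt lines above report either the connected path's msglim or the configured default, which suggests the limit set via SO_MSGLIMIT takes effect when the connection is established. A userspace sketch of raising it beforehand; the SOL_IUCV and SO_MSGLIMIT fallback values are assumptions (they would normally come from the kernel headers, not from this listing).

#include <stdio.h>
#include <sys/socket.h>

#ifndef AF_IUCV
#define AF_IUCV		32	/* assumed value */
#endif
#ifndef SOL_IUCV
#define SOL_IUCV	277	/* assumed value */
#endif
#ifndef SO_MSGLIMIT
#define SO_MSGLIMIT	0x1000	/* assumed value */
#endif

int main(void)
{
	int fd = socket(AF_IUCV, SOCK_STREAM, 0);
	int limit = 128;	/* requested per-connection message limit */

	if (fd < 0 || setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit)))
		perror("SO_MSGLIMIT");
	return 0;
}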
1577 /* Callback wrappers - called from iucv base support */
1585 struct iucv_sock *iucv, *niucv;
1592 iucv = NULL;
1601 iucv = iucv_sk(sk);
1605 if (!iucv)
1612 low_nmcpy(user_data, iucv->src_name);
1613 high_nmcpy(user_data, iucv->dst_name);
1645 memcpy(niucv->src_name, iucv->src_name, 8);
1646 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1655 niucv->msglimit = iucv->msglimit;
1656 path->msglim = iucv->msglimit;
1686 struct iucv_sock *iucv = iucv_sk(sk);
1696 spin_lock(&iucv->message_q.lock);
1698 if (!list_empty(&iucv->message_q.list) ||
1699 !skb_queue_empty(&iucv->backlog_skb_q))
1721 list_add_tail(&save_msg->list, &iucv->message_q.list);
1724 spin_unlock(&iucv->message_q.lock);
1734 struct iucv_sock *iucv;
1737 iucv = iucv_sk(sk);
1738 list = &iucv->send_skb_q;
1750 atomic_dec(&iucv->skbs_in_xmit);
1763 if (atomic_read(&iucv->skbs_in_xmit) == 0) {
1839 struct iucv_sock *iucv, *niucv;
1842 iucv = iucv_sk(sk);
1843 if (!iucv) {
1868 niucv->msglimit = iucv->msglimit;
1875 memcpy(niucv->src_name, iucv->src_name, 8);
1876 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1878 niucv->hs_dev = iucv->hs_dev;
1902 struct iucv_sock *iucv = iucv_sk(sk);
1904 if (!iucv || sk->sk_state != IUCV_BOUND) {
1910 iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
1923 struct iucv_sock *iucv = iucv_sk(sk);
1925 if (!iucv || sk->sk_state != IUCV_BOUND) {
1943 struct iucv_sock *iucv = iucv_sk(sk);
1946 if (!iucv) {
1966 struct iucv_sock *iucv = iucv_sk(sk);
1968 if (!iucv)
1974 atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
1984 struct iucv_sock *iucv = iucv_sk(sk);
1986 if (!iucv) {
2012 spin_lock(&iucv->message_q.lock);
2013 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2016 skb_queue_tail(&iucv->backlog_skb_q, skb);
2019 spin_unlock(&iucv->message_q.lock);
2032 struct iucv_sock *iucv;
2048 iucv = NULL;
2060 iucv = iucv_sk(sk);
2072 iucv = iucv_sk(sk);
2078 if (!iucv)
2135 struct iucv_sock *iucv = iucv_sk(sk);
2142 atomic_dec(&iucv->skbs_in_xmit);
2146 atomic_inc(&iucv->pendings);
2149 atomic_dec(&iucv->skbs_in_xmit);
2150 if (atomic_dec_return(&iucv->pendings) <= 0)
2154 atomic_dec(&iucv->skbs_in_xmit);
2162 if (atomic_read(&iucv->skbs_in_xmit) == 0) {
2177 struct iucv_sock *iucv;
2183 iucv = iucv_sk(sk);
2184 if ((iucv->hs_dev == event_dev) &&