Lines Matching defs:iucv

34 #include <net/iucv/af_iucv.h>
127 * iucv_msg_length() - Returns the length of an iucv message.
130 * The function returns the length of the specified iucv message @msg of data
144 * Use this function to allocate socket buffers to store iucv message data.
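The comment above suggests sizing receive buffers from the returned length. As a rough, non-authoritative sketch (assuming the iucv_msg_length() and struct iucv_message definitions from af_iucv.c/af_iucv.h are in scope), a receive path could do:

#include <linux/skbuff.h>

/* Illustrative only: size the socket buffer from the IPRMDATA-aware
 * message length before copying the message data into it.
 */
static struct sk_buff *example_alloc_recv_skb(struct iucv_message *msg)
{
        size_t len = iucv_msg_length(msg);

        /* IUCV receive callbacks run in non-sleeping context, so use
         * an atomic allocation.
         */
        return alloc_skb(len, GFP_ATOMIC);
}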
160 * @state: first iucv sk state
161 * @state2: second iucv sk state
175 * Always returns true if the socket is not connected (no iucv path for
180 struct iucv_sock *iucv = iucv_sk(sk);
184 if (iucv->transport == AF_IUCV_TRANS_IUCV)
185 return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim);
187 return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) &&
188 (atomic_read(&iucv->pendings) <= 0));
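iucv_below_msglim() is the send-side flow-control check: for the classic IUCV transport it compares the send queue length against the path's msglim, for HiperSockets it compares confirmed sends against the peer's advertised limit. A minimal sketch of how a sender might block on it, assuming the iucv_sock_wait() wait macro from af_iucv.c:

/* Illustrative only: wait until the socket drops below its message
 * limit, or until the socket send timeout expires.
 */
static int example_wait_below_msglim(struct sock *sk, int noblock)
{
        long timeo = sock_sndtimeo(sk, noblock);

        return iucv_sock_wait(sk, iucv_below_msglim(sk), timeo);
}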
212 struct iucv_sock *iucv = iucv_sk(sock);
225 phs_hdr->window = iucv->msglimit;
227 confirm_recv = atomic_read(&iucv->msg_recv);
232 memcpy(phs_hdr->destUserID, iucv->dst_user_id, 8);
233 memcpy(phs_hdr->destAppName, iucv->dst_name, 8);
234 memcpy(phs_hdr->srcUserID, iucv->src_user_id, 8);
235 memcpy(phs_hdr->srcAppName, iucv->src_name, 8);
243 skb->dev = iucv->hs_dev;
271 skb_queue_tail(&iucv->send_skb_q, nskb);
274 skb_unlink(nskb, &iucv->send_skb_q);
277 atomic_sub(confirm_recv, &iucv->msg_recv);
278 WARN_ON(atomic_read(&iucv->msg_recv) < 0);
306 pr_err("Attempt to release alive iucv socket %p\n", sk);
359 struct iucv_sock *iucv = iucv_sk(sk);
360 struct iucv_path *path = iucv->path;
363 if (xchg(&iucv->path, NULL)) {
365 low_nmcpy(user_data, iucv->src_name);
366 high_nmcpy(user_data, iucv->dst_name);
378 struct iucv_sock *iucv = iucv_sk(sk);
385 LL_RESERVED_SPACE(iucv->hs_dev);
404 struct iucv_sock *iucv = iucv_sk(sk);
416 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
427 if (!err && !skb_queue_empty(&iucv->send_skb_q)) {
445 skb_queue_purge(&iucv->send_skb_q);
446 skb_queue_purge(&iucv->backlog_skb_q);
453 if (iucv->hs_dev) {
454 dev_put(iucv->hs_dev);
455 iucv->hs_dev = NULL;
476 struct iucv_sock *iucv;
481 iucv = iucv_sk(sk);
484 INIT_LIST_HEAD(&iucv->accept_q);
485 spin_lock_init(&iucv->accept_q_lock);
486 skb_queue_head_init(&iucv->send_skb_q);
487 INIT_LIST_HEAD(&iucv->message_q.list);
488 spin_lock_init(&iucv->message_q.lock);
489 skb_queue_head_init(&iucv->backlog_skb_q);
490 iucv->send_tag = 0;
491 atomic_set(&iucv->pendings, 0);
492 iucv->flags = 0;
493 iucv->msglimit = 0;
494 atomic_set(&iucv->msg_sent, 0);
495 atomic_set(&iucv->msg_recv, 0);
496 iucv->path = NULL;
497 iucv->sk_txnotify = afiucv_hs_callback_txnotify;
498 memset(&iucv->src_user_id, 0, 32);
500 iucv->transport = AF_IUCV_TRANS_IUCV;
502 iucv->transport = AF_IUCV_TRANS_HIPER;
574 static void __iucv_auto_name(struct iucv_sock *iucv)
583 memcpy(iucv->src_name, name, 8);
593 struct iucv_sock *iucv;
610 iucv = iucv_sk(sk);
615 if (iucv->path)
629 memcpy(iucv->src_user_id, sa->siucv_user_id, 8);
632 __iucv_auto_name(iucv);
634 memcpy(iucv->src_name, sa->siucv_name, 8);
636 iucv->hs_dev = dev;
639 iucv->transport = AF_IUCV_TRANS_HIPER;
640 if (!iucv->msglimit)
641 iucv->msglimit = IUCV_HIPER_MSGLIM_DEFAULT;
650 memcpy(iucv->src_name, sa->siucv_name, 8);
651 memcpy(iucv->src_user_id, iucv_userid, 8);
653 iucv->transport = AF_IUCV_TRANS_IUCV;
655 if (!iucv->msglimit)
656 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
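The bind path above copies an 8-byte user id and an 8-byte application name into the socket and selects the transport (HiperSockets device or classic IUCV) plus a default message limit. A hedged userspace sketch of creating and binding such a socket; the sockaddr layout mirrors struct sockaddr_iucv from af_iucv.h (verify against your headers), and the ids are blank-padded placeholders:

#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_IUCV
#define AF_IUCV 32                       /* address family number from linux/socket.h */
#endif

/* Mirrors the kernel's struct sockaddr_iucv; verify against af_iucv.h. */
struct sockaddr_iucv {
        sa_family_t     siucv_family;
        unsigned short  siucv_port;      /* reserved */
        unsigned int    siucv_addr;      /* reserved */
        char            siucv_nodeid[8]; /* reserved */
        char            siucv_user_id[8];/* guest user id, blank padded */
        char            siucv_name[8];   /* application name, blank padded */
};

static int example_bind_iucv(void)
{
        struct sockaddr_iucv addr;
        int fd = socket(AF_IUCV, SOCK_STREAM, 0);

        if (fd < 0)
                return -1;
        memset(&addr, 0, sizeof(addr));
        addr.siucv_family = AF_IUCV;
        memcpy(addr.siucv_user_id, "LNXGUEST", 8);   /* placeholder user id */
        memcpy(addr.siucv_name,    "EXAMPLE ", 8);   /* placeholder app name */
        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}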
672 struct iucv_sock *iucv = iucv_sk(sk);
678 memcpy(iucv->src_user_id, iucv_userid, 8);
679 iucv->transport = AF_IUCV_TRANS_IUCV;
683 __iucv_auto_name(iucv);
686 if (!iucv->msglimit)
687 iucv->msglimit = IUCV_QUEUELEN_DEFAULT;
696 struct iucv_sock *iucv = iucv_sk(sk);
701 low_nmcpy(user_data, iucv->src_name);
705 iucv->path = iucv_path_alloc(iucv->msglimit,
707 if (!iucv->path) {
711 err = pr_iucv->path_connect(iucv->path, &af_iucv_handler,
715 iucv_path_free(iucv->path);
716 iucv->path = NULL;
743 struct iucv_sock *iucv = iucv_sk(sk);
753 iucv->transport == AF_IUCV_TRANS_HIPER)
768 memcpy(iucv->dst_user_id, sa->siucv_user_id, 8);
769 memcpy(iucv->dst_name, sa->siucv_name, 8);
771 if (iucv->transport == AF_IUCV_TRANS_HIPER)
786 if (err && iucv->transport == AF_IUCV_TRANS_IUCV)
879 struct iucv_sock *iucv = iucv_sk(sk);
884 memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8);
885 memcpy(siucv->siucv_name, iucv->dst_name, 8);
887 memcpy(siucv->siucv_user_id, iucv->src_user_id, 8);
888 memcpy(siucv->siucv_name, iucv->src_name, 8);
898 * iucv_send_iprm() - Send socket data in parameter list of an iucv message.
903 * Send the socket data in the parameter list in the iucv message
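iucv_send_iprm() covers the case where the payload fits into the IUCV parameter list, so no separate data buffer is needed. A non-authoritative sketch of that encoding, assuming pr_iucv->message_send() and the IUCV_IPRMDATA flag as used elsewhere in this file (the parameter area is 8 bytes, at most 7 of which carry data, with the last byte set to 0xff minus the data length):

/* Illustrative only: send skb->len (<= 7) bytes directly in the
 * parameter list of an IUCV message.
 */
static int example_send_iprm(struct iucv_path *path,
                             struct iucv_message *msg,
                             struct sk_buff *skb)
{
        u8 prmdata[8];

        memcpy(prmdata, skb->data, skb->len);
        prmdata[7] = 0xff - (u8)skb->len;       /* length marker */
        return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0,
                                     prmdata, 8);
}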
925 struct iucv_sock *iucv = iucv_sk(sk);
988 /* set iucv message target class */
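The target class mentioned here is supplied by the application as ancillary data on the send. A hedged userspace sketch of attaching it (SOL_IUCV and SCM_IUCV_TRGCLS come from the kernel headers; the numeric fallbacks below are assumptions to verify):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_IUCV
#define SOL_IUCV        277     /* assumed value; see linux/socket.h */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS 0x0001  /* assumed value; see af_iucv.h */
#endif

/* Illustrative only: send one record with its IUCV target class passed
 * as a 4-byte SCM_IUCV_TRGCLS control message.
 */
static ssize_t example_send_with_trgcls(int fd, const void *buf, size_t len,
                                        unsigned int trgcls)
{
        union {
                char buf[CMSG_SPACE(sizeof(unsigned int))];
                struct cmsghdr align;
        } u;
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_IUCV;
        cmsg->cmsg_type = SCM_IUCV_TRGCLS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(trgcls));
        memcpy(CMSG_DATA(cmsg), &trgcls, sizeof(trgcls));
        return sendmsg(fd, &msg, 0);
}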
1000 /* allocate one skb for each iucv message:
1004 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1006 LL_RESERVED_SPACE(iucv->hs_dev);
1012 /* In nonlinear "classic" iucv skb,
1033 /* wait if the number of outstanding messages for the iucv path has reached the message limit */
1045 /* increment and save iucv message tag for msg_completion cbk */
1046 txmsg.tag = iucv->send_tag++;
1049 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1050 atomic_inc(&iucv->msg_sent);
1053 atomic_dec(&iucv->msg_sent);
1057 skb_queue_tail(&iucv->send_skb_q, skb);
1059 if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) &&
1061 err = iucv_send_iprm(iucv->path, &txmsg, skb);
1066 skb_unlink(skb, &iucv->send_skb_q);
1073 pr_iucv->path_sever(iucv->path, NULL);
1074 skb_unlink(skb, &iucv->send_skb_q);
1092 err = pr_iucv->message_send(iucv->path, &txmsg,
1096 err = pr_iucv->message_send(iucv->path, &txmsg,
1102 memcpy(user_id, iucv->dst_user_id, 8);
1104 memcpy(appl_id, iucv->dst_name, 8);
1112 skb_unlink(skb, &iucv->send_skb_q);
1143 "alloc of recv iucv skb len=%lu failed with errcode=%d\n",
1223 struct iucv_sock *iucv = iucv_sk(sk);
1227 list_for_each_entry_safe(p, n, &iucv->message_q.list, list) {
1234 if (!skb_queue_empty(&iucv->backlog_skb_q))
1244 struct iucv_sock *iucv = iucv_sk(sk);
1251 skb_queue_empty(&iucv->backlog_skb_q) &&
1253 list_empty(&iucv->message_q.list))
1285 /* each iucv message contains a complete record */
1289 /* create control message to store iucv msg target class:
1291 * fragmentation of original iucv message. */
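On the receive side the stored target class is handed back to the application the same way, as ancillary data accompanying each complete record. A counterpart sketch, with the same SOL_IUCV/SCM_IUCV_TRGCLS assumptions as the send example above:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#ifndef SOL_IUCV
#define SOL_IUCV        277     /* assumed value; see linux/socket.h */
#endif
#ifndef SCM_IUCV_TRGCLS
#define SCM_IUCV_TRGCLS 0x0001  /* assumed value; see af_iucv.h */
#endif

/* Illustrative only: receive one record and pick up its target class
 * from the SOL_IUCV/SCM_IUCV_TRGCLS control message, if present.
 */
static ssize_t example_recv_with_trgcls(int fd, void *buf, size_t len,
                                        unsigned int *trgcls)
{
        union {
                char buf[CMSG_SPACE(sizeof(unsigned int))];
                struct cmsghdr align;
        } u;
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        struct msghdr msg = {
                .msg_iov = &iov,
                .msg_iovlen = 1,
                .msg_control = u.buf,
                .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg;
        ssize_t n = recvmsg(fd, &msg, 0);

        if (n < 0)
                return n;
        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg))
                if (cmsg->cmsg_level == SOL_IUCV &&
                    cmsg->cmsg_type == SCM_IUCV_TRGCLS)
                        memcpy(trgcls, CMSG_DATA(cmsg), sizeof(*trgcls));
        return n;
}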
1314 if (iucv->transport == AF_IUCV_TRANS_HIPER) {
1315 atomic_inc(&iucv->msg_recv);
1316 if (atomic_read(&iucv->msg_recv) > iucv->msglimit) {
1324 spin_lock_bh(&iucv->message_q.lock);
1325 rskb = skb_dequeue(&iucv->backlog_skb_q);
1330 skb_queue_head(&iucv->backlog_skb_q,
1334 rskb = skb_dequeue(&iucv->backlog_skb_q);
1336 if (skb_queue_empty(&iucv->backlog_skb_q)) {
1337 if (!list_empty(&iucv->message_q.list))
1339 if (atomic_read(&iucv->msg_recv) >=
1340 iucv->msglimit / 2) {
1348 spin_unlock_bh(&iucv->message_q.lock);
1416 struct iucv_sock *iucv = iucv_sk(sk);
1439 if (iucv->transport == AF_IUCV_TRANS_IUCV) {
1442 err = pr_iucv->message_send(iucv->path, &txmsg,
1463 if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
1464 iucv->path) {
1465 err = pr_iucv->path_quiesce(iucv->path, NULL);
1501 struct iucv_sock *iucv = iucv_sk(sk);
1520 iucv->flags |= IUCV_IPRMDATA;
1522 iucv->flags &= ~IUCV_IPRMDATA;
1531 iucv->msglimit = val;
1551 struct iucv_sock *iucv = iucv_sk(sk);
1568 val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0;
1572 val = (iucv->path != NULL) ? iucv->path->msglim /* connected */
1573 : iucv->msglimit; /* default */
1579 val = (iucv->hs_dev) ? iucv->hs_dev->mtu -
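The SO_MSGLIMIT option read back here returns either the negotiated path limit (when connected) or the configured default. Setting it is only accepted while the socket is still unconnected, so a hedged userspace sketch would adjust it before connect() or listen() (constant values below are assumptions to verify against the kernel headers):

#include <sys/socket.h>

#ifndef SOL_IUCV
#define SOL_IUCV        277     /* assumed value; see linux/socket.h */
#endif
#ifndef SO_MSGLIMIT
#define SO_MSGLIMIT     0x1000  /* assumed value; see af_iucv.h */
#endif

/* Illustrative only: request a larger IUCV message limit before the
 * connection is established.
 */
static int example_set_msglimit(int fd, int limit)
{
        return setsockopt(fd, SOL_IUCV, SO_MSGLIMIT, &limit, sizeof(limit));
}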
1596 /* Callback wrappers - called from iucv base support */
1604 struct iucv_sock *iucv, *niucv;
1611 iucv = NULL;
1620 iucv = iucv_sk(sk);
1624 if (!iucv)
1631 low_nmcpy(user_data, iucv->src_name);
1632 high_nmcpy(user_data, iucv->dst_name);
1664 memcpy(niucv->src_name, iucv->src_name, 8);
1665 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1674 niucv->msglimit = iucv->msglimit;
1675 path->msglim = iucv->msglimit;
1705 struct iucv_sock *iucv = iucv_sk(sk);
1715 spin_lock(&iucv->message_q.lock);
1717 if (!list_empty(&iucv->message_q.list) ||
1718 !skb_queue_empty(&iucv->backlog_skb_q))
1740 list_add_tail(&save_msg->list, &iucv->message_q.list);
1743 spin_unlock(&iucv->message_q.lock);
1842 struct iucv_sock *iucv, *niucv;
1845 iucv = iucv_sk(sk);
1846 if (!iucv) {
1871 niucv->msglimit = iucv->msglimit;
1878 memcpy(niucv->src_name, iucv->src_name, 8);
1879 memcpy(niucv->src_user_id, iucv->src_user_id, 8);
1881 niucv->hs_dev = iucv->hs_dev;
1905 struct iucv_sock *iucv = iucv_sk(sk);
1907 if (!iucv)
1912 iucv->msglimit_peer = iucv_trans_hdr(skb)->window;
1926 struct iucv_sock *iucv = iucv_sk(sk);
1928 if (!iucv)
1946 struct iucv_sock *iucv = iucv_sk(sk);
1949 if (!iucv)
1967 struct iucv_sock *iucv = iucv_sk(sk);
1969 if (!iucv)
1975 atomic_sub(iucv_trans_hdr(skb)->window, &iucv->msg_sent);
1985 struct iucv_sock *iucv = iucv_sk(sk);
1987 if (!iucv) {
2013 spin_lock(&iucv->message_q.lock);
2014 if (skb_queue_empty(&iucv->backlog_skb_q)) {
2017 skb_queue_tail(&iucv->backlog_skb_q, skb);
2020 spin_unlock(&iucv->message_q.lock);
2033 struct iucv_sock *iucv;
2049 iucv = NULL;
2061 iucv = iucv_sk(sk);
2073 iucv = iucv_sk(sk);
2079 if (!iucv)
2139 struct iucv_sock *iucv = NULL;
2148 iucv = iucv_sk(sk);
2153 if (!iucv || sock_flag(sk, SOCK_ZAPPED))
2156 list = &iucv->send_skb_q;
2167 atomic_inc(&iucv->pendings);
2171 atomic_dec(&iucv->pendings);
2172 if (atomic_read(&iucv->pendings) <= 0)
2211 struct iucv_sock *iucv;
2217 iucv = iucv_sk(sk);
2218 if ((iucv->hs_dev == event_dev) &&