Lines Matching defs:conn

186 struct iucv_connection *conn;
197 struct iucv_connection *conn;
264 static char *netiucv_printuser(struct iucv_connection *conn)
270 if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
273 memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
274 memcpy(tmp_udat, conn->userdata, 16);
280 return netiucv_printname(conn->userid, 8);
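
A condensed sketch of the helper behind the fragments at 264-280: if the connection's userdata differs from the default iucvMagic, the peer is printed as "userid.userdata", otherwise only the userid is printed. The static buffers, the EBCDIC-to-ASCII conversion and the exact format string are assumptions, not taken from the listing.

static char *netiucv_printuser(struct iucv_connection *conn)
{
	static char tmp_uid[9];
	static char tmp_udat[17];
	static char buf[100];

	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
		/* peer uses non-default userdata: show "userid.userdata" */
		tmp_uid[8] = '\0';
		tmp_udat[16] = '\0';
		memcpy(tmp_uid, netiucv_printname(conn->userid, 8), 8);
		memcpy(tmp_udat, conn->userdata, 16);
		EBCASC(tmp_udat, 16);		/* conversion step assumed */
		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
		return buf;
	}
	return netiucv_printname(conn->userid, 8);
}
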
499 struct iucv_connection *conn = path->private;
502 ev.conn = conn;
504 fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
510 struct iucv_connection *conn = path->private;
513 ev.conn = conn;
515 fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
520 struct iucv_connection *conn = path->private;
522 fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
528 struct iucv_connection *conn = path->private;
539 list_for_each_entry(conn, &iucv_connection_list, list) {
540 if (strncmp(ipvmid, conn->userid, 8) ||
541 strncmp(ipuser, conn->userdata, 16))
544 conn->path = path;
545 ev.conn = conn;
547 fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
558 struct iucv_connection *conn = path->private;
560 fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
565 struct iucv_connection *conn = path->private;
567 fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
572 struct iucv_connection *conn = path->private;
574 fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
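
The fragments at 499-574 all follow one pattern: every IUCV path callback recovers its connection from path->private and injects a CONN_EVENT_* into that connection's FSM; only the connection-request callback (528-547) has to scan iucv_connection_list for a matching userid/userdata, because the incoming path carries no private pointer yet. A condensed sketch of the pattern, with the iucv_event member name assumed:

static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}

static void netiucv_callback_rx(struct iucv_path *path,
				struct iucv_message *msg)
{
	struct iucv_connection *conn = path->private;
	struct iucv_event ev;

	ev.conn = conn;
	ev.data = msg;			/* member name assumed */
	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
}
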
590 * @conn: The connection where this skb has been received.
596 static void netiucv_unpack_skb(struct iucv_connection *conn,
599 struct net_device *dev = conn->netdev;
654 struct iucv_connection *conn = ev->conn;
656 struct netiucv_priv *privptr = netdev_priv(conn->netdev);
661 if (!conn->netdev) {
662 iucv_message_reject(conn->path, msg);
667 if (msg->length > conn->max_buffsize) {
668 iucv_message_reject(conn->path, msg);
671 msg->length, conn->max_buffsize);
674 conn->rx_buff->data = conn->rx_buff->head;
675 skb_reset_tail_pointer(conn->rx_buff);
676 conn->rx_buff->len = 0;
677 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
684 netiucv_unpack_skb(conn, conn->rx_buff);
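
The receive action at 654-684 condenses to the sequence below: reject the message if the device is gone or the message exceeds max_buffsize, otherwise rewind the preallocated rx_buff, receive the message into it and hand it to netiucv_unpack_skb(). Statistics updates and the exact error handling around iucv_message_receive() are trimmed and partly assumed:

static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	int rc;

	if (!conn->netdev || msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		return;
	}

	/* reuse the single preallocated receive buffer for every message */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	if (!rc)
		netiucv_unpack_skb(conn, conn->rx_buff);
}
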
690 struct iucv_connection *conn = ev->conn;
705 if (!conn || !conn->netdev) {
710 privptr = netdev_priv(conn->netdev);
711 conn->prof.tx_pending--;
713 if ((skb = skb_dequeue(&conn->commit_queue))) {
724 conn->tx_buff->data = conn->tx_buff->head;
725 skb_reset_tail_pointer(conn->tx_buff);
726 conn->tx_buff->len = 0;
727 spin_lock_irqsave(&conn->collect_lock, saveflags);
728 while ((skb = skb_dequeue(&conn->collect_queue))) {
729 header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
730 skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
732 skb_put(conn->tx_buff, skb->len),
740 if (conn->collect_len > conn->prof.maxmulti)
741 conn->prof.maxmulti = conn->collect_len;
742 conn->collect_len = 0;
743 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
744 if (conn->tx_buff->len == 0) {
750 skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
751 conn->prof.send_stamp = jiffies;
754 rc = iucv_message_send(conn->path, &txmsg, 0, 0,
755 conn->tx_buff->data, conn->tx_buff->len);
756 conn->prof.doios_multi++;
757 conn->prof.txlen += conn->tx_buff->len;
758 conn->prof.tx_pending++;
759 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
760 conn->prof.tx_max_pending = conn->prof.tx_pending;
762 conn->prof.tx_pending--;
772 if (stat_maxcq > conn->prof.maxcqueue)
773 conn->prof.maxcqueue = stat_maxcq;
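
The TXDONE action at 690-773 frees the just-confirmed skb from commit_queue and then drains collect_queue: each queued packet is copied into tx_buff behind an ll_header whose next field points at the following packet, and the whole buffer goes out as one IUCV message while the prof counters are updated. A fragment-level sketch; per-skb accounting and the surrounding declarations are trimmed or assumed:

spin_lock_irqsave(&conn->collect_lock, saveflags);
while ((skb = skb_dequeue(&conn->collect_queue))) {
	header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
	skb_copy_from_linear_data(skb,
				  skb_put(conn->tx_buff, skb->len),
				  skb->len);
	dev_kfree_skb_any(skb);		/* per-skb stats trimmed */
}
if (conn->collect_len > conn->prof.maxmulti)
	conn->prof.maxmulti = conn->collect_len;
conn->collect_len = 0;
spin_unlock_irqrestore(&conn->collect_lock, saveflags);

if (conn->tx_buff->len) {
	header.next = 0;		/* terminating header */
	skb_put_data(conn->tx_buff, &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = jiffies;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc)
		conn->prof.tx_pending--;
}
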
780 struct iucv_connection *conn = ev->conn;
782 struct net_device *netdev = conn->netdev;
788 conn->path = path;
791 rc = iucv_path_accept(path, &netiucv_handler, conn->userdata, conn);
797 netdev->tx_queue_len = conn->path->msglim;
812 struct iucv_connection *conn = arg;
813 struct net_device *netdev = conn->netdev;
817 fsm_deltimer(&conn->timer);
819 netdev->tx_queue_len = conn->path->msglim;
825 struct iucv_connection *conn = arg;
828 fsm_deltimer(&conn->timer);
829 iucv_path_sever(conn->path, conn->userdata);
835 struct iucv_connection *conn = arg;
836 struct net_device *netdev = conn->netdev;
841 fsm_deltimer(&conn->timer);
842 iucv_path_sever(conn->path, conn->userdata);
844 "connection\n", netiucv_printuser(conn));
853 struct iucv_connection *conn = arg;
854 struct net_device *netdev = conn->netdev;
869 conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
871 netdev->name, netiucv_printuser(conn));
873 rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
874 NULL, conn->userdata, conn);
877 netdev->tx_queue_len = conn->path->msglim;
878 fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
879 CONN_EVENT_TIMER, conn);
884 netiucv_printname(conn->userid, 8));
890 " guest %s\n", netiucv_printname(conn->userid, 8));
903 netiucv_printname(conn->userid, 8));
920 kfree(conn->path);
921 conn->path = NULL;
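
The START action at 853-921 performs the outbound connect: allocate an IUCV path, connect it to the peer's userid/userdata, and on success adopt the path's message limit as tx_queue_len and arm a five-second timer waiting for the partner's acknowledgement; on failure the path is freed again. Condensed sketch with the rc-specific messages and FSM state changes trimmed:

conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
if (!conn->path)
	return;
rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
		       NULL, conn->userdata, conn);
switch (rc) {
case 0:
	netdev->tx_queue_len = conn->path->msglim;
	/* wait up to 5 seconds for the partner to accept */
	fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
		     CONN_EVENT_TIMER, conn);
	return;
default:
	/* rc-specific messages name the peer via netiucv_printname() */
	kfree(conn->path);
	conn->path = NULL;
}
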
937 struct iucv_connection *conn = ev->conn;
938 struct net_device *netdev = conn->netdev;
943 fsm_deltimer(&conn->timer);
945 netiucv_purge_skb_queue(&conn->collect_queue);
946 if (conn->path) {
948 iucv_path_sever(conn->path, conn->userdata);
949 kfree(conn->path);
950 conn->path = NULL;
952 netiucv_purge_skb_queue(&conn->commit_queue);
958 struct iucv_connection *conn = arg;
959 struct net_device *netdev = conn->netdev;
962 netdev->name, conn->userid);
1020 fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
1039 ev.conn = privptr->conn;
1042 fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1067 netiucv_printuser(privptr->conn));
1122 * @param conn Connection to be used for sending.
1129 static int netiucv_transmit_skb(struct iucv_connection *conn,
1137 if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1140 spin_lock_irqsave(&conn->collect_lock, saveflags);
1141 if (conn->collect_len + l >
1142 (conn->max_buffsize - NETIUCV_HDRLEN)) {
1148 skb_queue_tail(&conn->collect_queue, skb);
1149 conn->collect_len += l;
1152 spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1183 fsm_newstate(conn->fsm, CONN_STATE_TX);
1184 conn->prof.send_stamp = jiffies;
1188 rc = iucv_message_send(conn->path, &msg, 0, 0,
1190 conn->prof.doios_single++;
1191 conn->prof.txlen += skb->len;
1192 conn->prof.tx_pending++;
1193 if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1194 conn->prof.tx_max_pending = conn->prof.tx_pending;
1197 fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1198 conn->prof.tx_pending--;
1199 privptr = netdev_priv(conn->netdev);
1217 skb_queue_tail(&conn->commit_queue, nskb);
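
netiucv_transmit_skb() at 1129-1217 has two branches: while the FSM is not idle the skb is parked on collect_queue (bounded by max_buffsize minus the link header), otherwise it is sent immediately, committed to commit_queue and accounted in prof. A condensed sketch; the header prepending and the skb copy/clone handling that produces nskb are trimmed, and the -EBUSY handling is partly assumed:

if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
	int l = skb->len + NETIUCV_HDRLEN;

	spin_lock_irqsave(&conn->collect_lock, saveflags);
	if (conn->collect_len + l > conn->max_buffsize - NETIUCV_HDRLEN) {
		rc = -EBUSY;		/* collect buffer full */
	} else {
		skb_queue_tail(&conn->collect_queue, skb);
		conn->collect_len += l;
	}
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
} else {
	fsm_newstate(conn->fsm, CONN_STATE_TX);
	conn->prof.send_stamp = jiffies;
	rc = iucv_message_send(conn->path, &msg, 0, 0,
			       nskb->data, nskb->len);
	conn->prof.doios_single++;
	conn->prof.txlen += skb->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		fsm_newstate(conn->fsm, CONN_STATE_IDLE);
		conn->prof.tx_pending--;
	} else {
		skb_queue_tail(&conn->commit_queue, nskb);
	}
}
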
1303 rc = netiucv_transmit_skb(privptr->conn, skb);
1334 return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1389 struct net_device *ndev = priv->conn->netdev;
1400 if (memcmp(username, priv->conn->userid, 9) &&
1417 memcpy(priv->conn->userid, username, 9);
1418 memcpy(priv->conn->userdata, userdata, 17);
1430 return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1437 struct net_device *ndev = priv->conn->netdev;
1472 priv->conn->max_buffsize = bs1;
1499 return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1510 return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1520 priv->conn->prof.maxmulti = 0;
1532 return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1541 priv->conn->prof.maxcqueue = 0;
1553 return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1562 priv->conn->prof.doios_single = 0;
1574 return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1583 priv->conn->prof.doios_multi = 0;
1595 return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1604 priv->conn->prof.txlen = 0;
1616 return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1625 priv->conn->prof.tx_time = 0;
1637 return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1646 priv->conn->prof.tx_pending = 0;
1658 return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1667 priv->conn->prof.tx_max_pending = 0;
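
The sysfs fragments at 1499-1667 repeat one pattern per prof counter: the show routine prints the value with sprintf and the write routine resets it to zero. A sketch of one such pair; the attribute wiring (DEVICE_ATTR name, mode, dev_get_drvdata for the private data) is assumed rather than taken from the listing:

static ssize_t txlen_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
}

static ssize_t txlen_write(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);

	priv->conn->prof.txlen = 0;	/* any write resets the counter */
	return count;
}

static DEVICE_ATTR(txlen, 0644, txlen_show, txlen_write);
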
1757 struct iucv_connection *conn;
1759 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1760 if (!conn)
1762 skb_queue_head_init(&conn->collect_queue);
1763 skb_queue_head_init(&conn->commit_queue);
1764 spin_lock_init(&conn->collect_lock);
1765 conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1766 conn->netdev = dev;
1768 conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1769 if (!conn->rx_buff)
1771 conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1772 if (!conn->tx_buff)
1774 conn->fsm = init_fsm("netiucvconn", conn_state_names,
1778 if (!conn->fsm)
1781 fsm_settimer(conn->fsm, &conn->timer);
1782 fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1785 memcpy(conn->userdata, userdata, 17);
1787 memcpy(conn->userid, username, 9);
1788 fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1792 list_add_tail(&conn->list, &iucv_connection_list);
1794 return conn;
1797 kfree_skb(conn->tx_buff);
1799 kfree_skb(conn->rx_buff);
1801 kfree(conn);
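
netiucv_new_connection() at 1757-1801 condenses to the allocation-and-unwind sequence below: allocate the connection, its rx/tx buffers and its FSM, initialise timer, state and peer identification, then append to iucv_connection_list; each failure unwinds whatever was allocated so far. The goto labels and the trailing init_fsm() arguments are assumed, the rest follows the fragments:

conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn)
	goto out;
skb_queue_head_init(&conn->collect_queue);
skb_queue_head_init(&conn->commit_queue);
spin_lock_init(&conn->collect_lock);
conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
conn->netdev = dev;

conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
if (!conn->rx_buff)
	goto out_conn;
conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
if (!conn->tx_buff)
	goto out_rx;
conn->fsm = init_fsm("netiucvconn", conn_state_names, conn_event_names,
		     NR_CONN_STATES, NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
		     GFP_KERNEL);
if (!conn->fsm)
	goto out_tx;

fsm_settimer(conn->fsm, &conn->timer);
fsm_newstate(conn->fsm, CONN_STATE_INVALID);
if (userdata)
	memcpy(conn->userdata, userdata, 17);
memcpy(conn->userid, username, 9);
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);

list_add_tail(&conn->list, &iucv_connection_list);
return conn;

out_tx:
	kfree_skb(conn->tx_buff);
out_rx:
	kfree_skb(conn->rx_buff);
out_conn:
	kfree(conn);
out:
	return NULL;
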
1810 static void netiucv_remove_connection(struct iucv_connection *conn)
1815 list_del_init(&conn->list);
1817 fsm_deltimer(&conn->timer);
1818 netiucv_purge_skb_queue(&conn->collect_queue);
1819 if (conn->path) {
1820 iucv_path_sever(conn->path, conn->userdata);
1821 kfree(conn->path);
1822 conn->path = NULL;
1824 netiucv_purge_skb_queue(&conn->commit_queue);
1825 kfree_fsm(conn->fsm);
1826 kfree_skb(conn->rx_buff);
1827 kfree_skb(conn->tx_buff);
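
netiucv_remove_connection() at 1810-1827 is the mirror-image teardown: unlink the connection, stop its timer, drop anything still queued, sever and free the IUCV path, then release FSM and buffers. The locking around the list removal is trimmed; the rest follows the fragments:

list_del_init(&conn->list);
fsm_deltimer(&conn->timer);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
	iucv_path_sever(conn->path, conn->userdata);
	kfree(conn->path);
	conn->path = NULL;
}
netiucv_purge_skb_queue(&conn->commit_queue);
kfree_fsm(conn->fsm);
kfree_skb(conn->rx_buff);
kfree_skb(conn->tx_buff);
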
1843 if (privptr->conn)
1844 netiucv_remove_connection(privptr->conn);
1847 privptr->conn = NULL; privptr->fsm = NULL;
1900 privptr->conn = netiucv_new_connection(dev, username, userdata);
1901 if (!privptr->conn) {
1968 netiucv_printuser(priv->conn));
2015 priv->conn->userid);