Lines Matching defs:xmitq
177 struct sk_buff_head *xmitq,
807 struct sk_buff_head xmitq;
823 __skb_queue_head_init(&xmitq);
838 rc = tipc_link_timeout(le->link, &xmitq);
843 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
854 * @xmitq: queue for messages to be xmited on
859 struct sk_buff_head *xmitq)
888 tipc_link_build_state_msg(nl, xmitq);
897 tipc_bcast_add_peer(n->net, nl, xmitq);
916 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
923 * @xmitq: queue for messages to be xmited on
928 struct sk_buff_head *xmitq)
933 __tipc_node_link_up(n, bearer_id, xmitq);
935 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
958 * @xmitq: queue for messages to be xmited on tnl link later
962 struct sk_buff_head *xmitq)
976 tipc_link_failover_prepare(l, tnl, xmitq);
987 * @xmitq: queue for messages to be xmited on
991 struct sk_buff_head *xmitq,
1041 tipc_link_build_reset_msg(l, xmitq);
1055 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
1070 struct sk_buff_head xmitq;
1075 __skb_queue_head_init(&xmitq);
1079 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1094 if (!skb_queue_empty(&xmitq))
1095 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1694 struct sk_buff_head xmitq;
1738 __skb_queue_head_init(&xmitq);
1741 rc = tipc_link_xmit(le->link, list, &xmitq);
1748 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1775 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1780 while ((skb = __skb_dequeue(xmitq))) {
1790 struct sk_buff_head xmitq;
1798 __skb_queue_head_init(&xmitq);
1799 __skb_queue_tail(&xmitq, skb);
1800 tipc_bcast_xmit(net, &xmitq, &dummy);
1836 int bearer_id, struct sk_buff_head *xmitq)
1841 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1859 tipc_link_build_state_msg(ucl, xmitq);
1874 struct sk_buff_head xmitq;
1882 __skb_queue_head_init(&xmitq);
1901 tipc_link_build_state_msg(le->link, &xmitq);
1905 if (!skb_queue_empty(&xmitq))
1906 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1929 * @xmitq: queue for messages to be xmited on
1933 int bearer_id, struct sk_buff_head *xmitq)
2000 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
2014 tipc_node_link_failover(n, pl, l, xmitq);
2042 __tipc_node_link_up(n, bearer_id, xmitq);
2087 struct sk_buff_head xmitq;
2119 __skb_queue_head_init(&xmitq);
2149 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2159 rc = tipc_link_rcv(le->link, skb, &xmitq);
2171 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2173 rc = tipc_link_rcv(le->link, skb, &xmitq);
2181 tipc_node_link_up(n, bearer_id, &xmitq);
2197 if (!skb_queue_empty(&xmitq))
2198 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2211 struct sk_buff_head xmitq;
2215 __skb_queue_head_init(&xmitq);
2225 &xmitq);
2234 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2425 struct sk_buff_head xmitq;
2429 __skb_queue_head_init(&xmitq);
2473 tipc_link_set_tolerance(link, tol, &xmitq);
2479 tipc_link_set_prio(link, prio, &xmitq);
2493 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
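The matches above all follow one pattern: a function either takes a caller-supplied struct sk_buff_head *xmitq to append packets to, or declares a local xmitq on the stack, initializes it with __skb_queue_head_init(), lets the link layer fill it while the node lock is held, and then drains it with tipc_bearer_xmit() once the lock is released. The sketch below only illustrates that pattern; it is not a function from the TIPC node code, the name node_xmit_sketch is hypothetical, and it assumes the internal TIPC headers (node.h, link.h, bearer.h) and the node read-lock helpers used inside node.c are in scope.

	/* Hypothetical sketch of the xmitq pattern seen in the matches above
	 * (not an actual function in the TIPC node code): build a local
	 * transmit queue under the node lock, then hand it to the bearer
	 * after the lock is dropped.
	 */
	static int node_xmit_sketch(struct tipc_node *n, int bearer_id,
				    struct sk_buff_head *list)
	{
		struct tipc_link_entry *le = &n->links[bearer_id];
		struct sk_buff_head xmitq;
		int rc;

		__skb_queue_head_init(&xmitq);	/* local, on-stack queue */

		tipc_node_read_lock(n);
		/* the link layer moves packets from @list onto @xmitq */
		rc = tipc_link_xmit(le->link, list, &xmitq);
		tipc_node_read_unlock(n);

		/* flush to the wire only if packets were produced */
		if (!skb_queue_empty(&xmitq))
			tipc_bearer_xmit(n->net, bearer_id, &xmitq,
					 &le->maddr, n);
		return rc;
	}

Keeping xmitq on the caller's stack and flushing it outside the lock is what lets the functions listed above queue protocol and data messages from within locked sections without ever calling into the bearer layer while holding the node lock.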