Lines Matching defs:xmitq
Occurrences of the local transmit queue (struct sk_buff_head xmitq) in the TIPC node code, listed by source line number. A hedged sketch of the recurring fill-then-flush pattern these matches share follows the listing.
168 struct sk_buff_head *xmitq,
791 struct sk_buff_head xmitq;
807 __skb_queue_head_init(&xmitq);
822 rc = tipc_link_timeout(le->link, &xmitq);
827 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
840 struct sk_buff_head *xmitq)
869 tipc_link_build_state_msg(nl, xmitq);
878 tipc_bcast_add_peer(n->net, nl, xmitq);
897 tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
906 struct sk_buff_head *xmitq)
911 __tipc_node_link_up(n, bearer_id, xmitq);
913 tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
935 * @xmitq: queue for messages to be xmited on tnl link later
939 struct sk_buff_head *xmitq)
953 tipc_link_failover_prepare(l, tnl, xmitq);
964 struct sk_buff_head *xmitq,
1014 tipc_link_build_reset_msg(l, xmitq);
1028 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
1043 struct sk_buff_head xmitq;
1048 __skb_queue_head_init(&xmitq);
1052 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
1067 if (!skb_queue_empty(&xmitq))
1068 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
1665 struct sk_buff_head xmitq;
1709 __skb_queue_head_init(&xmitq);
1712 rc = tipc_link_xmit(le->link, list, &xmitq);
1719 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1746 int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1751 while ((skb = __skb_dequeue(xmitq))) {
1761 struct sk_buff_head xmitq;
1769 __skb_queue_head_init(&xmitq);
1770 __skb_queue_tail(&xmitq, skb);
1771 tipc_bcast_xmit(net, &xmitq, &dummy);
1807 int bearer_id, struct sk_buff_head *xmitq)
1812 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);
1830 tipc_link_build_state_msg(ucl, xmitq);
1845 struct sk_buff_head xmitq;
1853 __skb_queue_head_init(&xmitq);
1872 tipc_link_build_state_msg(le->link, &xmitq);
1876 if (!skb_queue_empty(&xmitq))
1877 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
1902 int bearer_id, struct sk_buff_head *xmitq)
1969 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1983 tipc_node_link_failover(n, pl, l, xmitq);
2011 __tipc_node_link_up(n, bearer_id, xmitq);
2056 struct sk_buff_head xmitq;
2088 __skb_queue_head_init(&xmitq);
2118 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2128 rc = tipc_link_rcv(le->link, skb, &xmitq);
2140 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2142 rc = tipc_link_rcv(le->link, skb, &xmitq);
2150 tipc_node_link_up(n, bearer_id, &xmitq);
2166 if (!skb_queue_empty(&xmitq))
2167 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2180 struct sk_buff_head xmitq;
2184 __skb_queue_head_init(&xmitq);
2194 &xmitq);
2203 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2379 struct sk_buff_head xmitq;
2383 __skb_queue_head_init(&xmitq);
2427 tipc_link_set_tolerance(link, tol, &xmitq);
2433 tipc_link_set_prio(link, prio, &xmitq);
2447 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
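The matches above all share one shape: the caller declares an on-stack sk_buff_head, initializes it with __skb_queue_head_init(), lets link-layer helpers append any messages they need to send, and finally flushes the whole queue to the bearer in a single tipc_bearer_xmit() call. The sketch below is a minimal illustration of that pattern, modelled loosely on the tipc_node_timeout() matches at lines 791-827; it is not verbatim kernel code. The function name xmitq_pattern_sketch() is hypothetical, and the internal TIPC types (struct tipc_node, struct tipc_link_entry) and helpers are assumed to be as they appear in the listing.

/* <linux/skbuff.h> provides sk_buff_head and the queue helpers; the
 * tipc_link_timeout()/tipc_bearer_xmit() declarations live in the
 * net/tipc internal headers (assumed: link.h, bearer.h).
 */
#include <linux/skbuff.h>

static void xmitq_pattern_sketch(struct tipc_node *n, int bearer_id,
				 struct tipc_link_entry *le)
{
	struct sk_buff_head xmitq;	/* caller-owned, on-stack queue */
	int rc;

	/* Lockless init is enough for a queue that never leaves this stack frame. */
	__skb_queue_head_init(&xmitq);

	/* A link-layer helper appends whatever it needs to send; here the
	 * timer path, as at line 822 of the listing.
	 */
	rc = tipc_link_timeout(le->link, &xmitq);

	/* Flush everything in one call once the per-link work is done. */
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);

	(void)rc;	/* the real code inspects rc (e.g. for link events); omitted here */
}

Other call sites in the listing fill or drain the same kind of queue differently, for example queueing a single skb with __skb_queue_tail() and flushing through tipc_bcast_xmit() (lines 1769-1771), or draining it entry by entry with __skb_dequeue() (line 1751), but the caller-owned queue passed down as *xmitq and flushed after the fact is the common design.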