Lines matching refs: tsk (net/tipc/socket.c)

140 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
142 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
144 static int tipc_sk_leave(struct tipc_sock *tsk);
146 static int tipc_sk_insert(struct tipc_sock *tsk);
147 static void tipc_sk_remove(struct tipc_sock *tsk);
150 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
159 static u32 tsk_own_node(struct tipc_sock *tsk)
161 return msg_prevnode(&tsk->phdr);
164 static u32 tsk_peer_node(struct tipc_sock *tsk)
166 return msg_destnode(&tsk->phdr);
169 static u32 tsk_peer_port(struct tipc_sock *tsk)
171 return msg_destport(&tsk->phdr);
174 static bool tsk_unreliable(struct tipc_sock *tsk)
176 return msg_src_droppable(&tsk->phdr) != 0;
179 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
181 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
184 static bool tsk_unreturnable(struct tipc_sock *tsk)
186 return msg_dest_droppable(&tsk->phdr) != 0;
189 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
191 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
194 static int tsk_importance(struct tipc_sock *tsk)
196 return msg_importance(&tsk->phdr);
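
The helpers at lines 159-196 all follow one pattern: TIPC keeps a pre-built message header template in tsk->phdr and derives per-socket state (own node, peer node and port, droppability, importance) from that template instead of storing it twice. A minimal userspace model of the pattern, with illustrative types that are not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct msg_hdr {                  /* stand-in for struct tipc_msg */
        uint32_t prevnode;            /* own node, set once at creation */
        uint32_t destnode;            /* peer node, set on connect */
        uint32_t destport;            /* peer port, set on connect */
        bool src_droppable;           /* the "unreliable" flag */
    };

    struct tsock {                    /* stand-in for struct tipc_sock */
        struct msg_hdr phdr;          /* header template reused per send */
    };

    static uint32_t tsk_peer_node(struct tsock *tsk)
    {
        return tsk->phdr.destnode;    /* mirrors msg_destnode(&tsk->phdr) */
    }

    static void tsk_set_unreliable(struct tsock *tsk, bool on)
    {
        tsk->phdr.src_droppable = on; /* mirrors msg_set_src_droppable() */
    }

    int main(void)
    {
        struct tsock t = { .phdr = { .destnode = 0x1001001 } };

        tsk_set_unreliable(&t, true);
        printf("peer node %x unreliable %d\n",
               (unsigned)tsk_peer_node(&t), t.phdr.src_droppable);
        return 0;
    }
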
212 static bool tsk_conn_cong(struct tipc_sock *tsk)
214 return tsk->snt_unacked > tsk->snd_win;
235 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
237 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
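
tsk_conn_cong() (line 212) and tsk_inc() (line 235) implement TIPC's connection-level flow control. Against a peer that advertises TIPC_BLOCK_FLOWCTL, a message costs msglen / block-size + 1 window units; against an older peer every message costs one unit, so the window counts whole messages. The sender stalls once snt_unacked exceeds snd_win. A sketch of the accounting, assuming the 1024-byte block size (FLOWCTL_BLK_SZ) used in the sources I am aware of:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FLOWCTL_BLK_SZ 1024    /* assumed advertised-unit size */

    struct fc {
        uint16_t snt_unacked;      /* units sent but not yet acked */
        uint16_t snd_win;          /* units the peer has advertised */
        bool     block_flowctl;    /* peer capability TIPC_BLOCK_FLOWCTL */
    };

    /* Cost of one message, mirroring tsk_inc() */
    static uint16_t fc_inc(const struct fc *fc, int msglen)
    {
        if (fc->block_flowctl)
            return msglen / FLOWCTL_BLK_SZ + 1;
        return 1;                  /* legacy: window counts messages */
    }

    /* Mirrors tsk_conn_cong(): stall when the window is overrun */
    static bool fc_cong(const struct fc *fc)
    {
        return fc->snt_unacked > fc->snd_win;
    }

    int main(void)
    {
        struct fc fc = { .snd_win = 50, .block_flowctl = true };

        while (!fc_cong(&fc))
            fc.snt_unacked += fc_inc(&fc, 4000);  /* 4 KB messages */
        printf("stalled after %u units\n", (unsigned)fc.snt_unacked);
        return 0;
    }
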
244 static void tsk_set_nagle(struct tipc_sock *tsk)
246 struct sock *sk = &tsk->sk;
248 tsk->maxnagle = 0;
251 if (tsk->nodelay)
253 if (!(tsk->peer_caps & TIPC_NAGLE))
256 if (tsk->max_pkt == MAX_MSG_SIZE)
257 tsk->maxnagle = 1500;
259 tsk->maxnagle = tsk->max_pkt;
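
tsk_set_nagle() (line 244) decides whether Nagle-style bundling is usable at all: only for SOCK_STREAM, only when TIPC_NODELAY is off, and only if the peer advertises the TIPC_NAGLE capability. The bundling limit maxnagle is then capped at 1500 bytes when the link MTU equals the maximum message size, otherwise at the link MTU itself. A condensed model of that decision (constants are illustrative except the 1500-byte cap, which appears in the lines above):

    #include <stdbool.h>
    #include <stdio.h>

    #define CAP_NAGLE    0x1       /* illustrative capability bit */
    #define MAX_MSG_SIZE 66000     /* illustrative maximum */

    struct nagle_in {
        bool is_stream;            /* sk->sk_type == SOCK_STREAM */
        bool nodelay;              /* TIPC_NODELAY set by the user */
        unsigned peer_caps;        /* capabilities of the peer node */
        unsigned max_pkt;          /* negotiated link MTU */
    };

    /* Mirrors the control flow of tsk_set_nagle() */
    static unsigned maxnagle(const struct nagle_in *in)
    {
        if (!in->is_stream || in->nodelay || !(in->peer_caps & CAP_NAGLE))
            return 0;              /* bundling disabled */
        if (in->max_pkt == MAX_MSG_SIZE)
            return 1500;           /* cap bundles on big-MTU links */
        return in->max_pkt;
    }

    int main(void)
    {
        struct nagle_in in = { .is_stream = true, .peer_caps = CAP_NAGLE,
                               .max_pkt = 1460 };
        printf("maxnagle = %u\n", maxnagle(&in));
        return 0;
    }
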
323 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
325 struct sock *sk = &tsk->sk;
327 u32 peer_port = tsk_peer_port(tsk);
337 peer_node = tsk_peer_node(tsk);
452 struct tipc_sock *tsk;
479 tsk = tipc_sk(sk);
480 tsk->max_pkt = MAX_PKT_DEFAULT;
481 tsk->maxnagle = 0;
482 tsk->nagle_start = NAGLE_START_INIT;
483 INIT_LIST_HEAD(&tsk->publications);
484 INIT_LIST_HEAD(&tsk->cong_links);
485 msg = &tsk->phdr;
491 if (tipc_sk_insert(tsk)) {
497 /* Ensure tsk is visible before we read own_addr. */
503 msg_set_origport(msg, tsk->portid);
511 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
512 tsk->group_is_open = true;
513 atomic_set(&tsk->dupl_rcvcnt, 0);
516 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
517 tsk->rcv_win = tsk->snd_win;
520 tsk_set_unreturnable(tsk, true);
522 tsk_set_unreliable(tsk, true);
524 __skb_queue_head_init(&tsk->mc_method.deferredq);
531 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
533 sock_put(&tsk->sk);
540 struct tipc_sock *tsk = tipc_sk(sk);
543 u32 dnode = tsk_peer_node(tsk);
547 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
548 !tsk_conn_cong(tsk)));
551 tipc_sk_push_backlog(tsk, false);
572 tipc_node_remove_conn(net, dnode, tsk->portid);
582 tsk_own_node(tsk), tsk_peer_port(tsk),
583 tsk->portid, error);
585 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
616 struct tipc_sock *tsk;
625 tsk = tipc_sk(sk);
631 tipc_sk_leave(tsk);
632 tipc_sk_withdraw(tsk, 0, NULL);
633 __skb_queue_purge(&tsk->mc_method.deferredq);
635 tipc_sk_remove(tsk);
640 tipc_dest_list_purge(&tsk->cong_links);
641 tsk->cong_link_cnt = 0;
642 call_rcu(&tsk->rcu, tipc_sk_callback);
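
The release path (lines 616-642) tears the socket down in two stages: it leaves any group, withdraws bindings, purges queues, and removes the port from the lookup table, then defers the final sock_put() through call_rcu() so that lockless lookups still holding a pointer from the table can finish first (the callback at line 531 drops that last reference). A loose userspace analogy of the two-stage pattern, with a plain reference count standing in for the RCU machinery:

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int refs;
    };

    static struct obj *table_slot;   /* stand-in for the lookup table */

    static void obj_put(struct obj *o)
    {
        if (--o->refs == 0) {
            printf("freed after last reference\n");
            free(o);
        }
    }

    static void release(struct obj *o)
    {
        table_slot = NULL;   /* stage 1: no new lookups can find it */
        obj_put(o);          /* stage 2: drop the table's reference */
    }

    int main(void)
    {
        struct obj *o = calloc(1, sizeof(*o));

        o->refs = 2;         /* one for the table, one for a reader */
        table_slot = o;
        release(o);          /* a reader still holds a reference... */
        obj_put(o);          /* ...object survives until it lets go */
        return 0;
    }
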
668 struct tipc_sock *tsk = tipc_sk(sk);
673 res = tipc_sk_withdraw(tsk, 0, NULL);
676 if (tsk->group) {
704 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
705 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
728 struct tipc_sock *tsk = tipc_sk(sk);
735 addr->addr.id.ref = tsk_peer_port(tsk);
736 addr->addr.id.node = tsk_peer_node(tsk);
738 addr->addr.id.ref = tsk->portid;
772 struct tipc_sock *tsk = tipc_sk(sk);
785 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
794 if (tsk->group_is_open && !tsk->cong_link_cnt)
824 struct tipc_sock *tsk = tipc_sk(sk);
825 struct tipc_msg *hdr = &tsk->phdr;
828 struct tipc_mc_method *method = &tsk->mc_method;
833 if (tsk->group)
837 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
867 &tsk->cong_link_cnt);
884 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
888 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
889 struct tipc_mc_method *method = &tsk->mc_method;
891 struct tipc_msg *hdr = &tsk->phdr;
904 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
910 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
912 tipc_dest_push(&tsk->cong_links, dnode, 0);
913 tsk->cong_link_cnt++;
941 struct tipc_sock *tsk = tipc_sk(sk);
954 !tipc_dest_find(&tsk->cong_links, node, 0) &&
955 tsk->group &&
956 !tipc_group_cong(tsk->group, node, port, blks,
964 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
984 struct tipc_sock *tsk = tipc_sk(sk);
985 struct list_head *cong_links = &tsk->cong_links;
987 struct tipc_msg *hdr = &tsk->phdr;
1005 exclude = tipc_group_exclude(tsk->group);
1015 cong = tipc_group_cong(tsk->group, node, port, blks,
1035 tsk->group &&
1036 !tipc_group_cong(tsk->group, node, port,
1049 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1070 struct tipc_sock *tsk = tipc_sk(sk);
1072 struct tipc_mc_method *method = &tsk->mc_method;
1075 struct tipc_msg *hdr = &tsk->phdr;
1082 !tsk->cong_link_cnt && tsk->group &&
1083 !tipc_group_bc_cong(tsk->group, blks));
1087 dsts = tipc_group_dests(tsk->group);
1102 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1114 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1119 tipc_group_update_bc_members(tsk->group, blks, ack);
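
A pattern recurs throughout the send paths above (lines 867, 912-913, 1509, 2889-2890): when transmission reports link congestion, the destination is pushed onto tsk->cong_links and cong_link_cnt is raised, and senders then sleep in tipc_wait_for_cond() until a wakeup message (handled around lines 2159-2162) deletes the entry and decrements the counter. A minimal model of that bookkeeping, with a hypothetical xmit() standing in for the link layer:

    #include <stdbool.h>
    #include <stdio.h>

    #define ELINKCONG 1            /* illustrative error code */

    struct sender {
        int cong_link_cnt;         /* congested links we wait on */
    };

    /* Hypothetical link send: congested on the first call only */
    static int xmit(int *ncalls)
    {
        return (*ncalls)++ % 2 ? 0 : -ELINKCONG;
    }

    static bool can_send(const struct sender *s)
    {
        return s->cong_link_cnt == 0;   /* the wait condition */
    }

    int main(void)
    {
        struct sender s = { 0 };
        int calls = 0;

        if (xmit(&calls) == -ELINKCONG)
            s.cong_link_cnt++;          /* record the congested link */
        printf("blocked: %d\n", !can_send(&s));
        s.cong_link_cnt--;              /* wakeup message arrived */
        printf("blocked: %d\n", !can_send(&s));
        return 0;
    }
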
1143 struct tipc_sock *tsk = tipc_sk(sk);
1144 struct tipc_group *grp = tsk->group;
1145 struct tipc_msg *hdr = &tsk->phdr;
1264 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
1266 struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
1268 struct net *net = sock_net(&tsk->sk);
1269 u32 dnode = tsk_peer_node(tsk);
1273 tsk->pkt_cnt += skb_queue_len(txq);
1274 if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1275 tsk->oneway = 0;
1276 if (tsk->nagle_start < NAGLE_START_MAX)
1277 tsk->nagle_start *= 2;
1278 tsk->expect_ack = false;
1279 pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1280 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1281 tsk->nagle_start);
1283 tsk->nagle_start = NAGLE_START_INIT;
1286 tsk->expect_ack = true;
1288 tsk->expect_ack = false;
1291 tsk->msg_acc = 0;
1292 tsk->pkt_cnt = 0;
1295 if (!skb || tsk->cong_link_cnt)
1302 if (tsk->msg_acc)
1303 tsk->pkt_cnt += skb_queue_len(txq);
1304 tsk->snt_unacked += tsk->snd_backlog;
1305 tsk->snd_backlog = 0;
1306 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1308 tsk->cong_link_cnt = 1;
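
tipc_sk_push_backlog() (line 1264) doubles as the judge of whether Nagle bundling is paying off. On each Nagle ack it compares messages accumulated (msg_acc) against packets actually sent (pkt_cnt): if fewer than two messages were bundled per packet the traffic is evidently not one-way, so oneway mode is abandoned and the threshold nagle_start is doubled (up to NAGLE_START_MAX) before bundling may be tried again; a good ratio resets nagle_start to NAGLE_START_INIT. A sketch of just that back-off decision:

    #include <stdbool.h>
    #include <stdio.h>

    /* Values mirror the spirit, not necessarily the kernel constants */
    #define NAGLE_START_INIT 4
    #define NAGLE_START_MAX  1024

    struct ns {
        unsigned msg_acc;          /* messages taken into the backlog */
        unsigned pkt_cnt;          /* packets those messages produced */
        unsigned nagle_start;      /* sends needed before bundling starts */
        bool     oneway;
    };

    static void nagle_judge(struct ns *n)
    {
        if (!n->pkt_cnt || n->msg_acc / n->pkt_cnt < 2) {
            n->oneway = false;             /* bundling did not help */
            if (n->nagle_start < NAGLE_START_MAX)
                n->nagle_start *= 2;       /* back off exponentially */
        } else {
            n->nagle_start = NAGLE_START_INIT;
        }
        n->msg_acc = 0;
        n->pkt_cnt = 0;
    }

    int main(void)
    {
        struct ns n = { .msg_acc = 3, .pkt_cnt = 2,
                        .nagle_start = NAGLE_START_INIT, .oneway = true };

        nagle_judge(&n);           /* 3 / 2 < 2: bad ratio */
        printf("oneway=%d next start=%u\n", n.oneway, n.nagle_start);
        return 0;
    }
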
1313 * @tsk: receiving socket
1316 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1321 u32 onode = tsk_own_node(tsk);
1322 struct sock *sk = &tsk->sk;
1327 if (!tsk_peer_msg(tsk, hdr)) {
1334 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1335 tsk_peer_port(tsk));
1349 tsk->probe_unacked = false;
1357 was_cong = tsk_conn_cong(tsk);
1358 tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
1359 tsk->snt_unacked -= msg_conn_ack(hdr);
1360 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1361 tsk->snd_win = msg_adv_win(hdr);
1362 if (was_cong && !tsk_conn_cong(tsk))
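
tipc_sk_conn_proto_rcv() (line 1316) is the receive side of the flow control set up by tsk_inc(): a connection ack first releases any Nagle backlog, then credits snt_unacked by the acknowledged amount, adopts the window the peer re-advertised (when both ends speak TIPC_BLOCK_FLOWCTL), and wakes blocked writers if that transition cleared the congestion test. A compact model of the credit step:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct conn {
        uint16_t snt_unacked;
        uint16_t snd_win;
    };

    static bool cong(const struct conn *c)
    {
        return c->snt_unacked > c->snd_win;
    }

    /* Mirrors the ack handling around line 1359: credit, re-window, wake */
    static void on_conn_ack(struct conn *c, uint16_t acked, uint16_t adv_win)
    {
        bool was_cong = cong(c);

        c->snt_unacked -= acked;   /* peer consumed this much */
        c->snd_win = adv_win;      /* peer's fresh window */
        if (was_cong && !cong(c))
            printf("wake blocked writers\n");
    }

    int main(void)
    {
        struct conn c = { .snt_unacked = 60, .snd_win = 50 };

        on_conn_ack(&c, 20, 50);
        printf("unacked=%u\n", (unsigned)c.snt_unacked);
        return 0;
    }
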
1401 struct tipc_sock *tsk = tipc_sk(sk);
1404 struct list_head *clinks = &tsk->cong_links;
1406 struct tipc_group *grp = tsk->group;
1407 struct tipc_msg *hdr = &tsk->phdr;
1437 dest = &tsk->peer;
1447 if (tsk->published)
1450 tsk->conn_type = dest->addr.name.name.type;
1451 tsk->conn_instance = dest->addr.name.name.instance;
1496 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
1506 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1509 tsk->cong_link_cnt++;
1553 struct tipc_sock *tsk = tipc_sk(sk);
1554 struct tipc_msg *hdr = &tsk->phdr;
1557 u32 dnode = tsk_peer_node(tsk);
1558 int maxnagle = tsk->maxnagle;
1559 int maxpkt = tsk->max_pkt;
1570 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1571 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1578 (!tsk->cong_link_cnt &&
1579 !tsk_conn_cong(tsk) &&
1584 blocks = tsk->snd_backlog;
1585 if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1591 tsk->msg_acc++;
1592 if (blocks <= 64 && tsk->expect_ack) {
1593 tsk->snd_backlog = blocks;
1597 tsk->pkt_cnt += skb_queue_len(txq);
1602 tsk->expect_ack = true;
1604 tsk->expect_ack = false;
1606 tsk->msg_acc = 0;
1607 tsk->pkt_cnt = 0;
1613 blocks += tsk_inc(tsk, send + MIN_H_SIZE);
1617 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1619 tsk->cong_link_cnt = 1;
1623 tsk->snt_unacked += blocks;
1624 tsk->snd_backlog = 0;
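
The stream send path (around lines 1584-1607) is where the Nagle counters are consumed: once oneway has reached nagle_start and the chunk fits under maxnagle, the message is counted into msg_acc and held back in the backlog whenever an ack is still outstanding (expect_ack) and the backlog is small; otherwise the queue is flushed and expect_ack records whether a Nagle ack was requested. A condensed model of the hold-or-flush choice (the real code mixes in further conditions):

    #include <stdbool.h>
    #include <stdio.h>

    struct tx {
        unsigned oneway;           /* consecutive sends with no reply */
        unsigned nagle_start;      /* threshold before bundling kicks in */
        unsigned maxnagle;         /* 0 means bundling disabled */
        unsigned backlog;          /* blocks already held back */
        bool     expect_ack;       /* a Nagle ack is outstanding */
    };

    /* Returns true when the message should be held in the backlog */
    static bool nagle_hold(struct tx *t, unsigned len)
    {
        if (t->oneway++ < t->nagle_start || !t->maxnagle ||
            len > t->maxnagle)
            return false;          /* send immediately */
        return t->backlog <= 64 && t->expect_ack;
    }

    int main(void)
    {
        struct tx t = { .oneway = 8, .nagle_start = 4,
                        .maxnagle = 1500, .expect_ack = true };

        printf("hold=%d\n", nagle_hold(&t, 200));   /* bundled */
        printf("hold=%d\n", nagle_hold(&t, 4000));  /* too big */
        return 0;
    }
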
1652 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1655 struct sock *sk = &tsk->sk;
1657 struct tipc_msg *msg = &tsk->phdr;
1668 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1669 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1670 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1671 tsk_set_nagle(tsk);
1673 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1677 tsk->rcv_win = FLOWCTL_MSG_WIN;
1678 tsk->snd_win = FLOWCTL_MSG_WIN;
1721 * @tsk: TIPC port associated with message
1728 struct tipc_sock *tsk)
1776 has_name = (tsk->conn_type != 0);
1777 anc_data[0] = tsk->conn_type;
1778 anc_data[1] = tsk->conn_instance;
1779 anc_data[2] = tsk->conn_instance;
1793 static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
1795 struct sock *sk = &tsk->sk;
1798 u32 peer_port = tsk_peer_port(tsk);
1799 u32 dnode = tsk_peer_node(tsk);
1804 dnode, tsk_own_node(tsk), peer_port,
1805 tsk->portid, TIPC_OK);
1809 msg_set_conn_ack(msg, tsk->rcv_unacked);
1810 tsk->rcv_unacked = 0;
1813 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1814 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1815 msg_set_adv_win(msg, tsk->rcv_win);
1820 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1824 skb = tipc_sk_build_ack(tsk);
1828 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1889 struct tipc_sock *tsk = tipc_sk(sk);
1928 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1976 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1978 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1993 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1994 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1995 tipc_sk_send_ack(tsk);
2016 struct tipc_sock *tsk = tipc_sk(sk);
2059 rc = tipc_sk_anc_data_recv(m, skb, tsk);
2093 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2094 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2095 tipc_sk_send_ack(tsk);
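
Both receive paths end the same way (lines 1993-1995 and 2093-2095): consumed data is added to rcv_unacked in the same units the sender charges itself, and an ack goes out only once a fixed fraction of the receive window has been consumed (rcv_win / TIPC_ACK_RATE; TIPC_ACK_RATE is 4 in the sources I am aware of), keeping ack traffic to a small constant share of data traffic. A worked example of the resulting ack cadence:

    #include <stdio.h>

    #define TIPC_ACK_RATE 4        /* assumed: one ack per quarter window */

    int main(void)
    {
        unsigned rcv_win = 512;                 /* window, in blocks */
        unsigned rcv_unacked = 0, acks = 0;

        for (int i = 0; i < 1000; i++) {        /* 1000 one-block messages */
            rcv_unacked += 1;
            if (rcv_unacked >= rcv_win / TIPC_ACK_RATE) {
                acks++;                         /* tipc_sk_send_ack() */
                rcv_unacked = 0;                /* ack carries the count */
            }
        }
        printf("%u acks for 1000 messages\n", acks);   /* prints 7 */
        return 0;
    }
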
2149 struct tipc_sock *tsk = tipc_sk(sk);
2151 struct tipc_group *grp = tsk->group;
2156 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2159 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2162 tsk->cong_link_cnt--;
2164 tipc_sk_push_backlog(tsk, false);
2170 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2185 * @tsk: TIPC socket
2190 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2193 struct sock *sk = &tsk->sk;
2197 u32 pport = tsk_peer_port(tsk);
2198 u32 pnode = tsk_peer_node(tsk);
2206 tsk->oneway = 0;
2214 tipc_sk_finish_conn(tsk, oport, onode);
2215 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2236 delay %= (tsk->conn_timeout / 4);
2253 tipc_sk_push_backlog(tsk, false);
2260 skb = tipc_sk_build_ack(tsk);
2268 if (!tsk_peer_msg(tsk, hdr))
2273 tipc_node_remove_conn(net, pnode, tsk->portid);
2306 struct tipc_sock *tsk = tipc_sk(sk);
2315 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2336 struct tipc_sock *tsk = tipc_sk(sk);
2337 struct tipc_group *grp = tsk->group;
2356 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2362 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2481 struct tipc_sock *tsk;
2488 tsk = tipc_sk_lookup(net, dport);
2490 if (likely(tsk)) {
2491 sk = &tsk->sk;
2570 struct tipc_sock *tsk = tipc_sk(sk);
2573 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2582 if (tsk->group) {
2588 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2599 memcpy(&tsk->peer, dest, destlen);
2821 struct tipc_sock *tsk = tipc_sk(sk);
2822 u32 pnode = tsk_peer_node(tsk);
2823 u32 pport = tsk_peer_port(tsk);
2824 u32 self = tsk_own_node(tsk);
2825 u32 oport = tsk->portid;
2828 if (tsk->probe_unacked) {
2840 tsk->probe_unacked = true;
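
The timer path (lines 2821-2840) implements connection supervision as a one-bit state machine: each expiry sends a probe and sets probe_unacked; any traffic from the peer clears the flag (line 1349); if it is still set at the next expiry the peer is presumed dead and the connection is aborted. A sketch:

    #include <stdbool.h>
    #include <stdio.h>

    struct probe {
        bool probe_unacked;        /* set at send, cleared by any reply */
    };

    /* Returns false when the connection should be aborted */
    static bool on_timer(struct probe *p)
    {
        if (p->probe_unacked)
            return false;          /* previous probe went unanswered */
        p->probe_unacked = true;   /* send a new probe */
        return true;
    }

    int main(void)
    {
        struct probe p = { false };

        printf("alive=%d\n", on_timer(&p));  /* probe sent */
        p.probe_unacked = false;             /* peer answered */
        printf("alive=%d\n", on_timer(&p));  /* probe sent again */
        printf("alive=%d\n", on_timer(&p));  /* no answer: abort */
        return 0;
    }
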
2846 struct tipc_sock *tsk = tipc_sk(sk);
2849 if (tsk->cong_link_cnt) {
2861 struct tipc_sock *tsk = tipc_sk(sk);
2862 u32 pnode = tsk_peer_node(tsk);
2885 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2889 tipc_dest_push(&tsk->cong_links, pnode, 0);
2890 tsk->cong_link_cnt = 1;
2895 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2898 struct sock *sk = &tsk->sk;
2908 key = tsk->portid + tsk->pub_count + 1;
2909 if (key == tsk->portid)
2913 scope, tsk->portid, key);
2917 list_add(&publ->binding_sock, &tsk->publications);
2918 tsk->pub_count++;
2919 tsk->published = 1;
2923 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2926 struct net *net = sock_net(&tsk->sk);
2934 list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) {
2953 if (list_empty(&tsk->publications))
2954 tsk->published = 0;
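
tipc_sk_publish() (line 2895) gives each binding a key unique within the socket by deriving it from the port id plus the running pub_count (line 2908); the one value it must never produce is the port id itself, so a 32-bit wrap that lands back on portid is rejected (line 2909). A model of that key generation:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the key generation at line 2908; the kernel returns
     * -EADDRINUSE where this model returns -1.
     */
    static int next_pub_key(uint32_t portid, uint32_t pub_count,
                            uint32_t *key)
    {
        *key = portid + pub_count + 1;
        if (*key == portid)        /* 32-bit wrap hit the reserved value */
            return -1;
        return 0;
    }

    int main(void)
    {
        uint32_t key;

        if (!next_pub_key(100, 0, &key))
            printf("first key: %u\n", (unsigned)key);   /* 101 */
        /* pub_count = 0xffffffff wraps the key back onto the port id */
        printf("wrap rejected: %d\n",
               next_pub_key(100, 0xffffffff, &key) != 0);
        return 0;
    }
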
2965 struct tipc_sock *tsk;
2973 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2974 sock_hold(&tsk->sk);
2976 lock_sock(&tsk->sk);
2977 msg = &tsk->phdr;
2980 release_sock(&tsk->sk);
2982 sock_put(&tsk->sk);
2986 } while (tsk == ERR_PTR(-EAGAIN));
2994 struct tipc_sock *tsk;
2997 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
2998 if (tsk)
2999 sock_hold(&tsk->sk);
3002 return tsk;
3005 static int tipc_sk_insert(struct tipc_sock *tsk)
3007 struct sock *sk = &tsk->sk;
3017 tsk->portid = portid;
3018 sock_hold(&tsk->sk);
3019 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3022 sock_put(&tsk->sk);
3028 static void tipc_sk_remove(struct tipc_sock *tsk)
3030 struct sock *sk = &tsk->sk;
3033 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
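
tipc_sk_insert() and tipc_sk_remove() (lines 3005-3033) maintain the per-namespace rhashtable that tipc_sk_lookup() (line 2997) resolves 32-bit port numbers through; insertion picks a candidate port id and retries on collision, and a successful lookup takes a socket reference (sock_hold, line 2999) that the caller must drop. A toy model of pick-and-retry insertion over a fixed-size array instead of an rhashtable:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SLOTS 8                /* toy table; the kernel's resizes */

    static void *table[SLOTS];

    /* Pick a candidate id, retry while the slot is taken */
    static uint32_t port_insert(void *sk)
    {
        for (;;) {
            uint32_t id = (uint32_t)rand();

            if (!table[id % SLOTS]) {
                table[id % SLOTS] = sk;
                return id;
            }
            /* collision: try the next candidate id */
        }
    }

    int main(void)
    {
        int sk1, sk2;              /* stand-ins for two sockets */

        printf("port %u\n", (unsigned)port_insert(&sk1));
        printf("port %u\n", (unsigned)port_insert(&sk2));
        return 0;
    }
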
3066 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3068 struct net *net = sock_net(&tsk->sk);
3069 struct tipc_group *grp = tsk->group;
3070 struct tipc_msg *hdr = &tsk->phdr;
3080 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3083 tsk->group = grp;
3091 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
3094 tsk->group = NULL;
3098 tsk->mc_method.rcast = true;
3099 tsk->mc_method.mandatory = true;
3100 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3104 static int tipc_sk_leave(struct tipc_sock *tsk)
3106 struct net *net = sock_net(&tsk->sk);
3107 struct tipc_group *grp = tsk->group;
3115 tsk->group = NULL;
3116 tipc_sk_withdraw(tsk, scope, &seq);
3137 struct tipc_sock *tsk = tipc_sk(sk);
3177 tsk_set_unreliable(tsk, value);
3182 tsk_set_unreturnable(tsk, value);
3188 tsk->mc_method.rcast = false;
3189 tsk->mc_method.mandatory = true;
3192 tsk->mc_method.rcast = true;
3193 tsk->mc_method.mandatory = true;
3196 res = tipc_sk_join(tsk, &mreq);
3199 res = tipc_sk_leave(tsk);
3202 tsk->nodelay = !!value;
3203 tsk_set_nagle(tsk);
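
The setsockopt matches (lines 3137-3203) map the user-visible options onto the helpers above: TIPC_SRC_DROPPABLE drives tsk_set_unreliable(), TIPC_DEST_DROPPABLE drives tsk_set_unreturnable(), TIPC_MCAST_BROADCAST and TIPC_MCAST_REPLICAST pin the multicast method, and TIPC_NODELAY re-runs tsk_set_nagle(). From userspace that looks roughly like the following (option names are from linux/tipc.h; requires a kernel with TIPC support):

    #include <linux/tipc.h>
    #include <stdio.h>
    #include <sys/socket.h>

    int main(void)
    {
        int one = 1;
        int imp = TIPC_HIGH_IMPORTANCE;
        int fd = socket(AF_TIPC, SOCK_STREAM, 0);

        if (fd < 0) {
            perror("socket");      /* TIPC module not loaded? */
            return 1;
        }
        /* Disable Nagle bundling: maps to tsk->nodelay = true
         * followed by tsk_set_nagle() in the lines above.
         */
        if (setsockopt(fd, SOL_TIPC, TIPC_NODELAY, &one, sizeof(one)))
            perror("TIPC_NODELAY");
        /* Raise message importance: read back by tsk_importance()
         * from the phdr template at line 194.
         */
        if (setsockopt(fd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)))
            perror("TIPC_IMPORTANCE");
        return 0;
    }
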
3231 struct tipc_sock *tsk = tipc_sk(sk);
3249 value = tsk_importance(tsk);
3252 value = tsk_unreliable(tsk);
3255 value = tsk_unreturnable(tsk);
3258 value = tsk->conn_timeout;
3272 if (tsk->group)
3273 tipc_group_self(tsk->group, &seq, &scope);
3461 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3467 peer_node = tsk_peer_node(tsk);
3468 peer_port = tsk_peer_port(tsk);
3479 if (tsk->conn_type != 0) {
3482 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3484 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3498 *tsk)
3501 struct sock *sk = &tsk->sk;
3503 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3508 if (__tipc_nl_add_sk_con(skb, tsk))
3510 } else if (!list_empty(&tsk->publications)) {
3519 struct tipc_sock *tsk)
3533 if (__tipc_nl_add_sk_info(skb, tsk))
3552 struct tipc_sock *tsk))
3555 struct tipc_sock *tsk;
3559 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3560 if (IS_ERR(tsk)) {
3561 err = PTR_ERR(tsk);
3569 sock_hold(&tsk->sk);
3571 lock_sock(&tsk->sk);
3572 err = skb_handler(skb, cb, tsk);
3574 release_sock(&tsk->sk);
3575 sock_put(&tsk->sk);
3578 release_sock(&tsk->sk);
3580 sock_put(&tsk->sk);
3623 struct tipc_sock *tsk, u32 sk_filter_state,
3626 struct sock *sk = &tsk->sk;
3638 if (__tipc_nl_add_sk_info(skb, tsk))
3664 if (tsk->cong_link_cnt &&
3668 if (tsk_conn_cong(tsk) &&
3674 if (tsk->group)
3675 if (tipc_group_fill_sock_diag(tsk->group, skb))
3738 struct tipc_sock *tsk, u32 *last_publ)
3744 list_for_each_entry(p, &tsk->publications, binding_sock) {
3748 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3760 p = list_first_entry(&tsk->publications, struct publication,
3764 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3783 struct tipc_sock *tsk;
3807 tsk = tipc_sk_lookup(net, tsk_portid);
3808 if (!tsk)
3811 lock_sock(&tsk->sk);
3812 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3815 release_sock(&tsk->sk);
3816 sock_put(&tsk->sk);
3837 struct tipc_sock *tsk;
3845 tsk = tipc_sk(sk);
3857 return (_port == tsk->portid);
3862 if (tsk->published) {
3863 p = list_first_entry_or_null(&tsk->publications,
3873 type = tsk->conn_type;
3874 lower = tsk->conn_instance;
3875 upper = tsk->conn_instance;
3940 struct tipc_sock *tsk;
3949 tsk = tipc_sk(sk);
3954 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3955 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3958 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3959 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3960 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3961 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3963 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3964 if (tsk->published) {
3965 p = list_first_entry_or_null(&tsk->publications,
3971 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3972 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3973 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3974 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3975 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3976 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3977 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3978 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));