Lines matching refs: tsk (a flow-control sketch follows the listing)
153 static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua);
154 static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua);
155 static int tipc_sk_leave(struct tipc_sock *tsk);
157 static int tipc_sk_insert(struct tipc_sock *tsk);
158 static void tipc_sk_remove(struct tipc_sock *tsk);
161 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);
170 static u32 tsk_own_node(struct tipc_sock *tsk)
172 return msg_prevnode(&tsk->phdr);
175 static u32 tsk_peer_node(struct tipc_sock *tsk)
177 return msg_destnode(&tsk->phdr);
180 static u32 tsk_peer_port(struct tipc_sock *tsk)
182 return msg_destport(&tsk->phdr);
185 static bool tsk_unreliable(struct tipc_sock *tsk)
187 return msg_src_droppable(&tsk->phdr) != 0;
190 static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
192 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
195 static bool tsk_unreturnable(struct tipc_sock *tsk)
197 return msg_dest_droppable(&tsk->phdr) != 0;
200 static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
202 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
205 static int tsk_importance(struct tipc_sock *tsk)
207 return msg_importance(&tsk->phdr);
223 static bool tsk_conn_cong(struct tipc_sock *tsk)
225 return tsk->snt_unacked > tsk->snd_win;
246 static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
248 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
255 static void tsk_set_nagle(struct tipc_sock *tsk)
257 struct sock *sk = &tsk->sk;
259 tsk->maxnagle = 0;
262 if (tsk->nodelay)
264 if (!(tsk->peer_caps & TIPC_NAGLE))
267 if (tsk->max_pkt == MAX_MSG_SIZE)
268 tsk->maxnagle = 1500;
270 tsk->maxnagle = tsk->max_pkt;
337 static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
339 struct sock *sk = &tsk->sk;
341 u32 peer_port = tsk_peer_port(tsk);
351 peer_node = tsk_peer_node(tsk);
466 struct tipc_sock *tsk;
493 tsk = tipc_sk(sk);
494 tsk->max_pkt = MAX_PKT_DEFAULT;
495 tsk->maxnagle = 0;
496 tsk->nagle_start = NAGLE_START_INIT;
497 INIT_LIST_HEAD(&tsk->publications);
498 INIT_LIST_HEAD(&tsk->cong_links);
499 msg = &tsk->phdr;
505 if (tipc_sk_insert(tsk)) {
511 /* Ensure tsk is visible before we read own_addr. */
517 msg_set_origport(msg, tsk->portid);
525 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
526 tsk->group_is_open = true;
527 atomic_set(&tsk->dupl_rcvcnt, 0);
530 tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
531 tsk->rcv_win = tsk->snd_win;
534 tsk_set_unreturnable(tsk, true);
536 tsk_set_unreliable(tsk, true);
538 __skb_queue_head_init(&tsk->mc_method.deferredq);
545 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
547 sock_put(&tsk->sk);
554 struct tipc_sock *tsk = tipc_sk(sk);
557 u32 dnode = tsk_peer_node(tsk);
561 tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
562 !tsk_conn_cong(tsk)));
565 tipc_sk_push_backlog(tsk, false);
586 tipc_node_remove_conn(net, dnode, tsk->portid);
596 tsk_own_node(tsk), tsk_peer_port(tsk),
597 tsk->portid, error);
599 tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
630 struct tipc_sock *tsk;
639 tsk = tipc_sk(sk);
645 tipc_sk_leave(tsk);
646 tipc_sk_withdraw(tsk, NULL);
647 __skb_queue_purge(&tsk->mc_method.deferredq);
649 tipc_sk_remove(tsk);
654 tipc_dest_list_purge(&tsk->cong_links);
655 tsk->cong_link_cnt = 0;
656 call_rcu(&tsk->rcu, tipc_sk_callback);
680 struct tipc_sock *tsk = tipc_sk(sock->sk);
684 return tipc_sk_withdraw(tsk, NULL);
698 if (tsk->group)
702 return tipc_sk_withdraw(tsk, ua);
703 return tipc_sk_publish(tsk, ua);
752 struct tipc_sock *tsk = tipc_sk(sk);
759 addr->addr.id.ref = tsk_peer_port(tsk);
760 addr->addr.id.node = tsk_peer_node(tsk);
762 addr->addr.id.ref = tsk->portid;
796 struct tipc_sock *tsk = tipc_sk(sk);
809 if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
818 if (tsk->group_is_open && !tsk->cong_link_cnt)
848 struct tipc_sock *tsk = tipc_sk(sk);
849 struct tipc_msg *hdr = &tsk->phdr;
856 if (tsk->group)
860 rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
888 rc = tipc_mcast_xmit(net, &pkts, &tsk->mc_method, &dsts,
889 &tsk->cong_link_cnt);
900 * @tsk: tipc socket
907 static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
911 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
912 struct tipc_mc_method *method = &tsk->mc_method;
914 struct tipc_msg *hdr = &tsk->phdr;
927 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
933 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
935 tipc_dest_push(&tsk->cong_links, dnode, 0);
936 tsk->cong_link_cnt++;
964 struct tipc_sock *tsk = tipc_sk(sk);
977 !tipc_dest_find(&tsk->cong_links, node, 0) &&
978 tsk->group &&
979 !tipc_group_cong(tsk->group, node, port, blks,
987 rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);
1007 struct tipc_sock *tsk = tipc_sk(sk);
1008 struct list_head *cong_links = &tsk->cong_links;
1010 struct tipc_msg *hdr = &tsk->phdr;
1025 exclude = tipc_group_exclude(tsk->group);
1035 cong = tipc_group_cong(tsk->group, node, port, blks,
1055 tsk->group &&
1056 !tipc_group_cong(tsk->group, node, port,
1069 rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);
1090 struct tipc_sock *tsk = tipc_sk(sk);
1092 struct tipc_mc_method *method = &tsk->mc_method;
1095 struct tipc_msg *hdr = &tsk->phdr;
1102 !tsk->cong_link_cnt && tsk->group &&
1103 !tipc_group_bc_cong(tsk->group, blks));
1107 dsts = tipc_group_dests(tsk->group);
1122 msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));
1134 rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
1139 tipc_group_update_bc_members(tsk->group, blks, ack);
1163 struct tipc_sock *tsk = tipc_sk(sk);
1164 struct tipc_group *grp = tsk->group;
1165 struct tipc_msg *hdr = &tsk->phdr;
1275 static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
1277 struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
1279 struct net *net = sock_net(&tsk->sk);
1280 u32 dnode = tsk_peer_node(tsk);
1284 tsk->pkt_cnt += skb_queue_len(txq);
1285 if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1286 tsk->oneway = 0;
1287 if (tsk->nagle_start < NAGLE_START_MAX)
1288 tsk->nagle_start *= 2;
1289 tsk->expect_ack = false;
1290 pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1291 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1292 tsk->nagle_start);
1294 tsk->nagle_start = NAGLE_START_INIT;
1297 tsk->expect_ack = true;
1299 tsk->expect_ack = false;
1302 tsk->msg_acc = 0;
1303 tsk->pkt_cnt = 0;
1306 if (!skb || tsk->cong_link_cnt)
1313 if (tsk->msg_acc)
1314 tsk->pkt_cnt += skb_queue_len(txq);
1315 tsk->snt_unacked += tsk->snd_backlog;
1316 tsk->snd_backlog = 0;
1317 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1319 tsk->cong_link_cnt = 1;
1324 * @tsk: receiving socket
1329 static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
1334 u32 onode = tsk_own_node(tsk);
1335 struct sock *sk = &tsk->sk;
1340 if (!tsk_peer_msg(tsk, hdr)) {
1347 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1348 tsk_peer_port(tsk));
1362 tsk->probe_unacked = false;
1370 was_cong = tsk_conn_cong(tsk);
1371 tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
1372 tsk->snt_unacked -= msg_conn_ack(hdr);
1373 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1374 tsk->snd_win = msg_adv_win(hdr);
1375 if (was_cong && !tsk_conn_cong(tsk))
1414 struct tipc_sock *tsk = tipc_sk(sk);
1417 struct list_head *clinks = &tsk->cong_links;
1419 struct tipc_group *grp = tsk->group;
1420 struct tipc_msg *hdr = &tsk->phdr;
1448 ua = (struct tipc_uaddr *)&tsk->peer;
1459 if (tsk->published)
1462 tsk->conn_addrtype = atype;
1505 mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true);
1516 rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid);
1519 tsk->cong_link_cnt++;
1563 struct tipc_sock *tsk = tipc_sk(sk);
1564 struct tipc_msg *hdr = &tsk->phdr;
1567 u32 dnode = tsk_peer_node(tsk);
1568 int maxnagle = tsk->maxnagle;
1569 int maxpkt = tsk->max_pkt;
1580 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
1581 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
1588 (!tsk->cong_link_cnt &&
1589 !tsk_conn_cong(tsk) &&
1594 blocks = tsk->snd_backlog;
1595 if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1601 tsk->msg_acc++;
1602 if (blocks <= 64 && tsk->expect_ack) {
1603 tsk->snd_backlog = blocks;
1607 tsk->pkt_cnt += skb_queue_len(txq);
1612 tsk->expect_ack = true;
1614 tsk->expect_ack = false;
1616 tsk->msg_acc = 0;
1617 tsk->pkt_cnt = 0;
1623 blocks += tsk_inc(tsk, send + MIN_H_SIZE);
1627 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1629 tsk->cong_link_cnt = 1;
1633 tsk->snt_unacked += blocks;
1634 tsk->snd_backlog = 0;
1662 static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1665 struct sock *sk = &tsk->sk;
1667 struct tipc_msg *msg = &tsk->phdr;
1678 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1679 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
1680 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
1681 tsk_set_nagle(tsk);
1683 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1687 tsk->rcv_win = FLOWCTL_MSG_WIN;
1688 tsk->snd_win = FLOWCTL_MSG_WIN;
1731 * @tsk: TIPC port associated with message
1738 struct tipc_sock *tsk)
1781 has_addr = !!tsk->conn_addrtype;
1782 data[0] = msg_nametype(&tsk->phdr);
1783 data[1] = msg_nameinst(&tsk->phdr);
1794 static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
1796 struct sock *sk = &tsk->sk;
1799 u32 peer_port = tsk_peer_port(tsk);
1800 u32 dnode = tsk_peer_node(tsk);
1805 dnode, tsk_own_node(tsk), peer_port,
1806 tsk->portid, TIPC_OK);
1810 msg_set_conn_ack(msg, tsk->rcv_unacked);
1811 tsk->rcv_unacked = 0;
1814 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1815 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1816 msg_set_adv_win(msg, tsk->rcv_win);
1821 static void tipc_sk_send_ack(struct tipc_sock *tsk)
1825 skb = tipc_sk_build_ack(tsk);
1829 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1891 struct tipc_sock *tsk = tipc_sk(sk);
1930 rc = tipc_sk_anc_data_recv(m, skb, tsk);
1978 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
1980 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1995 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1996 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1997 tipc_sk_send_ack(tsk);
2019 struct tipc_sock *tsk = tipc_sk(sk);
2062 rc = tipc_sk_anc_data_recv(m, skb, tsk);
2096 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
2097 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
2098 tipc_sk_send_ack(tsk);
2154 struct tipc_sock *tsk = tipc_sk(sk);
2156 struct tipc_group *grp = tsk->group;
2161 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
2164 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
2167 tsk->cong_link_cnt--;
2169 tipc_sk_push_backlog(tsk, false);
2175 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
2190 * @tsk: TIPC socket
2195 static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2198 struct sock *sk = &tsk->sk;
2202 u32 pport = tsk_peer_port(tsk);
2203 u32 pnode = tsk_peer_node(tsk);
2211 tsk->oneway = 0;
2219 tipc_sk_finish_conn(tsk, oport, onode);
2220 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2241 delay %= (tsk->conn_timeout / 4);
2258 tipc_sk_push_backlog(tsk, false);
2265 skb = tipc_sk_build_ack(tsk);
2273 if (!tsk_peer_msg(tsk, hdr))
2278 tipc_node_remove_conn(net, pnode, tsk->portid);
2311 struct tipc_sock *tsk = tipc_sk(sk);
2320 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
2341 struct tipc_sock *tsk = tipc_sk(sk);
2342 struct tipc_group *grp = tsk->group;
2361 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
2367 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
2488 struct tipc_sock *tsk;
2495 tsk = tipc_sk_lookup(net, dport);
2497 if (likely(tsk)) {
2498 sk = &tsk->sk;
2577 struct tipc_sock *tsk = tipc_sk(sk);
2580 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
2589 if (tsk->group) {
2595 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2606 memcpy(&tsk->peer, dest, destlen);
2832 struct tipc_sock *tsk = tipc_sk(sk);
2833 u32 pnode = tsk_peer_node(tsk);
2834 u32 pport = tsk_peer_port(tsk);
2835 u32 self = tsk_own_node(tsk);
2836 u32 oport = tsk->portid;
2839 if (tsk->probe_unacked) {
2851 tsk->probe_unacked = true;
2857 struct tipc_sock *tsk = tipc_sk(sk);
2860 if (tsk->cong_link_cnt) {
2872 struct tipc_sock *tsk = tipc_sk(sk);
2873 u32 pnode = tsk_peer_node(tsk);
2896 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
2900 tipc_dest_push(&tsk->cong_links, pnode, 0);
2901 tsk->cong_link_cnt = 1;
2906 static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2908 struct sock *sk = &tsk->sk;
2916 key = tsk->portid + tsk->pub_count + 1;
2917 if (key == tsk->portid)
2919 skaddr.ref = tsk->portid;
2925 list_add(&p->binding_sock, &tsk->publications);
2926 tsk->pub_count++;
2927 tsk->published = true;
2931 static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua)
2933 struct net *net = sock_net(&tsk->sk);
2938 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
2958 if (list_empty(&tsk->publications)) {
2959 tsk->published = 0;
2972 struct tipc_sock *tsk;
2980 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
2981 sock_hold(&tsk->sk);
2983 lock_sock(&tsk->sk);
2984 msg = &tsk->phdr;
2987 release_sock(&tsk->sk);
2989 sock_put(&tsk->sk);
2993 } while (tsk == ERR_PTR(-EAGAIN));
3001 struct tipc_sock *tsk;
3004 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
3005 if (tsk)
3006 sock_hold(&tsk->sk);
3009 return tsk;
3012 static int tipc_sk_insert(struct tipc_sock *tsk)
3014 struct sock *sk = &tsk->sk;
3024 tsk->portid = portid;
3025 sock_hold(&tsk->sk);
3026 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3029 sock_put(&tsk->sk);
3035 static void tipc_sk_remove(struct tipc_sock *tsk)
3037 struct sock *sk = &tsk->sk;
3040 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
3073 static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3075 struct net *net = sock_net(&tsk->sk);
3076 struct tipc_group *grp = tsk->group;
3077 struct tipc_msg *hdr = &tsk->phdr;
3089 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
3092 tsk->group = grp;
3099 rc = tipc_sk_publish(tsk, &ua);
3102 tsk->group = NULL;
3106 tsk->mc_method.rcast = true;
3107 tsk->mc_method.mandatory = true;
3108 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
3112 static int tipc_sk_leave(struct tipc_sock *tsk)
3114 struct net *net = sock_net(&tsk->sk);
3115 struct tipc_group *grp = tsk->group;
3125 tsk->group = NULL;
3126 tipc_sk_withdraw(tsk, &ua);
3147 struct tipc_sock *tsk = tipc_sk(sk);
3187 tsk_set_unreliable(tsk, value);
3192 tsk_set_unreturnable(tsk, value);
3198 tsk->mc_method.rcast = false;
3199 tsk->mc_method.mandatory = true;
3202 tsk->mc_method.rcast = true;
3203 tsk->mc_method.mandatory = true;
3206 res = tipc_sk_join(tsk, &mreq);
3209 res = tipc_sk_leave(tsk);
3212 tsk->nodelay = !!value;
3213 tsk_set_nagle(tsk);
3241 struct tipc_sock *tsk = tipc_sk(sk);
3259 value = tsk_importance(tsk);
3262 value = tsk_unreliable(tsk);
3265 value = tsk_unreturnable(tsk);
3268 value = tsk->conn_timeout;
3282 if (tsk->group)
3283 tipc_group_self(tsk->group, &seq, &scope);
3468 static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
3474 peer_node = tsk_peer_node(tsk);
3475 peer_port = tsk_peer_port(tsk);
3476 conn_type = msg_nametype(&tsk->phdr);
3477 conn_instance = msg_nameinst(&tsk->phdr);
3487 if (tsk->conn_addrtype != 0) {
3506 *tsk)
3509 struct sock *sk = &tsk->sk;
3511 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
3516 if (__tipc_nl_add_sk_con(skb, tsk))
3518 } else if (!list_empty(&tsk->publications)) {
3527 struct tipc_sock *tsk)
3541 if (__tipc_nl_add_sk_info(skb, tsk))
3560 struct tipc_sock *tsk))
3563 struct tipc_sock *tsk;
3567 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3568 if (IS_ERR(tsk)) {
3569 err = PTR_ERR(tsk);
3577 sock_hold(&tsk->sk);
3579 lock_sock(&tsk->sk);
3580 err = skb_handler(skb, cb, tsk);
3582 release_sock(&tsk->sk);
3583 sock_put(&tsk->sk);
3586 release_sock(&tsk->sk);
3588 sock_put(&tsk->sk);
3631 struct tipc_sock *tsk, u32 sk_filter_state,
3634 struct sock *sk = &tsk->sk;
3646 if (__tipc_nl_add_sk_info(skb, tsk))
3672 if (tsk->cong_link_cnt &&
3676 if (tsk_conn_cong(tsk) &&
3682 if (tsk->group)
3683 if (tipc_group_fill_sock_diag(tsk->group, skb))
3746 struct tipc_sock *tsk, u32 *last_publ)
3752 list_for_each_entry(p, &tsk->publications, binding_sock) {
3756 if (list_entry_is_head(p, &tsk->publications, binding_sock)) {
3768 p = list_first_entry(&tsk->publications, struct publication,
3772 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
3791 struct tipc_sock *tsk;
3815 tsk = tipc_sk_lookup(net, tsk_portid);
3816 if (!tsk)
3819 lock_sock(&tsk->sk);
3820 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3823 release_sock(&tsk->sk);
3824 sock_put(&tsk->sk);
3846 struct tipc_sock *tsk;
3854 tsk = tipc_sk(sk);
3866 return (_port == tsk->portid);
3871 if (tsk->published) {
3872 p = list_first_entry_or_null(&tsk->publications,
3882 type = msg_nametype(&tsk->phdr);
3883 lower = msg_nameinst(&tsk->phdr);
3950 struct tipc_sock *tsk;
3959 tsk = tipc_sk(sk);
3964 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3965 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3968 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3969 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3970 conn_type = msg_nametype(&tsk->phdr);
3971 conn_instance = msg_nameinst(&tsk->phdr);
3975 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3976 if (tsk->published) {
3977 p = list_first_entry_or_null(&tsk->publications,
3983 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3984 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3985 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3986 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3987 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3988 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3989 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3990 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
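The matches above cluster around TIPC's block-based connection-level flow control: tsk_inc() converts a message length into window "blocks" when the peer advertises TIPC_BLOCK_FLOWCTL, tsk_conn_cong() reports congestion once snt_unacked exceeds snd_win, and the receive path sends an ack when rcv_unacked reaches rcv_win / TIPC_ACK_RATE. The standalone sketch below models only that arithmetic; struct toy_sock, toy_inc(), toy_conn_cong(), and the chosen constant values are illustrative assumptions for this sketch, not the kernel's definitions.

/* Standalone sketch (not kernel code) of the flow-control arithmetic
 * visible in the listing: block accounting on send, lazy acking on
 * receive.  Constants and struct layout are assumed for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLOWCTL_BLK_SZ 1024   /* assumed block granularity, in bytes */
#define TIPC_ACK_RATE  4      /* assumed: ack once a quarter of the
                               * advertised window is unacked */

struct toy_sock {
	unsigned int snt_unacked; /* blocks sent but not yet acked */
	unsigned int snd_win;     /* peer-advertised window, in blocks */
	unsigned int rcv_unacked; /* blocks received but not yet acked */
	unsigned int rcv_win;     /* our advertised window, in blocks */
};

/* Models tsk_inc(): one block per FLOWCTL_BLK_SZ bytes, rounded up. */
static unsigned int toy_inc(int msglen)
{
	return msglen / FLOWCTL_BLK_SZ + 1;
}

/* Models tsk_conn_cong(): congested once the window is exceeded. */
static bool toy_conn_cong(const struct toy_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

int main(void)
{
	struct toy_sock tsk = { .snd_win = 10, .rcv_win = 10 };
	int msglen = 1500;
	int sent = 0;

	/* Sender side: charge each message against the window until
	 * toy_conn_cong() reports congestion, as the send paths do
	 * before queuing more data. */
	while (!toy_conn_cong(&tsk)) {
		tsk.snt_unacked += toy_inc(msglen);
		sent++;
	}
	printf("sent %d msgs, %u blocks unacked, window %u\n",
	       sent, tsk.snt_unacked, tsk.snd_win);

	/* Receiver side: ack lazily, mirroring the
	 * rcv_unacked >= rcv_win / TIPC_ACK_RATE test in the listing. */
	tsk.rcv_unacked += toy_inc(msglen);
	if (tsk.rcv_unacked >= tsk.rcv_win / TIPC_ACK_RATE) {
		printf("ack %u blocks\n", tsk.rcv_unacked);
		tsk.rcv_unacked = 0;
	}
	return 0;
}

In the real code the acked blocks come back via msg_conn_ack() and are subtracted from snt_unacked in tipc_sk_conn_proto_rcv(), which is what wakes a sender blocked on tsk_conn_cong().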