Lines matching refs: po (struct packet_sock *po, net/packet/af_packet.c)

189 static void *packet_previous_frame(struct packet_sock *po,
238 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
239 static void __fanout_link(struct sock *sk, struct packet_sock *po);
246 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
251 dev = rcu_dereference(po->cached_dev);
259 static void packet_cached_dev_assign(struct packet_sock *po,
262 rcu_assign_pointer(po->cached_dev, dev);
265 static void packet_cached_dev_reset(struct packet_sock *po)
267 RCU_INIT_POINTER(po->cached_dev, NULL);
270 static bool packet_use_direct_xmit(const struct packet_sock *po)
273 return READ_ONCE(po->xmit) == packet_direct_xmit;
303 struct packet_sock *po = pkt_sk(sk);
305 if (!po->running) {
306 if (po->fanout)
307 __fanout_link(sk, po);
309 dev_add_pack(&po->prot_hook);
312 po->running = 1;
323 * the po->bind_lock and do a synchronize_net to make sure no
325 * of po->prot_hook. If the sync parameter is false, it is the
330 struct packet_sock *po = pkt_sk(sk);
332 lockdep_assert_held_once(&po->bind_lock);
334 po->running = 0;
336 if (po->fanout)
337 __fanout_unlink(sk, po);
339 __dev_remove_pack(&po->prot_hook);
344 spin_unlock(&po->bind_lock);
346 spin_lock(&po->bind_lock);
352 struct packet_sock *po = pkt_sk(sk);
354 if (po->running)
365 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
372 switch (po->tp_version) {
393 static int __packet_get_status(const struct packet_sock *po, void *frame)
402 switch (po->tp_version) {
436 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
443 if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
454 switch (po->tp_version) {
479 static void *packet_lookup_frame(const struct packet_sock *po,
493 if (status != __packet_get_status(po, h.raw))
499 static void *packet_current_frame(struct packet_sock *po,
503 return packet_lookup_frame(po, rb, rb->head, status);
511 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
516 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
525 static void prb_setup_retire_blk_timer(struct packet_sock *po)
529 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
535 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
544 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
578 static void init_prb_bdqc(struct packet_sock *po,
594 p1->hdrlen = po->tp_hdrlen;
595 p1->version = po->tp_version;
597 po->stats.stats3.tp_freeze_q_cnt = 0;
601 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
609 prb_setup_retire_blk_timer(po);
648 struct packet_sock *po =
649 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
650 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
654 spin_lock(&po->sk.sk_receive_queue.lock);
683 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
684 if (!prb_dispatch_next_block(pkc, po))
716 spin_unlock(&po->sk.sk_receive_queue.lock);
764 struct packet_sock *po, unsigned int stat)
770 struct sock *sk = &po->sk;
772 if (atomic_read(&po->tp_drops))
877 struct packet_sock *po)
880 po->stats.stats3.tp_freeze_q_cnt++;
892 struct packet_sock *po)
903 prb_freeze_queue(pkc, po);
917 struct packet_sock *po, unsigned int status)
937 prb_close_block(pkc, pbd, po, status);
1017 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1026 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1061 prb_retire_current_block(pkc, po, 0);
1064 curr = (char *)prb_dispatch_next_block(pkc, po);
1078 static void *packet_current_rx_frame(struct packet_sock *po,
1083 switch (po->tp_version) {
1086 curr = packet_lookup_frame(po, &po->rx_ring,
1087 po->rx_ring.head, status);
1090 return __packet_lookup_frame_in_block(po, skb, len);
1098 static void *prb_lookup_block(const struct packet_sock *po,
1122 static void *__prb_previous_block(struct packet_sock *po,
1127 return prb_lookup_block(po, rb, previous, status);
1130 static void *packet_previous_rx_frame(struct packet_sock *po,
1134 if (po->tp_version <= TPACKET_V2)
1135 return packet_previous_frame(po, rb, status);
1137 return __prb_previous_block(po, rb, status);
1140 static void packet_increment_rx_head(struct packet_sock *po,
1143 switch (po->tp_version) {
1155 static void *packet_previous_frame(struct packet_sock *po,
1160 return packet_lookup_frame(po, rb, previous, status);
1193 static int packet_alloc_pending(struct packet_sock *po)
1195 po->rx_ring.pending_refcnt = NULL;
1197 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1198 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1204 static void packet_free_pending(struct packet_sock *po)
1206 free_percpu(po->tx_ring.pending_refcnt);
1214 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1218 len = READ_ONCE(po->rx_ring.frame_max) + 1;
1219 idx = READ_ONCE(po->rx_ring.head);
1224 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1227 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1231 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1232 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1237 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1240 static int __packet_rcv_has_room(const struct packet_sock *po,
1243 const struct sock *sk = &po->sk;
1246 if (po->prot_hook.func != tpacket_rcv) {
1259 if (po->tp_version == TPACKET_V3) {
1260 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1262 else if (__tpacket_v3_has_room(po, 0))
1265 if (__tpacket_has_room(po, ROOM_POW_OFF))
1267 else if (__tpacket_has_room(po, 0))
1274 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1278 ret = __packet_rcv_has_room(po, skb);
1281 if (READ_ONCE(po->pressure) != pressure)
1282 WRITE_ONCE(po->pressure, pressure);
1287 static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1289 if (READ_ONCE(po->pressure) &&
1290 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1291 WRITE_ONCE(po->pressure, 0);
1309 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1311 u32 *history = po->rollover->history;
1364 struct packet_sock *po, *po_next, *po_skip = NULL;
1367 po = pkt_sk(rcu_dereference(f->arr[idx]));
1370 room = packet_rcv_has_room(po, skb);
1372 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1374 po_skip = po;
1377 i = j = min_t(int, po->rollover->sock, num - 1);
1383 po->rollover->sock = i;
1384 atomic_long_inc(&po->rollover->num);
1386 atomic_long_inc(&po->rollover->num_huge);
1394 atomic_long_inc(&po->rollover->num_failed);
1432 struct packet_sock *po;
1474 po = pkt_sk(rcu_dereference(f->arr[idx]));
1475 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1483 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1485 struct packet_fanout *f = po->fanout;
1496 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1498 struct packet_fanout *f = po->fanout;
1553 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1560 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1571 __fanout_set_data_bpf(po->fanout, new);
1575 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1581 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1592 __fanout_set_data_bpf(po->fanout, new);
1596 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1599 switch (po->fanout->type) {
1601 return fanout_set_data_cbpf(po, data, len);
1603 return fanout_set_data_ebpf(po, data, len);
1651 struct packet_sock *po = pkt_sk(sk);
1678 if (po->fanout)
1739 match->prot_hook.type = po->prot_hook.type;
1740 match->prot_hook.dev = po->prot_hook.dev;
1750 spin_lock(&po->bind_lock);
1751 if (po->running &&
1753 match->prot_hook.type == po->prot_hook.type &&
1754 match->prot_hook.dev == po->prot_hook.dev) {
1757 __dev_remove_pack(&po->prot_hook);
1760 WRITE_ONCE(po->fanout, match);
1762 po->rollover = rollover;
1765 __fanout_link(sk, po);
1769 spin_unlock(&po->bind_lock);
1789 struct packet_sock *po = pkt_sk(sk);
1793 f = po->fanout;
1795 po->fanout = NULL;
2089 struct packet_sock *po;
2099 po = pkt_sk(sk);
2151 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2174 po->stats.stats1.tp_packets++;
2183 atomic_inc(&po->tp_drops);
2203 struct packet_sock *po;
2230 po = pkt_sk(sk);
2251 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2252 atomic_inc(&po->tp_drops);
2266 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2267 po->tp_reserve;
2270 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2272 po->tp_reserve;
2273 if (po->has_vnet_hdr) {
2280 atomic_inc(&po->tp_drops);
2283 if (po->tp_version <= TPACKET_V2) {
2284 if (macoff + snaplen > po->rx_ring.frame_size) {
2285 if (po->copy_thresh &&
2299 snaplen = po->rx_ring.frame_size - macoff;
2306 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2309 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2315 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2320 h.raw = packet_current_rx_frame(po, skb,
2325 if (po->tp_version <= TPACKET_V2) {
2326 slot_id = po->rx_ring.head;
2327 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2329 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2336 if (po->tp_version == TPACKET_V3)
2337 prb_clear_blk_fill_status(&po->rx_ring);
2341 if (po->tp_version <= TPACKET_V2) {
2342 packet_increment_rx_head(po, &po->rx_ring);
2349 if (atomic_read(&po->tp_drops))
2353 po->stats.stats1.tp_packets++;
2366 po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2372 switch (po->tp_version) {
2424 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2432 if (po->tp_version <= TPACKET_V2) {
2444 if (po->tp_version <= TPACKET_V2) {
2446 __packet_set_status(po, h.raw, status);
2447 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2450 } else if (po->tp_version == TPACKET_V3) {
2451 prb_clear_blk_fill_status(&po->rx_ring);
2468 atomic_inc(&po->tp_drops);
2478 struct packet_sock *po = pkt_sk(skb->sk);
2480 if (likely(po->tx_ring.pg_vec)) {
2485 packet_dec_pending(&po->tx_ring);
2487 ts = __packet_set_timestamp(po, ph, skb);
2488 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2490 if (!packet_read_pending(&po->tx_ring))
2491 complete(&po->skb_completion);
2526 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2533 struct socket *sock = po->sk.sk_socket;
2541 skb->priority = po->sk.sk_priority;
2542 skb->mark = po->sk.sk_mark;
2579 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2606 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2614 switch (po->tp_version) {
2634 if (unlikely(po->tp_tx_has_off)) {
2637 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2638 off_max = po->tx_ring.frame_size - tp_len;
2639 if (po->sk.sk_type == SOCK_DGRAM) {
2640 switch (po->tp_version) {
2652 switch (po->tp_version) {
2667 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2674 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2693 mutex_lock(&po->pg_vec_lock);
2698 if (unlikely(!po->tx_ring.pg_vec)) {
2703 dev = packet_cached_dev_get(po);
2704 proto = READ_ONCE(po->num);
2714 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2715 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2730 sockcm_init(&sockc, &po->sk);
2732 err = sock_cmsg_send(&po->sk, msg, &sockc);
2737 if (po->sk.sk_socket->type == SOCK_RAW)
2739 size_max = po->tx_ring.frame_size
2740 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2742 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2745 reinit_completion(&po->skb_completion);
2748 ph = packet_current_frame(po, &po->tx_ring,
2752 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2753 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2764 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2771 if (po->has_vnet_hdr) {
2784 skb = sock_alloc_send_skb(&po->sk,
2795 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2799 !po->has_vnet_hdr &&
2805 if (po->tp_loss) {
2806 __packet_set_status(po, ph,
2808 packet_increment_head(&po->tx_ring);
2818 if (po->has_vnet_hdr) {
2827 __packet_set_status(po, ph, TP_STATUS_SENDING);
2828 packet_inc_pending(&po->tx_ring);
2832 err = READ_ONCE(po->xmit)(skb);
2836 if (err && __packet_get_status(po, ph) ==
2848 packet_increment_head(&po->tx_ring);
2857 (need_wait && packet_read_pending(&po->tx_ring))));
2863 __packet_set_status(po, ph, status);
2868 mutex_unlock(&po->pg_vec_lock);
2908 struct packet_sock *po = pkt_sk(sk);
2918 dev = packet_cached_dev_get(po);
2919 proto = READ_ONCE(po->num);
2953 if (po->has_vnet_hdr) {
3036 err = READ_ONCE(po->xmit)(skb);
3060 struct packet_sock *po = pkt_sk(sk);
3065 if (data_race(po->tx_ring.pg_vec))
3066 return tpacket_snd(po, msg);
3079 struct packet_sock *po;
3088 po = pkt_sk(sk);
3098 spin_lock(&po->bind_lock);
3100 packet_cached_dev_reset(po);
3102 if (po->prot_hook.dev) {
3103 dev_put(po->prot_hook.dev);
3104 po->prot_hook.dev = NULL;
3106 spin_unlock(&po->bind_lock);
3111 if (po->rx_ring.pg_vec) {
3116 if (po->tx_ring.pg_vec) {
3126 kfree(po->rollover);
3140 packet_free_pending(po);
3154 struct packet_sock *po = pkt_sk(sk);
3163 spin_lock(&po->bind_lock);
3165 proto = po->num;
3169 if (po->fanout) {
3191 proto_curr = po->prot_hook.type;
3192 dev_curr = po->prot_hook.dev;
3197 if (po->running) {
3202 WRITE_ONCE(po->num, 0);
3205 dev_curr = po->prot_hook.dev;
3211 BUG_ON(po->running);
3212 WRITE_ONCE(po->num, proto);
3213 po->prot_hook.type = proto;
3217 po->prot_hook.dev = NULL;
3218 WRITE_ONCE(po->ifindex, -1);
3219 packet_cached_dev_reset(po);
3221 po->prot_hook.dev = dev;
3222 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3223 packet_cached_dev_assign(po, dev);
3242 spin_unlock(&po->bind_lock);
3303 struct packet_sock *po;
3326 po = pkt_sk(sk);
3327 init_completion(&po->skb_completion);
3329 po->num = proto;
3330 po->xmit = dev_queue_xmit;
3332 err = packet_alloc_pending(po);
3336 packet_cached_dev_reset(po);
3345 spin_lock_init(&po->bind_lock);
3346 mutex_init(&po->pg_vec_lock);
3347 po->rollover = NULL;
3348 po->prot_hook.func = packet_rcv;
3351 po->prot_hook.func = packet_rcv_spkt;
3353 po->prot_hook.af_packet_priv = sk;
3354 po->prot_hook.af_packet_net = sock_net(sk);
3357 po->prot_hook.type = proto;
3554 struct packet_sock *po = pkt_sk(sk);
3561 ifindex = READ_ONCE(po->ifindex);
3564 sll->sll_protocol = READ_ONCE(po->num);
3628 struct packet_sock *po = pkt_sk(sk);
3650 for (ml = po->mclist; ml; ml = ml->next) {
3668 i->next = po->mclist;
3669 po->mclist = i;
3672 po->mclist = i->next;
3709 struct packet_sock *po = pkt_sk(sk);
3712 if (!po->mclist)
3716 while ((ml = po->mclist) != NULL) {
3719 po->mclist = ml->next;
3733 struct packet_sock *po = pkt_sk(sk);
3768 switch (po->tp_version) {
3819 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3822 po->tp_version = val;
3839 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3842 po->tp_reserve = val;
3858 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3861 po->tp_loss = !!val;
3876 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3888 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3903 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3906 po->has_vnet_hdr = !!val;
3921 po->tp_tstamp = val;
3938 if (!READ_ONCE(po->fanout))
3941 return fanout_set_data(po, optval, optlen);
3954 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
3967 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3970 po->tp_tx_has_off = !!val;
3985 /* Paired with all lockless reads of po->xmit */
3986 WRITE_ONCE(po->xmit, val ? packet_direct_xmit : dev_queue_xmit);
4000 struct packet_sock *po = pkt_sk(sk);
4018 memcpy(&st, &po->stats, sizeof(st));
4019 memset(&po->stats, 0, sizeof(po->stats));
4021 drops = atomic_xchg(&po->tp_drops, 0);
4023 if (po->tp_version == TPACKET_V3) {
4037 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4040 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4043 val = po->has_vnet_hdr;
4046 val = po->tp_version;
4070 val = po->tp_reserve;
4073 val = po->tp_loss;
4076 val = po->tp_tstamp;
4079 val = (po->fanout ?
4080 ((u32)po->fanout->id |
4081 ((u32)po->fanout->type << 16) |
4082 ((u32)po->fanout->flags << 24)) :
4086 val = READ_ONCE(po->prot_hook.ignore_outgoing);
4089 if (!po->rollover)
4091 rstats.tp_all = atomic_long_read(&po->rollover->num);
4092 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4093 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4098 val = po->tp_tx_has_off;
4101 val = packet_use_direct_xmit(po);
4125 struct packet_sock *po = pkt_sk(sk);
4129 if (po->mclist)
4130 packet_dev_mclist_delete(dev, &po->mclist);
4134 if (dev->ifindex == po->ifindex) {
4135 spin_lock(&po->bind_lock);
4136 if (po->running) {
4143 packet_cached_dev_reset(po);
4144 WRITE_ONCE(po->ifindex, -1);
4145 if (po->prot_hook.dev)
4146 dev_put(po->prot_hook.dev);
4147 po->prot_hook.dev = NULL;
4149 spin_unlock(&po->bind_lock);
4153 if (dev->ifindex == po->ifindex) {
4154 spin_lock(&po->bind_lock);
4155 if (po->num)
4157 spin_unlock(&po->bind_lock);
4219 struct packet_sock *po = pkt_sk(sk);
4223 if (po->rx_ring.pg_vec) {
4224 if (!packet_previous_rx_frame(po, &po->rx_ring,
4228 packet_rcv_try_clear_pressure(po);
4231 if (po->tx_ring.pg_vec) {
4232 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4341 struct packet_sock *po = pkt_sk(sk);
4351 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4356 if (atomic_long_read(&po->mapped))
4370 switch (po->tp_version) {
4372 po->tp_hdrlen = TPACKET_HDRLEN;
4375 po->tp_hdrlen = TPACKET2_HDRLEN;
4378 po->tp_hdrlen = TPACKET3_HDRLEN;
4387 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4388 if (po->tp_version >= TPACKET_V3 &&
4411 switch (po->tp_version) {
4415 init_prb_bdqc(po, rb, pg_vec, req_u);
4446 spin_lock(&po->bind_lock);
4447 was_running = po->running;
4448 num = po->num;
4450 WRITE_ONCE(po->num, 0);
4453 spin_unlock(&po->bind_lock);
4458 mutex_lock(&po->pg_vec_lock);
4459 if (closing || atomic_long_read(&po->mapped) == 0) {
4463 if (po->tp_version <= TPACKET_V2)
4474 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4477 if (atomic_long_read(&po->mapped))
4479 atomic_long_read(&po->mapped));
4481 mutex_unlock(&po->pg_vec_lock);
4483 spin_lock(&po->bind_lock);
4485 WRITE_ONCE(po->num, num);
4488 spin_unlock(&po->bind_lock);
4489 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4492 prb_shutdown_retire_blk_timer(po, rb_queue);
4508 struct packet_sock *po = pkt_sk(sk);
4518 mutex_lock(&po->pg_vec_lock);
4521 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4537 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4557 atomic_long_inc(&po->mapped);
4562 mutex_unlock(&po->pg_vec_lock);
4647 const struct packet_sock *po = pkt_sk(s);
4654 ntohs(READ_ONCE(po->num)),
4655 READ_ONCE(po->ifindex),
4656 po->running,
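The ring-buffer paths in the listing (the PACKET_VERSION handling that stores po->tp_version around line 3822, the ring setup from line 4341 with init_prb_bdqc and the retire-block timer helpers, and the mmap path around lines 4508-4562) are driven from userspace through the PACKET_VERSION and PACKET_RX_RING socket options. The sketch below is a minimal userspace illustration of that setup, not code from this file; the TPACKET_V3 choice, the block/frame geometry, and the 60 ms retire timeout are illustrative assumptions, and running it requires CAP_NET_RAW.

```c
#include <arpa/inet.h>          /* htons() */
#include <linux/if_ether.h>     /* ETH_P_ALL */
#include <linux/if_packet.h>    /* struct tpacket_req3, PACKET_*, TPACKET_V3 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	/* SOCK_RAW: the create path above (line 3348) installs packet_rcv
	 * as po->prot_hook.func. Needs CAP_NET_RAW. */
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket(AF_PACKET)");
		return 1;
	}

	/* PACKET_VERSION ends up in po->tp_version (line 3822 above). */
	int version = TPACKET_V3;
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)) < 0) {
		perror("setsockopt(PACKET_VERSION)");
		return 1;
	}

	/* Illustrative geometry: 64 blocks of 1 MiB with 2048-byte frame slots.
	 * The ring-setup code requires tp_frame_nr ==
	 * (tp_block_size / tp_frame_size) * tp_block_nr. */
	struct tpacket_req3 req;
	memset(&req, 0, sizeof(req));
	req.tp_block_size = 1 << 20;
	req.tp_block_nr = 64;
	req.tp_frame_size = 2048;
	req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;
	req.tp_retire_blk_tov = 60;	/* ms; 0 lets prb_calc_retire_blk_tmo() pick one */

	/* This is the entry into the ring setup (line 4341 onward) and
	 * init_prb_bdqc()/prb_setup_retire_blk_timer() for TPACKET_V3. */
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
		perror("setsockopt(PACKET_RX_RING)");
		return 1;
	}

	/* The mmap path (lines 4508-4562) maps the rx_ring page vector and
	 * bumps po->mapped while the mapping exists. */
	size_t ring_len = (size_t)req.tp_block_size * req.tp_block_nr;
	void *ring = mmap(NULL, ring_len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("TPACKET_V3 RX ring mapped: %zu bytes\n", ring_len);

	munmap(ring, ring_len);
	close(fd);
	return 0;
}
```

A real receiver would then bind the socket to an interface, poll() the descriptor, and walk retired blocks in the mapped ring, handing each block back by setting its status to TP_STATUS_KERNEL; the sketch stops at the mapping step because that is where the listed kernel paths end.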