Lines matching refs: po
194 static void *packet_previous_frame(struct packet_sock *po,
243 static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
244 static void __fanout_link(struct sock *sk, struct packet_sock *po);
273 static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
275 if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
288 static struct net_device *packet_cached_dev_get(struct packet_sock *po)
293 dev = rcu_dereference(po->cached_dev);
300 static void packet_cached_dev_assign(struct packet_sock *po,
303 rcu_assign_pointer(po->cached_dev, dev);
306 static void packet_cached_dev_reset(struct packet_sock *po)
308 RCU_INIT_POINTER(po->cached_dev, NULL);
338 struct packet_sock *po = pkt_sk(sk);
340 if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
341 if (po->fanout)
342 __fanout_link(sk, po);
344 dev_add_pack(&po->prot_hook);
347 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
358 * the po->bind_lock and do a synchronize_net to make sure no
360 * of po->prot_hook. If the sync parameter is false, it is the
365 struct packet_sock *po = pkt_sk(sk);
367 lockdep_assert_held_once(&po->bind_lock);
369 packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);
371 if (po->fanout)
372 __fanout_unlink(sk, po);
374 __dev_remove_pack(&po->prot_hook);
379 spin_unlock(&po->bind_lock);
381 spin_lock(&po->bind_lock);
387 struct packet_sock *po = pkt_sk(sk);
389 if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
400 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
407 switch (po->tp_version) {
428 static int __packet_get_status(const struct packet_sock *po, void *frame)
437 switch (po->tp_version) {
471 static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
478 if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
489 switch (po->tp_version) {
514 static void *packet_lookup_frame(const struct packet_sock *po,
528 if (status != __packet_get_status(po, h.raw))
534 static void *packet_current_frame(struct packet_sock *po,
538 return packet_lookup_frame(po, rb, rb->head, status);
546 static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
551 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
560 static void prb_setup_retire_blk_timer(struct packet_sock *po)
564 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
570 static int prb_calc_retire_blk_tmo(struct packet_sock *po,
579 dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
613 static void init_prb_bdqc(struct packet_sock *po,
629 p1->hdrlen = po->tp_hdrlen;
630 p1->version = po->tp_version;
632 po->stats.stats3.tp_freeze_q_cnt = 0;
636 p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
644 prb_setup_retire_blk_timer(po);
683 struct packet_sock *po =
684 from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
685 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
689 spin_lock(&po->sk.sk_receive_queue.lock);
718 prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
719 if (!prb_dispatch_next_block(pkc, po))
751 spin_unlock(&po->sk.sk_receive_queue.lock);
799 struct packet_sock *po, unsigned int stat)
805 struct sock *sk = &po->sk;
807 if (atomic_read(&po->tp_drops))
912 struct packet_sock *po)
915 po->stats.stats3.tp_freeze_q_cnt++;
927 struct packet_sock *po)
938 prb_freeze_queue(pkc, po);
952 struct packet_sock *po, unsigned int status)
972 prb_close_block(pkc, pbd, po, status);
1052 static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1061 pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1096 prb_retire_current_block(pkc, po, 0);
1099 curr = (char *)prb_dispatch_next_block(pkc, po);
1113 static void *packet_current_rx_frame(struct packet_sock *po,
1118 switch (po->tp_version) {
1121 curr = packet_lookup_frame(po, &po->rx_ring,
1122 po->rx_ring.head, status);
1125 return __packet_lookup_frame_in_block(po, skb, len);
1133 static void *prb_lookup_block(const struct packet_sock *po,
1157 static void *__prb_previous_block(struct packet_sock *po,
1162 return prb_lookup_block(po, rb, previous, status);
1165 static void *packet_previous_rx_frame(struct packet_sock *po,
1169 if (po->tp_version <= TPACKET_V2)
1170 return packet_previous_frame(po, rb, status);
1172 return __prb_previous_block(po, rb, status);
1175 static void packet_increment_rx_head(struct packet_sock *po,
1178 switch (po->tp_version) {
1190 static void *packet_previous_frame(struct packet_sock *po,
1195 return packet_lookup_frame(po, rb, previous, status);
1228 static int packet_alloc_pending(struct packet_sock *po)
1230 po->rx_ring.pending_refcnt = NULL;
1232 po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1233 if (unlikely(po->tx_ring.pending_refcnt == NULL))
1239 static void packet_free_pending(struct packet_sock *po)
1241 free_percpu(po->tx_ring.pending_refcnt);
1249 static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1253 len = READ_ONCE(po->rx_ring.frame_max) + 1;
1254 idx = READ_ONCE(po->rx_ring.head);
1259 return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1262 static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1266 len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1267 idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1272 return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1275 static int __packet_rcv_has_room(const struct packet_sock *po,
1278 const struct sock *sk = &po->sk;
1281 if (po->prot_hook.func != tpacket_rcv) {
1294 if (po->tp_version == TPACKET_V3) {
1295 if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1297 else if (__tpacket_v3_has_room(po, 0))
1300 if (__tpacket_has_room(po, ROOM_POW_OFF))
1302 else if (__tpacket_has_room(po, 0))
1309 static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1314 ret = __packet_rcv_has_room(po, skb);
1317 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
1318 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);
1323 static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1325 if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
1326 __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1327 packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
1343 static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1345 u32 *history = po->rollover->history;
1398 struct packet_sock *po, *po_next, *po_skip = NULL;
1401 po = pkt_sk(rcu_dereference(f->arr[idx]));
1404 room = packet_rcv_has_room(po, skb);
1406 (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1408 po_skip = po;
1411 i = j = min_t(int, po->rollover->sock, num - 1);
1418 po->rollover->sock = i;
1419 atomic_long_inc(&po->rollover->num);
1421 atomic_long_inc(&po->rollover->num_huge);
1429 atomic_long_inc(&po->rollover->num_failed);
1467 struct packet_sock *po;
1509 po = pkt_sk(rcu_dereference(f->arr[idx]));
1510 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1518 static void __fanout_link(struct sock *sk, struct packet_sock *po)
1520 struct packet_fanout *f = po->fanout;
1531 static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1533 struct packet_fanout *f = po->fanout;
1588 static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1595 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1606 __fanout_set_data_bpf(po->fanout, new);
1610 static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1616 if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1627 __fanout_set_data_bpf(po->fanout, new);
1631 static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1634 switch (po->fanout->type) {
1636 return fanout_set_data_cbpf(po, data, len);
1638 return fanout_set_data_ebpf(po, data, len);
1686 struct packet_sock *po = pkt_sk(sk);
1714 if (po->fanout)
1775 match->prot_hook.type = po->prot_hook.type;
1776 match->prot_hook.dev = po->prot_hook.dev;
1787 spin_lock(&po->bind_lock);
1788 if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
1790 match->prot_hook.type == po->prot_hook.type &&
1791 match->prot_hook.dev == po->prot_hook.dev) {
1794 __dev_remove_pack(&po->prot_hook);
1797 WRITE_ONCE(po->fanout, match);
1799 po->rollover = rollover;
1802 __fanout_link(sk, po);
1806 spin_unlock(&po->bind_lock);
1826 struct packet_sock *po = pkt_sk(sk);
1830 f = po->fanout;
1832 po->fanout = NULL;
2126 struct packet_sock *po;
2136 po = pkt_sk(sk);
2188 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2211 po->stats.stats1.tp_packets++;
2221 atomic_inc(&po->tp_drops);
2241 struct packet_sock *po;
2268 po = pkt_sk(sk);
2289 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2290 atomic_inc(&po->tp_drops);
2306 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2307 po->tp_reserve;
2310 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2312 po->tp_reserve;
2313 vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2319 atomic_inc(&po->tp_drops);
2322 if (po->tp_version <= TPACKET_V2) {
2323 if (macoff + snaplen > po->rx_ring.frame_size) {
2324 if (po->copy_thresh &&
2338 snaplen = po->rx_ring.frame_size - macoff;
2345 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2348 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2354 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2359 h.raw = packet_current_rx_frame(po, skb,
2364 if (po->tp_version <= TPACKET_V2) {
2365 slot_id = po->rx_ring.head;
2366 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2368 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2375 if (po->tp_version == TPACKET_V3)
2376 prb_clear_blk_fill_status(&po->rx_ring);
2380 if (po->tp_version <= TPACKET_V2) {
2381 packet_increment_rx_head(po, &po->rx_ring);
2388 if (atomic_read(&po->tp_drops))
2392 po->stats.stats1.tp_packets++;
2406 READ_ONCE(po->tp_tstamp) |
2413 switch (po->tp_version) {
2465 if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2473 if (po->tp_version <= TPACKET_V2) {
2485 if (po->tp_version <= TPACKET_V2) {
2487 __packet_set_status(po, h.raw, status);
2488 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2491 } else if (po->tp_version == TPACKET_V3) {
2492 prb_clear_blk_fill_status(&po->rx_ring);
2509 atomic_inc(&po->tp_drops);
2519 struct packet_sock *po = pkt_sk(skb->sk);
2521 if (likely(po->tx_ring.pg_vec)) {
2526 packet_dec_pending(&po->tx_ring);
2528 ts = __packet_set_timestamp(po, ph, skb);
2529 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2531 if (!packet_read_pending(&po->tx_ring))
2532 complete(&po->skb_completion);
2577 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2584 struct socket *sock = po->sk.sk_socket;
2592 skb->priority = READ_ONCE(po->sk.sk_priority);
2593 skb->mark = READ_ONCE(po->sk.sk_mark);
2630 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2657 static int tpacket_parse_header(struct packet_sock *po, void *frame,
2665 switch (po->tp_version) {
2685 if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2688 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2689 off_max = po->tx_ring.frame_size - tp_len;
2690 if (po->sk.sk_type == SOCK_DGRAM) {
2691 switch (po->tp_version) {
2703 switch (po->tp_version) {
2718 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2725 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2736 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2745 mutex_lock(&po->pg_vec_lock);
2750 if (unlikely(!po->tx_ring.pg_vec)) {
2755 dev = packet_cached_dev_get(po);
2756 proto = READ_ONCE(po->num);
2766 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2767 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2782 sockcm_init(&sockc, &po->sk);
2784 err = sock_cmsg_send(&po->sk, msg, &sockc);
2789 if (po->sk.sk_socket->type == SOCK_RAW)
2791 size_max = po->tx_ring.frame_size
2792 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2797 reinit_completion(&po->skb_completion);
2800 ph = packet_current_frame(po, &po->tx_ring,
2804 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2805 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2816 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2836 skb = sock_alloc_send_skb(&po->sk,
2847 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2857 if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2858 __packet_set_status(po, ph,
2860 packet_increment_head(&po->tx_ring);
2879 __packet_set_status(po, ph, TP_STATUS_SENDING);
2880 packet_inc_pending(&po->tx_ring);
2883 err = packet_xmit(po, skb);
2887 if (err && __packet_get_status(po, ph) ==
2899 packet_increment_head(&po->tx_ring);
2908 (need_wait && packet_read_pending(&po->tx_ring))));
2914 __packet_set_status(po, ph, status);
2919 mutex_unlock(&po->pg_vec_lock);
2961 struct packet_sock *po = pkt_sk(sk);
2962 int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2971 dev = packet_cached_dev_get(po);
2972 proto = READ_ONCE(po->num);
3087 err = packet_xmit(po, skb);
3111 struct packet_sock *po = pkt_sk(sk);
3116 if (data_race(po->tx_ring.pg_vec))
3117 return tpacket_snd(po, msg);
3130 struct packet_sock *po;
3139 po = pkt_sk(sk);
3147 spin_lock(&po->bind_lock);
3149 packet_cached_dev_reset(po);
3151 if (po->prot_hook.dev) {
3152 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3153 po->prot_hook.dev = NULL;
3155 spin_unlock(&po->bind_lock);
3160 if (po->rx_ring.pg_vec) {
3165 if (po->tx_ring.pg_vec) {
3175 kfree(po->rollover);
3189 packet_free_pending(po);
3202 struct packet_sock *po = pkt_sk(sk);
3209 spin_lock(&po->bind_lock);
3211 proto = po->num;
3215 if (po->fanout) {
3234 need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3238 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
3243 WRITE_ONCE(po->num, 0);
3251 BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
3252 WRITE_ONCE(po->num, proto);
3253 po->prot_hook.type = proto;
3255 netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3258 po->prot_hook.dev = NULL;
3259 WRITE_ONCE(po->ifindex, -1);
3260 packet_cached_dev_reset(po);
3262 netdev_hold(dev, &po->prot_hook.dev_tracker,
3264 po->prot_hook.dev = dev;
3265 WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3266 packet_cached_dev_assign(po, dev);
3284 spin_unlock(&po->bind_lock);
3345 struct packet_sock *po;
3368 po = pkt_sk(sk);
3369 init_completion(&po->skb_completion);
3371 po->num = proto;
3373 err = packet_alloc_pending(po);
3377 packet_cached_dev_reset(po);
3385 spin_lock_init(&po->bind_lock);
3386 mutex_init(&po->pg_vec_lock);
3387 po->rollover = NULL;
3388 po->prot_hook.func = packet_rcv;
3391 po->prot_hook.func = packet_rcv_spkt;
3393 po->prot_hook.af_packet_priv = sk;
3394 po->prot_hook.af_packet_net = sock_net(sk);
3397 po->prot_hook.type = proto;
3593 struct packet_sock *po = pkt_sk(sk);
3600 ifindex = READ_ONCE(po->ifindex);
3603 sll->sll_protocol = READ_ONCE(po->num);
3672 struct packet_sock *po = pkt_sk(sk);
3694 for (ml = po->mclist; ml; ml = ml->next) {
3712 i->next = po->mclist;
3713 po->mclist = i;
3716 po->mclist = i->next;
3753 struct packet_sock *po = pkt_sk(sk);
3756 if (!po->mclist)
3760 while ((ml = po->mclist) != NULL) {
3763 po->mclist = ml->next;
3777 struct packet_sock *po = pkt_sk(sk);
3812 switch (po->tp_version) {
3863 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3866 po->tp_version = val;
3883 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3886 po->tp_reserve = val;
3902 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3905 packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3920 packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3932 packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3956 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3959 WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
3974 WRITE_ONCE(po->tp_tstamp, val);
3991 if (!READ_ONCE(po->fanout))
3994 return fanout_set_data(po, optval, optlen);
4007 WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
4020 if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4021 packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4035 packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4049 struct packet_sock *po = pkt_sk(sk);
4067 memcpy(&st, &po->stats, sizeof(st));
4068 memset(&po->stats, 0, sizeof(po->stats));
4070 drops = atomic_xchg(&po->tp_drops, 0);
4072 if (po->tp_version == TPACKET_V3) {
4086 val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4089 val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4092 val = !!READ_ONCE(po->vnet_hdr_sz);
4095 val = READ_ONCE(po->vnet_hdr_sz);
4098 val = po->tp_version;
4122 val = po->tp_reserve;
4125 val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4128 val = READ_ONCE(po->tp_tstamp);
4131 val = (po->fanout ?
4132 ((u32)po->fanout->id |
4133 ((u32)po->fanout->type << 16) |
4134 ((u32)po->fanout->flags << 24)) :
4138 val = READ_ONCE(po->prot_hook.ignore_outgoing);
4141 if (!po->rollover)
4143 rstats.tp_all = atomic_long_read(&po->rollover->num);
4144 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4145 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4150 val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4153 val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4177 struct packet_sock *po = pkt_sk(sk);
4181 if (po->mclist)
4182 packet_dev_mclist_delete(dev, &po->mclist);
4186 if (dev->ifindex == po->ifindex) {
4187 spin_lock(&po->bind_lock);
4188 if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4195 packet_cached_dev_reset(po);
4196 WRITE_ONCE(po->ifindex, -1);
4197 netdev_put(po->prot_hook.dev,
4198 &po->prot_hook.dev_tracker);
4199 po->prot_hook.dev = NULL;
4201 spin_unlock(&po->bind_lock);
4205 if (dev->ifindex == po->ifindex) {
4206 spin_lock(&po->bind_lock);
4207 if (po->num)
4209 spin_unlock(&po->bind_lock);
4271 struct packet_sock *po = pkt_sk(sk);
4275 if (po->rx_ring.pg_vec) {
4276 if (!packet_previous_rx_frame(po, &po->rx_ring,
4280 packet_rcv_try_clear_pressure(po);
4283 if (po->tx_ring.pg_vec) {
4284 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4393 struct packet_sock *po = pkt_sk(sk);
4403 rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4408 if (atomic_long_read(&po->mapped))
4422 switch (po->tp_version) {
4424 po->tp_hdrlen = TPACKET_HDRLEN;
4427 po->tp_hdrlen = TPACKET2_HDRLEN;
4430 po->tp_hdrlen = TPACKET3_HDRLEN;
4439 min_frame_size = po->tp_hdrlen + po->tp_reserve;
4440 if (po->tp_version >= TPACKET_V3 &&
4463 switch (po->tp_version) {
4467 init_prb_bdqc(po, rb, pg_vec, req_u);
4498 spin_lock(&po->bind_lock);
4499 was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4500 num = po->num;
4502 WRITE_ONCE(po->num, 0);
4505 spin_unlock(&po->bind_lock);
4510 mutex_lock(&po->pg_vec_lock);
4511 if (closing || atomic_long_read(&po->mapped) == 0) {
4515 if (po->tp_version <= TPACKET_V2)
4526 po->prot_hook.func = (po->rx_ring.pg_vec) ?
4529 if (atomic_long_read(&po->mapped))
4531 atomic_long_read(&po->mapped));
4533 mutex_unlock(&po->pg_vec_lock);
4535 spin_lock(&po->bind_lock);
4537 WRITE_ONCE(po->num, num);
4540 spin_unlock(&po->bind_lock);
4541 if (pg_vec && (po->tp_version > TPACKET_V2)) {
4544 prb_shutdown_retire_blk_timer(po, rb_queue);
4560 struct packet_sock *po = pkt_sk(sk);
4570 mutex_lock(&po->pg_vec_lock);
4573 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4589 for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4609 atomic_long_inc(&po->mapped);
4614 mutex_unlock(&po->pg_vec_lock);
4699 const struct packet_sock *po = pkt_sk(s);
4706 ntohs(READ_ONCE(po->num)),
4707 READ_ONCE(po->ifindex),
4708 packet_sock_flag(po, PACKET_SOCK_RUNNING),
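The ring-buffer paths referenced above (packet_set_ring around line 4393, init_prb_bdqc, prb_setup_retire_blk_timer) are driven from userspace through the PACKET_VERSION and PACKET_RX_RING socket options. The following is a minimal sketch of that userspace side for a TPACKET_V3 receive ring; the block/frame sizing and the 60 ms retire timeout are hypothetical example values, not recommendations from the source above.

/*
 * Minimal TPACKET_V3 RX ring setup sketch (hypothetical sizing).
 * Requires CAP_NET_RAW.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Select the TPACKET_V3 header format before requesting a ring. */
	int ver = TPACKET_V3;
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0) {
		perror("PACKET_VERSION");
		return 1;
	}

	/* Hypothetical sizing: 64 blocks of 4 MiB, 2 KiB frames, and a
	 * 60 ms block retire timeout (serviced by the retire_blk_timer
	 * set up in prb_setup_retire_blk_timer above). */
	struct tpacket_req3 req = {
		.tp_block_size     = 1 << 22,
		.tp_block_nr       = 64,
		.tp_frame_size     = 1 << 11,
		.tp_frame_nr       = ((1 << 22) / (1 << 11)) * 64,
		.tp_retire_blk_tov = 60,
	};
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)) < 0) {
		perror("PACKET_RX_RING");
		return 1;
	}

	/* Map the ring; the kernel fills blocks and flips their status
	 * words to hand them to userspace (see tpacket_rcv and the
	 * prb_* block handling referenced in the listing). */
	size_t ring_len = (size_t)req.tp_block_size * req.tp_block_nr;
	void *ring = mmap(NULL, ring_len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);
	if (ring == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* ... poll() the socket and walk retired blocks here ... */

	munmap(ring, ring_len);
	close(fd);
	return 0;
}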