Lines Matching refs:psock

55 static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
58 struct sock *csk = psock->sk;
59 struct kcm_mux *mux = psock->mux;
65 if (psock->tx_stopped) {
70 psock->tx_stopped = 1;
71 KCM_STATS_INCR(psock->stats.tx_aborts);
73 if (!psock->tx_kcm) {
75 list_del(&psock->psock_avail_list);
77 /* In this case psock is being aborted while outside of
78 * write_msgs and psock is reserved. Schedule tx_work
84 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
95 struct kcm_psock *psock)
98 psock->strp.stats.bytes -
99 psock->saved_rx_bytes);
101 psock->strp.stats.msgs - psock->saved_rx_msgs;
102 psock->saved_rx_msgs = psock->strp.stats.msgs;
103 psock->saved_rx_bytes = psock->strp.stats.bytes;
107 struct kcm_psock *psock)
110 psock->stats.tx_bytes - psock->saved_tx_bytes);
112 psock->stats.tx_msgs - psock->saved_tx_msgs;
113 psock->saved_tx_msgs = psock->stats.tx_msgs;
114 psock->saved_tx_bytes = psock->stats.tx_bytes;
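
The kcm_update_rx_mux_stats()/kcm_update_tx_mux_stats() fragments above (kernel lines 95-114; the callers appear later at 272 and 540) follow a saved-counter pattern: each psock keeps monotonically growing counters plus a saved_* copy, and the mux-wide aggregate is advanced only by the delta since the last update before the saved copy is refreshed. Below is a minimal userspace sketch of that pattern, not the kernel code; the struct and function names are invented for the example.

#include <stdio.h>

struct psock_stats {
	unsigned long long tx_msgs;
	unsigned long long tx_bytes;
	unsigned long long saved_tx_msgs;
	unsigned long long saved_tx_bytes;
};

struct mux_stats {
	unsigned long long tx_msgs;
	unsigned long long tx_bytes;
};

static void update_tx_mux_stats(struct mux_stats *mux, struct psock_stats *ps)
{
	/* Add only what accumulated since the previous call, then refresh
	 * the saved copies, mirroring the saved_tx_* lines above. */
	mux->tx_msgs  += ps->tx_msgs  - ps->saved_tx_msgs;
	mux->tx_bytes += ps->tx_bytes - ps->saved_tx_bytes;
	ps->saved_tx_msgs  = ps->tx_msgs;
	ps->saved_tx_bytes = ps->tx_bytes;
}

int main(void)
{
	struct mux_stats mux = {0};
	struct psock_stats ps = { .tx_msgs = 3, .tx_bytes = 300 };

	update_tx_mux_stats(&mux, &ps);
	ps.tx_msgs += 2;
	ps.tx_bytes += 80;
	update_tx_mux_stats(&mux, &ps);   /* only the new delta is added */

	printf("mux: %llu msgs, %llu bytes\n", mux.tx_msgs, mux.tx_bytes);
	return 0;
}
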
121 * pending ready messages on a psock. RX mux lock held.
126 struct kcm_psock *psock;
142 psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
145 if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
151 /* Consumed the ready message on the psock. Schedule rx_work to
154 list_del(&psock->psock_ready_list);
155 psock->ready_rx_msg = NULL;
159 strp_unpause(&psock->strp);
160 strp_check_rcv(&psock->strp);
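
The fragments at kernel lines 142-160 show the resume side of the receive backpressure cycle: a message parked on a psock (ready_rx_msg) is finally queued to a kcm socket, the psock comes off the ready list, and the stream parser is unpaused and re-kicked with strp_check_rcv(). The park/pause side appears a little further down, in the reserve_rx_kcm fragments at 275-277. The sketch below is a simplified, single-threaded userspace illustration of that park/pause/resume cycle; the types, names, and the int "message" are invented and no locking is shown.

#include <stdio.h>
#include <stdbool.h>

#define QUEUE_CAP 2

struct receiver {
	int queue[QUEUE_CAP];
	int count;
};

struct psock_sim {
	bool paused;      /* stand-in for a paused stream parser */
	int parked_msg;   /* stand-in for ready_rx_msg */
	bool has_parked;
};

/* Try to hand a message to the receiver; park it and pause on overflow. */
static void deliver(struct psock_sim *ps, struct receiver *rx, int msg)
{
	if (rx->count < QUEUE_CAP) {
		rx->queue[rx->count++] = msg;
		return;
	}
	/* Receiver is full: hold the message and stop parsing more input. */
	ps->parked_msg = msg;
	ps->has_parked = true;
	ps->paused = true;
}

/* Called once the receiver drains: requeue the parked message and resume,
 * analogous to the strp_unpause()/strp_check_rcv() lines above. */
static void receiver_ready(struct psock_sim *ps, struct receiver *rx)
{
	if (ps->has_parked && rx->count < QUEUE_CAP) {
		rx->queue[rx->count++] = ps->parked_msg;
		ps->has_parked = false;
		ps->paused = false;
	}
}

int main(void)
{
	struct receiver rx = { .count = 0 };
	struct psock_sim ps = { .paused = false, .has_parked = false };

	deliver(&ps, &rx, 1);
	deliver(&ps, &rx, 2);
	deliver(&ps, &rx, 3);   /* overflows: message parked, parser paused */
	printf("paused=%d parked=%d\n", ps.paused, ps.has_parked);

	rx.count = 0;           /* receiver drained */
	receiver_ready(&ps, &rx);
	printf("paused=%d parked=%d queued=%d\n", ps.paused, ps.has_parked, rx.count);
	return 0;
}
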
254 static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
257 struct kcm_mux *mux = psock->mux;
260 WARN_ON(psock->ready_rx_msg);
262 if (psock->rx_kcm)
263 return psock->rx_kcm;
267 if (psock->rx_kcm) {
269 return psock->rx_kcm;
272 kcm_update_rx_mux_stats(mux, psock);
275 psock->ready_rx_msg = head;
276 strp_pause(&psock->strp);
277 list_add_tail(&psock->psock_ready_list,
289 psock->rx_kcm = kcm;
291 WRITE_ONCE(kcm->rx_psock, psock);
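
The reserve_rx_kcm() fragments above (kernel lines 254-291) check psock->rx_kcm twice: once without the lock as a fast path (262-263), then again after taking the mux lock (267-269), before recording the pairing in both directions (289-291). Below is a small userspace sketch of that double-checked reservation, using a pthread mutex in place of the mux lock; the receiver-selection step and all names are invented for the example.

#include <pthread.h>
#include <stdio.h>

struct receiver { int id; };

struct psock_sim {
	struct receiver *rx_owner;   /* stand-in for psock->rx_kcm */
	pthread_mutex_t *lock;       /* stand-in for the mux rx lock */
};

static struct receiver *pick_receiver(void)
{
	static struct receiver rx = { .id = 1 };
	return &rx;   /* the kernel instead picks a kcm from the mux */
}

static struct receiver *reserve_receiver(struct psock_sim *ps)
{
	struct receiver *rx;

	/* Fast path: an existing reservation is stable for this caller. */
	if (ps->rx_owner)
		return ps->rx_owner;

	pthread_mutex_lock(ps->lock);

	/* Re-check under the lock: the reservation may have been made
	 * concurrently, as in the fragments at 267-269. */
	if (ps->rx_owner) {
		rx = ps->rx_owner;
		pthread_mutex_unlock(ps->lock);
		return rx;
	}

	rx = pick_receiver();
	ps->rx_owner = rx;           /* record the pairing, as at 289-291 */

	pthread_mutex_unlock(ps->lock);
	return rx;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct psock_sim ps = { .rx_owner = NULL, .lock = &lock };

	printf("reserved receiver %d\n", reserve_receiver(&ps)->id);
	printf("reserved receiver %d\n", reserve_receiver(&ps)->id);
	return 0;
}
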
306 static void unreserve_rx_kcm(struct kcm_psock *psock,
309 struct kcm_sock *kcm = psock->rx_kcm;
310 struct kcm_mux *mux = psock->mux;
317 psock->rx_kcm = NULL;
351 struct kcm_psock *psock;
357 psock = (struct kcm_psock *)sk->sk_user_data;
358 if (likely(psock))
359 strp_data_ready(&psock->strp);
367 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
371 kcm = reserve_rx_kcm(psock, skb);
373 /* Unable to reserve a KCM, message is held in psock and strp
381 unreserve_rx_kcm(psock, false);
388 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
389 struct bpf_prog *prog = psock->bpf_prog;
398 struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
400 unreserve_rx_kcm(psock, true);
417 struct kcm_psock *psock;
423 psock = (struct kcm_psock *)sk->sk_user_data;
424 if (unlikely(!psock))
426 mux = psock->mux;
431 kcm = psock->tx_kcm;
446 struct kcm_psock *psock;
448 psock = kcm->tx_psock;
452 if (psock) {
454 if (unlikely(psock->tx_stopped))
462 /* Check again under lock to see if psock was reserved for this
463 * psock via psock_unreserve.
465 psock = kcm->tx_psock;
466 if (unlikely(psock)) {
473 psock = list_first_entry(&mux->psocks_avail,
476 list_del(&psock->psock_avail_list);
481 kcm->tx_psock = psock;
482 psock->tx_kcm = kcm;
483 KCM_STATS_INCR(psock->stats.reserved);
492 return psock;
496 static void psock_now_avail(struct kcm_psock *psock)
498 struct kcm_mux *mux = psock->mux;
502 list_add_tail(&psock->psock_avail_list,
510 psock->tx_kcm = kcm;
517 kcm->tx_psock = psock;
518 KCM_STATS_INCR(psock->stats.reserved);
526 struct kcm_psock *psock;
531 psock = kcm->tx_psock;
533 if (WARN_ON(!psock)) {
540 kcm_update_tx_mux_stats(mux, psock);
545 psock->tx_kcm = NULL;
546 KCM_STATS_INCR(psock->stats.unreserved);
548 if (unlikely(psock->tx_stopped)) {
549 if (psock->done) {
551 list_del(&psock->psock_list);
553 sock_put(psock->sk);
554 fput(psock->sk->sk_socket->file);
555 kmem_cache_free(kcm_psockp, psock);
565 psock_now_avail(psock);
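
The psock_now_avail() and unreserve fragments above (kernel lines 496-565) handle the transmit pairing in the release direction: the stats delta is folded into the mux (540), the pairing is cleared (545), and the psock is either freed (if it was stopped and marked done, 548-555) or made available again. Becoming available means either being handed directly to a waiting kcm (510, 517) or being parked on the mux's avail list (502). The sketch below is a reduced, single-threaded userspace illustration of that "hand off to a waiter, otherwise park on the free list" step; the fixed-size arrays and all names are invented.

#include <stdio.h>

#define MAX 4

struct waiter { int id; };

static struct waiter *waiters[MAX]; /* consumers waiting for a free psock */
static int nwaiters;
static int avail[MAX];              /* ids of parked (available) psocks */
static int navail;

/* A psock just became free: give it to the oldest waiter if there is one,
 * otherwise park it on the avail list. */
static void psock_now_avail_sim(int psock_id)
{
	if (nwaiters) {
		struct waiter *w = waiters[0];

		/* shift the waiter queue; the kernel uses linked lists */
		for (int i = 1; i < nwaiters; i++)
			waiters[i - 1] = waiters[i];
		nwaiters--;

		printf("psock %d handed directly to waiting kcm %d\n",
		       psock_id, w->id);
		return;
	}
	avail[navail++] = psock_id;
	printf("psock %d parked on avail list\n", psock_id);
}

int main(void)
{
	static struct waiter w = { .id = 7 };

	psock_now_avail_sim(1);    /* no waiter: parked */
	waiters[nwaiters++] = &w;
	psock_now_avail_sim(2);    /* waiter present: direct handoff */
	printf("avail list holds %d psock(s)\n", navail);
	return 0;
}
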
586 struct kcm_psock *psock;
591 psock = kcm->tx_psock;
592 if (unlikely(psock && psock->tx_stopped)) {
593 /* A reserved psock was aborted asynchronously. Unreserve
615 psock = reserve_psock(kcm);
616 if (!psock)
623 if (WARN_ON(!psock)) {
645 ret = sock_sendmsg(psock->sk->sk_socket, &msg);
657 * psock since it has lost framing
661 kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
664 psock = NULL;
674 KCM_STATS_ADD(psock->stats.tx_bytes, ret);
694 KCM_STATS_INCR(psock->stats.tx_msgs);
700 if (psock)
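
The transmit-loop fragments above (kernel lines 586-700) show the error policy on the send path: a psock is reserved (615), the message is written with sock_sendmsg() on the underlying socket (645), per-psock tx counters are bumped on success (674, 694), and on failure the psock is aborted via kcm_abort_tx_psock() (661) because a partially sent message means the byte stream "has lost framing" (657) and cannot be reused. Below is a userspace sketch of that policy over a socketpair; the transport struct and helper names are invented for the example.

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <stdbool.h>

struct transport {
	int fd;
	bool aborted;          /* once set, this stream is never reused */
	unsigned long tx_msgs;
	unsigned long tx_bytes;
};

/* Send one framed message; any error or short write poisons the stream,
 * since the peer could no longer tell where the next message starts. */
static int send_msg(struct transport *t, const void *buf, size_t len)
{
	ssize_t n;

	if (t->aborted)
		return -1;

	n = write(t->fd, buf, len);
	if (n < 0 || (size_t)n != len) {
		t->aborted = true;   /* analogous to kcm_abort_tx_psock() */
		return -1;
	}
	t->tx_msgs++;
	t->tx_bytes += len;
	return 0;
}

int main(void)
{
	int fds[2];
	struct transport t = { .aborted = false };
	const char msg[] = "hello";

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
		return 1;
	t.fd = fds[0];

	send_msg(&t, msg, sizeof(msg));
	close(fds[0]);                   /* simulate a dead transport */
	send_msg(&t, msg, sizeof(msg));  /* fails and marks it aborted */

	printf("msgs=%lu bytes=%lu aborted=%d\n", t.tx_msgs, t.tx_bytes, t.aborted);
	close(fds[1]);
	return 0;
}
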
1079 /* If a psock is reserved we'll do cleanup in unreserve */
1218 struct kcm_psock *psock = NULL, *tpsock;
1247 psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
1248 if (!psock) {
1253 psock->mux = mux;
1254 psock->sk = csk;
1255 psock->bpf_prog = prog;
1264 kmem_cache_free(kcm_psockp, psock);
1269 err = strp_init(&psock->strp, csk, &cb);
1272 kmem_cache_free(kcm_psockp, psock);
1276 psock->save_data_ready = csk->sk_data_ready;
1277 psock->save_write_space = csk->sk_write_space;
1278 psock->save_state_change = csk->sk_state_change;
1279 csk->sk_user_data = psock;
1288 /* Finished initialization, now add the psock to the MUX. */
1298 list_add(&psock->psock_list, head);
1299 psock->index = index;
1303 psock_now_avail(psock);
1307 strp_check_rcv(&psock->strp);
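
The attach-path fragments above (kernel lines 1218-1307) show the attach sequence: allocate the psock (1247), initialize its stream parser (1269), save the socket's original sk_data_ready / sk_write_space / sk_state_change callbacks (1276-1278), point sk_user_data at the psock (1279), link it into the mux (1298), mark it available (1303), and kick the parser (1307). The detach fragments further down restore the saved callbacks (1357-1359), and the sk_user_data lines at 357-359 show how the override finds the psock again. Below is a small userspace sketch of that save/override/restore idiom using plain function pointers; all types and names are invented.

#include <stdio.h>

struct sock_sim {
	void (*data_ready)(struct sock_sim *sk);
	void *user_data;
};

struct psock_sim {
	struct sock_sim *sk;
	void (*save_data_ready)(struct sock_sim *sk);  /* original callback */
};

static void original_data_ready(struct sock_sim *sk)
{
	printf("original data_ready\n");
}

static void override_data_ready(struct sock_sim *sk)
{
	/* With sk->user_data pointing at the psock, the override can reach
	 * its own state, as the sk_user_data fragments at 357-359 suggest. */
	printf("override data_ready (user_data=%p)\n", sk->user_data);
}

static void attach(struct psock_sim *ps, struct sock_sim *sk)
{
	ps->sk = sk;
	ps->save_data_ready = sk->data_ready;  /* remember the original */
	sk->user_data = ps;
	sk->data_ready = override_data_ready;  /* install the override  */
}

static void unattach(struct psock_sim *ps)
{
	ps->sk->data_ready = ps->save_data_ready;  /* put the original back */
	ps->sk->user_data = NULL;
}

int main(void)
{
	struct sock_sim sk = { .data_ready = original_data_ready };
	struct psock_sim ps;

	sk.data_ready(&sk);
	attach(&ps, &sk);
	sk.data_ready(&sk);     /* now routed through the override */
	unattach(&ps);
	sk.data_ready(&sk);     /* original behaviour restored */
	return 0;
}
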
1345 static void kcm_unattach(struct kcm_psock *psock)
1347 struct sock *csk = psock->sk;
1348 struct kcm_mux *mux = psock->mux;
1353 * be no way to reserve a kcm for this psock.
1357 csk->sk_data_ready = psock->save_data_ready;
1358 csk->sk_write_space = psock->save_write_space;
1359 csk->sk_state_change = psock->save_state_change;
1360 strp_stop(&psock->strp);
1362 if (WARN_ON(psock->rx_kcm)) {
1370 /* Stop receiver activities. After this point psock should not be
1373 if (psock->ready_rx_msg) {
1374 list_del(&psock->psock_ready_list);
1375 kfree_skb(psock->ready_rx_msg);
1376 psock->ready_rx_msg = NULL;
1386 strp_done(&psock->strp);
1389 bpf_prog_put(psock->bpf_prog);
1393 aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
1394 save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);
1398 if (psock->tx_kcm) {
1399 /* psock was reserved. Just mark it finished and we will clean
1410 kcm_abort_tx_psock(psock, EPIPE, false);
1413 if (!psock->tx_kcm) {
1414 /* psock now unreserved in window mux was unlocked */
1417 psock->done = 1;
1422 /* Queue tx work to make sure psock->done is handled */
1423 queue_work(kcm_wq, &psock->tx_kcm->tx_work);
1427 if (!psock->tx_stopped)
1428 list_del(&psock->psock_avail_list);
1429 list_del(&psock->psock_list);
1435 kmem_cache_free(kcm_psockp, psock);
1445 struct kcm_psock *psock;
1464 list_for_each_entry(psock, &mux->psocks, psock_list) {
1465 if (psock->sk != csk)
1468 /* Found the matching psock */
1470 if (psock->unattaching || WARN_ON(psock->done)) {
1475 psock->unattaching = 1;
1480 kcm_unattach(psock);
1593 struct kcm_psock *psock, *tmp_psock;
1596 list_for_each_entry_safe(psock, tmp_psock,
1598 if (!WARN_ON(psock->unattaching))
1599 kcm_unattach(psock);
1676 struct kcm_psock *psock;
1694 /* Set tx_stopped. This is checked when psock is bound to a kcm and we
1696 * from the callback (unbinding the psock occurs after canceling work.
1705 * that a psock will be assigned to this kcm.
1718 psock = kcm->tx_psock;
1719 if (psock) {
1720 /* A psock was reserved, so we need to kill it since it
1724 kcm_abort_tx_psock(psock, EPIPE, false);