
Searched refs:sendq (Results 1 - 25 of 32) sorted by relevance


/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/
sge.c 2755 spin_lock(&q->sendq.lock); in ctrl_xmit()
2759 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
2760 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2772 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2790 spin_lock(&q->sendq.lock); in restart_ctrlq()
2794 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
2804 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2814 spin_lock(&q->sendq.lock); in restart_ctrlq()
2822 spin_lock(&q->sendq.lock); in restart_ctrlq()
2828 spin_unlock(&q->sendq in restart_ctrlq()
[all...]
cxgb4_uld.c 415 __skb_queue_purge(&txq->sendq); in free_sge_txq_uld()
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4/
sge.c 2761 spin_lock(&q->sendq.lock); in ctrl_xmit()
2765 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
2766 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2778 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2796 spin_lock(&q->sendq.lock); in restart_ctrlq()
2800 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
2810 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2820 spin_lock(&q->sendq.lock); in restart_ctrlq()
2828 spin_lock(&q->sendq.lock); in restart_ctrlq()
2834 spin_unlock(&q->sendq in restart_ctrlq()
[all...]
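
In both kernel trees above, the cxgb4 sendq is a spinlock-protected sk_buff list that absorbs control packets whenever the hardware ring runs out of descriptors: ctrl_xmit() parks the skb under sendq.lock and restart_ctrlq() later drains the backlog once descriptors return. The following is a minimal userspace sketch of that shape only; the pkt/ctrlq types, the credits counter and main() are invented for illustration and do not come from sge.c.

/*
 * Sketch of the cxgb4 ctrl_xmit()/restart_ctrlq() pattern: when the ring is
 * out of descriptors, packets are parked on a locked sendq and drained later
 * by a restart routine.  Names here (pkt, ctrlq, credits) are made up; only
 * the locking/queueing shape mirrors sge.c.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt {
	int id;
	struct pkt *next;
};

struct ctrlq {
	pthread_mutex_t lock;    /* plays the role of q->sendq.lock */
	struct pkt *head, *tail; /* plays the role of q->sendq */
	int credits;             /* free descriptors in the ring */
};

/* ctrl_xmit(): send immediately if credits remain, otherwise park on sendq */
static void ctrl_xmit(struct ctrlq *q, struct pkt *p)
{
	pthread_mutex_lock(&q->lock);
	if (q->credits == 0) {
		p->next = NULL;
		if (q->tail)
			q->tail->next = p;
		else
			q->head = p;
		q->tail = p;
		pthread_mutex_unlock(&q->lock);
		return;
	}
	q->credits--;
	pthread_mutex_unlock(&q->lock);
	printf("xmit pkt %d\n", p->id);
	free(p);
}

/* restart_ctrlq(): called once credits are replenished, drains the backlog */
static void restart_ctrlq(struct ctrlq *q, int new_credits)
{
	pthread_mutex_lock(&q->lock);
	q->credits += new_credits;
	while (q->head && q->credits > 0) {
		struct pkt *p = q->head;

		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
		q->credits--;
		pthread_mutex_unlock(&q->lock);
		printf("restart xmit pkt %d\n", p->id);
		free(p);
		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
}

int main(void)
{
	struct ctrlq q = { PTHREAD_MUTEX_INITIALIZER, NULL, NULL, 1 };

	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));

		p->id = i;
		ctrl_xmit(&q, p);   /* pkt 0 goes out, 1 and 2 are parked */
	}
	restart_ctrlq(&q, 4);       /* pretend the hardware returned 4 descriptors */
	return 0;
}
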
/kernel/linux/linux-5.10/drivers/isdn/mISDN/
dsp_core.c 149 * Send data will be written to sendq. Sendq will be sent if confirm is received.
887 skb_queue_tail(&dsp->sendq, skb); in dsp_function()
934 skb_queue_purge(&dsp->sendq); in dsp_function()
979 skb_queue_purge(&dsp->sendq); in dsp_ctrl()
1015 while ((skb = skb_dequeue(&dsp->sendq))) { in dsp_send_bh()
1066 skb_queue_head_init(&ndsp->sendq); in dspcreate()
layer2.h 22 struct sk_buff_head sendq; member
dsp.h 186 struct sk_buff_head sendq; member
tei.c 335 struct sk_buff *skb = skb_dequeue(&mgr->sendq); in do_send()
359 skb = skb_dequeue(&mgr->sendq); in do_ack()
376 skb_queue_tail(&mgr->sendq, skb); in mgr_send_down()
399 skb_queue_tail(&mgr->sendq, skb); in dl_unit_data()
1253 skb_queue_purge(&mgr->sendq); in delete_teimanager()
1353 skb_queue_head_init(&mgr->sendq); in create_teimanager()
dsp_cmx.c 1584 skb_queue_tail(&dsp->sendq, nskb);
1600 skb_queue_tail(&dsp->sendq, txskb);
1617 skb_queue_tail(&dsp->sendq, nskb);
1939 skb_queue_tail(&dsp->sendq, nskb);
1955 skb_queue_tail(&member->dsp->sendq, nskb);
/kernel/linux/linux-6.6/drivers/isdn/mISDN/
dsp_core.c 149 * Send data will be written to sendq. Sendq will be sent if confirm is received.
887 skb_queue_tail(&dsp->sendq, skb); in dsp_function()
934 skb_queue_purge(&dsp->sendq); in dsp_function()
978 skb_queue_purge(&dsp->sendq); in dsp_ctrl()
1014 while ((skb = skb_dequeue(&dsp->sendq))) { in dsp_send_bh()
1065 skb_queue_head_init(&ndsp->sendq); in dspcreate()
layer2.h 22 struct sk_buff_head sendq; member
dsp.h 186 struct sk_buff_head sendq; member
tei.c 335 struct sk_buff *skb = skb_dequeue(&mgr->sendq); in do_send()
359 skb = skb_dequeue(&mgr->sendq); in do_ack()
376 skb_queue_tail(&mgr->sendq, skb); in mgr_send_down()
399 skb_queue_tail(&mgr->sendq, skb); in dl_unit_data()
1253 skb_queue_purge(&mgr->sendq); in delete_teimanager()
1353 skb_queue_head_init(&mgr->sendq); in create_teimanager()
dsp_cmx.c 1573 skb_queue_tail(&dsp->sendq, nskb);
1589 skb_queue_tail(&dsp->sendq, txskb);
1606 skb_queue_tail(&dsp->sendq, nskb);
1928 skb_queue_tail(&dsp->sendq, nskb);
1944 skb_queue_tail(&member->dsp->sendq, nskb);
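
The mISDN hits show a different use of the same name: dsp_core.c's comment says data is written to sendq and only pushed down once a confirm arrives, and tei.c's do_send()/do_ack() pair implements that stop-and-wait handoff. Below is a rough single-threaded sketch of the idea; the msg type, lowlayer_send() and the awaiting_confirm flag are made up for illustration and simplify the real state handling.

/*
 * "Queue now, send on confirm": data is appended to sendq and the next
 * element is only pushed to the lower layer after the previous one has been
 * acknowledged.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
	char payload[32];
	struct msg *next;
};

struct mgr {
	struct msg *head, *tail; /* sendq */
	bool awaiting_confirm;   /* one message in flight at a time */
};

static void lowlayer_send(const struct msg *m)
{
	printf("-> lower layer: %s\n", m->payload);
}

/* do_send(): push the head of sendq down if nothing is in flight */
static void do_send(struct mgr *mgr)
{
	if (mgr->awaiting_confirm || !mgr->head)
		return;
	mgr->awaiting_confirm = true;
	lowlayer_send(mgr->head);
}

/* mgr_send_down(): upper layer hands us data; it always goes on sendq first */
static void mgr_send_down(struct mgr *mgr, const char *text)
{
	struct msg *m = calloc(1, sizeof(*m));

	snprintf(m->payload, sizeof(m->payload), "%s", text);
	if (mgr->tail)
		mgr->tail->next = m;
	else
		mgr->head = m;
	mgr->tail = m;
	do_send(mgr);
}

/* do_ack(): confirm received, drop the confirmed message, send the next one */
static void do_ack(struct mgr *mgr)
{
	struct msg *done = mgr->head;

	if (!done)
		return;
	mgr->head = done->next;
	if (!mgr->head)
		mgr->tail = NULL;
	free(done);
	mgr->awaiting_confirm = false;
	do_send(mgr);
}

int main(void)
{
	struct mgr mgr = { 0 };

	mgr_send_down(&mgr, "SABME");
	mgr_send_down(&mgr, "I-frame 1"); /* stays queued until the confirm */
	do_ack(&mgr);                     /* confirm for "SABME" */
	do_ack(&mgr);                     /* confirm for "I-frame 1" */
	return 0;
}
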
/kernel/linux/linux-5.10/drivers/rpmsg/
virtio_rpmsg_bus.c 49 * @sendq: wait queue of sending contexts waiting for a tx buffer
68 wait_queue_head_t sendq; member
608 err = wait_event_interruptible_timeout(vrp->sendq, in rpmsg_send_offchannel_raw()
822 wake_up_interruptible(&vrp->sendq); in rpmsg_xmit_done()
901 init_waitqueue_head(&vrp->sendq); in rpmsg_probe()
/kernel/linux/linux-6.6/drivers/rpmsg/
virtio_rpmsg_bus.c 49 * @sendq: wait queue of sending contexts waiting for a tx buffer
67 wait_queue_head_t sendq; member
600 err = wait_event_interruptible_timeout(vrp->sendq, in rpmsg_send_offchannel_raw()
823 wake_up_interruptible(&vrp->sendq); in rpmsg_xmit_done()
891 init_waitqueue_head(&vrp->sendq); in rpmsg_probe()
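
In virtio_rpmsg_bus.c, sendq is not a packet queue at all but a wait_queue_head_t: rpmsg_send_offchannel_raw() sleeps on it with a timeout until rpmsg_xmit_done() frees a tx buffer and wakes the waiting senders. A pthread condition variable gives roughly the same behaviour in userspace; the free_bufs counter, the function names and the one-second timeout below are illustrative assumptions, not the driver's values.

/*
 * Rough pthread analogue of wait_event_interruptible_timeout() on vrp->sendq
 * and wake_up_interruptible() from rpmsg_xmit_done().
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sendq = PTHREAD_COND_INITIALIZER; /* the "wait queue" */
static int free_bufs;                                   /* tx buffers left */

/* wait up to timeout_s seconds for a free tx buffer, 0 on success */
static int get_tx_buf_timeout(int timeout_s)
{
	struct timespec deadline;
	int err = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_s;

	pthread_mutex_lock(&lock);
	while (free_bufs == 0 && err == 0)
		err = pthread_cond_timedwait(&sendq, &lock, &deadline);
	if (free_bufs > 0) {
		free_bufs--;
		err = 0;
	} else if (err == ETIMEDOUT) {
		err = -ETIMEDOUT; /* no buffer arrived in time, like the driver's timeout path */
	}
	pthread_mutex_unlock(&lock);
	return err;
}

/* a buffer came back from the remote side: wake any waiting sender */
static void xmit_done(void)
{
	pthread_mutex_lock(&lock);
	free_bufs++;
	pthread_cond_signal(&sendq);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	xmit_done();                                        /* one buffer available */
	printf("first send:  %d\n", get_tx_buf_timeout(1)); /* 0: got a buffer */
	printf("second send: %d\n", get_tx_buf_timeout(1)); /* times out */
	return 0;
}
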
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/
siw.h 435 struct siw_sqe *sendq; /* send queue element array */ member
627 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in siw_sq_empty()
634 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in sq_get_next()
siw_verbs.c 381 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe)); in siw_create_qp()
383 qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe)); in siw_create_qp()
385 if (qp->sendq == NULL) { in siw_create_qp()
441 if (qp->sendq) { in siw_create_qp()
444 siw_mmap_entry_insert(uctx, qp->sendq, in siw_create_qp()
492 vfree(qp->sendq); in siw_create_qp()
815 struct siw_sqe *sqe = &qp->sendq[idx]; in siw_post_send()
siw_qp.c 1234 sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in siw_sq_flush()
1339 vfree(qp->sendq); in siw_free_qp()
/kernel/linux/linux-6.6/drivers/infiniband/sw/siw/
siw.h 437 struct siw_sqe *sendq; /* send queue element array */ member
630 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in siw_sq_empty()
637 struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in sq_get_next()
siw_verbs.c 382 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe)); in siw_create_qp()
384 qp->sendq = vcalloc(num_sqe, sizeof(struct siw_sqe)); in siw_create_qp()
386 if (qp->sendq == NULL) { in siw_create_qp()
442 if (qp->sendq) { in siw_create_qp()
445 siw_mmap_entry_insert(uctx, qp->sendq, in siw_create_qp()
493 vfree(qp->sendq); in siw_create_qp()
819 struct siw_sqe *sqe = &qp->sendq[idx]; in siw_post_send()
siw_qp.c 1237 sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size]; in siw_sq_flush()
1342 vfree(qp->sendq); in siw_free_qp()
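
For siw, sendq is a flat array of struct siw_sqe indexed modulo the queue size, as in &qp->sendq[qp->sq_get % qp->attrs.sq_size], i.e. a software send-queue ring allocated with vmalloc_user()/vzalloc(). The stand-alone ring below keeps only that indexing scheme; SQ_SIZE, the valid flag and the helper names are simplifications for illustration, not siw's actual layout.

/*
 * Fixed-size send-queue ring: a producer index (sq_put) and a consumer index
 * (sq_get) both wrap modulo the array size, and a per-slot valid flag marks
 * posted-but-unconsumed entries.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SQ_SIZE 4

struct sqe {
	uint64_t wr_id; /* caller's work request id */
	bool valid;     /* slot holds a posted, not yet consumed, request */
};

struct qp {
	struct sqe sendq[SQ_SIZE];
	uint32_t sq_put; /* next slot to post into */
	uint32_t sq_get; /* next slot to consume   */
};

/* post_send(): claim the next free slot, fail if the ring is full */
static int post_send(struct qp *qp, uint64_t wr_id)
{
	struct sqe *sqe = &qp->sendq[qp->sq_put % SQ_SIZE];

	if (sqe->valid)
		return -1; /* send queue full */
	sqe->wr_id = wr_id;
	sqe->valid = true;
	qp->sq_put++;
	return 0;
}

/* peek the oldest posted element, if any */
static struct sqe *sq_get_next(struct qp *qp)
{
	struct sqe *sqe = &qp->sendq[qp->sq_get % SQ_SIZE];

	return sqe->valid ? sqe : NULL;
}

/* complete one element and advance the consumer index */
static void sq_complete(struct qp *qp)
{
	struct sqe *sqe = sq_get_next(qp);

	if (sqe) {
		printf("completed wr_id %llu\n", (unsigned long long)sqe->wr_id);
		sqe->valid = false;
		qp->sq_get++;
	}
}

int main(void)
{
	struct qp qp;

	memset(&qp, 0, sizeof(qp));
	for (uint64_t i = 1; i <= 5; i++)
		if (post_send(&qp, i))
			printf("wr_id %llu: queue full\n", (unsigned long long)i);
	while (sq_get_next(&qp))
		sq_complete(&qp);
	return 0;
}
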
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb3/
adapter.h 180 struct sk_buff_head sendq; /* List of backpressured offload packets */ member
sge.c 708 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
1427 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1428 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1535 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1546 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1755 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1783 __skb_unlink(skb, &q->sendq); in restart_offloadq()
3085 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb3/
adapter.h 180 struct sk_buff_head sendq; /* List of backpressured offload packets */ member
sge.c 704 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
1422 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1423 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1531 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1542 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1753 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1781 __skb_unlink(skb, &q->sendq); in restart_offloadq()
3083 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
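
cxgb3 uses sendq much like cxgb4, as a list of backpressured offload/control packets, but restart_offloadq() drains it with a peek-then-unlink loop: a packet is only removed from sendq after descriptors for it have been reserved, so a mid-loop shortage leaves it queued for the next restart. A compact sketch of that ordering follows; the pkt list, the ndesc field and the avail counter are invented for illustration.

/*
 * Peek-then-unlink drain: the packet stays on sendq until space for it has
 * been accounted, mirroring skb_peek() followed by __skb_unlink() in
 * restart_offloadq().
 */
#include <stdio.h>

struct pkt {
	int ndesc; /* descriptors this packet needs */
	struct pkt *next;
};

struct offloadq {
	struct pkt *head; /* sendq of backpressured offload packets */
	int avail;        /* free descriptors in the hardware ring */
};

static void restart_offloadq(struct offloadq *q)
{
	struct pkt *p;

	while ((p = q->head) != NULL) {  /* skb_peek(&q->sendq) */
		if (p->ndesc > q->avail) {
			/* not enough room: leave the packet queued and stop */
			printf("stalled, %d descriptors needed, %d free\n",
			       p->ndesc, q->avail);
			return;
		}
		q->avail -= p->ndesc;
		q->head = p->next;       /* __skb_unlink(skb, &q->sendq) */
		printf("wrote packet needing %d descriptors\n", p->ndesc);
	}
}

int main(void)
{
	struct pkt c = { 4, NULL }, b = { 2, &c }, a = { 1, &b };
	struct offloadq q = { &a, 5 };

	restart_offloadq(&q); /* sends a and b, then stalls on c (needs 4, 2 free) */
	return 0;
}
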
