/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
vnic_sdma.c
     63  * @txreq: sdma transmit request
     71  struct sdma_txreq txreq;  (member)
     80  static void vnic_sdma_complete(struct sdma_txreq *txreq,  (in vnic_sdma_complete(), argument)
     83  struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);  (in vnic_sdma_complete())
     86  sdma_txclean(vnic_sdma->dd, txreq);  (in vnic_sdma_complete())
     98  &tx->txreq,  (in build_vnic_ulp_payload())
    109  &tx->txreq,  (in build_vnic_ulp_payload())
    119  ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,  (in build_vnic_ulp_payload())
    135  &tx->txreq,  (in build_vnic_tx_desc())
    228  hfi1_vnic_sdma_sleep(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent)  (hfi1_vnic_sdma_sleep(), argument)
         [additional matches elided]
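Every hfi1 entry above repeats one embedding pattern: the driver wraps the engine's generic struct sdma_txreq as a member of its own request (struct vnic_txreq here), hands only the embedded member to the SDMA engine, and recovers the wrapper in the completion callback with container_of(). Below is a minimal user-space sketch of that pattern; the *_stub types and the pkt_id field are invented for illustration, not hfi1 definitions.

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the generic request type that the engine knows about. */
struct sdma_txreq_stub {
	void (*complete)(struct sdma_txreq_stub *txreq, int status);
};

/* Driver-private wrapper: the generic request is embedded as a member. */
struct vnic_txreq_stub {
	int pkt_id;                      /* driver-private state */
	struct sdma_txreq_stub txreq;    /* embedded generic request */
};

/* container_of() as the kernel defines it: step back from the member
 * address to the start of the containing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Completion callback: only the embedded member is passed back. */
static void vnic_complete_stub(struct sdma_txreq_stub *txreq, int status)
{
	struct vnic_txreq_stub *tx =
		container_of(txreq, struct vnic_txreq_stub, txreq);

	printf("packet %d completed, status %d\n", tx->pkt_id, status);
}

int main(void)
{
	struct vnic_txreq_stub tx = { .pkt_id = 42 };

	tx.txreq.complete = vnic_complete_stub;
	/* The engine would invoke this once the DMA finishes. */
	tx.txreq.complete(&tx.txreq, 0);
	return 0;
}
```

The benefit of the embedding is that the SDMA engine never has to know about driver-private state; it only ever sees the embedded request and its callback.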
ipoib_tx.c
     27  * @txreq: sdma transmit request
     35  struct sdma_txreq txreq;  (member)
    112  * The size of the txreq ring is fixed at initialization.  (in hfi1_ipoib_check_queue_stopped())
    142  sdma_txclean(priv->dd, &tx->txreq);  (in hfi1_ipoib_free_tx())
    210  /* Finish storing txreq before incrementing head. */  (in hfi1_ipoib_add_tx())
    226  static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)  (in hfi1_ipoib_sdma_complete(), argument)
    228  struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);  (in hfi1_ipoib_sdma_complete())
    239  struct sdma_txreq *txreq = &tx->txreq;  (in hfi1_ipoib_build_ulp_payload(), local)
    270  struct sdma_txreq *txreq = &tx->txreq;  (hfi1_ipoib_build_tx_desc(), local)
    627  hfi1_ipoib_sdma_sleep(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent)  (hfi1_ipoib_sdma_sleep(), argument)
    798  struct sdma_txreq *txreq;  (hfi1_ipoib_drain_tx_list(), local)
         [additional matches elided]
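The ipoib_tx.c comment at its line 210 ("Finish storing txreq before incrementing head") is the usual single-producer ring ordering: the slot must be fully written before the new head index is published to the consumer. A hedged user-space sketch of that ordering with C11 atomics; the ring layout and names are illustrative, and the kernel code uses its own barrier primitives rather than <stdatomic.h>.

```c
#include <stdatomic.h>
#include <stddef.h>

#define RING_SIZE 64u   /* power of two; fixed at initialization, as the comment says */

struct slot { void *payload; };

struct tx_ring {
	struct slot entries[RING_SIZE];
	_Atomic size_t head;   /* written only by the producer */
	_Atomic size_t tail;   /* written only by the consumer */
};

/* Producer: store the entry, then publish it by bumping head. */
static int ring_add(struct tx_ring *ring, void *payload)
{
	size_t head = atomic_load_explicit(&ring->head, memory_order_relaxed);
	size_t tail = atomic_load_explicit(&ring->tail, memory_order_acquire);

	if (head - tail == RING_SIZE)
		return -1;                          /* ring full */

	ring->entries[head & (RING_SIZE - 1)].payload = payload;

	/* Finish storing the entry before incrementing head. */
	atomic_store_explicit(&ring->head, head + 1, memory_order_release);
	return 0;
}
```

The release store on head pairs with an acquire load on the consumer side, so any reader that observes the incremented head also observes the fully stored entry.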
verbs_txreq.h
     60  struct sdma_txreq txreq;  (member)
     95  tx->txreq.num_desc = 0;
     98  tx->txreq.flags = 0;
    104  return &tx->txreq;  (in get_sdma_txreq())
    113  return container_of(stx, struct verbs_txreq, txreq);  (in get_waiting_verbs_txreq())
user_sdma.c
     80  static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
    100  struct sdma_txreq *txreq,
    124  struct sdma_txreq *txreq,  (in defer_packet_queue())
    133  if (sdma_progress(sde, seq, txreq))  (in defer_packet_queue())
    205  snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,  (in hfi1_user_sdma_alloc_queues())
    725  ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,  (in user_sdma_txadd_ahg())
    730  ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));  (in user_sdma_txadd_ahg())
    732  sdma_txclean(pq->dd, &tx->txreq);  (in user_sdma_txadd_ahg())
    845  ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +  (in user_sdma_send_pkts())
    871  list_add_tail(&tx->txreq…  (in user_sdma_send_pkts())
    121  defer_packet_queue(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent)  (defer_packet_queue(), argument)
   1226  user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)  (user_sdma_txreq_cb(), argument)
         [additional matches elided]
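The snprintf() match at user_sdma.c line 205 shows each user context getting its own slab cache whose name encodes the unit and context numbers. Below is a simplified kernel-style sketch of that allocation step; it builds only against kernel headers, kmem_cache_create() is the real slab API, and the request type and helper name are placeholders.

```c
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/slab.h>

/* Hypothetical per-packet request; stands in for the driver's txreq type. */
struct my_txreq {
	/* ... per-packet state plus the embedded sdma_txreq ... */
	unsigned long opaque[8];
};

/* Create a slab cache whose name encodes unit and context, in the same
 * spirit as the "txreq-kmem-cache-%u-%u-%u" name built by user_sdma. */
static struct kmem_cache *alloc_txreq_cache(unsigned int unit, unsigned int ctxt)
{
	char buf[64];

	snprintf(buf, sizeof(buf), "txreq-kmem-cache-%u-%u", unit, ctxt);
	return kmem_cache_create(buf, sizeof(struct my_txreq),
				 L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
}
```

Giving each context its own named cache makes that context's allocation footprint visible as a separate row in /proc/slabinfo.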
verbs.c
    148  /* Length of buffer to create verbs txreq cache name */
    632  container_of(cookie, struct verbs_txreq, txreq);  (in verbs_sdma_complete())
    679  list_add_tail(&ps->s_txreq->txreq.list,  (in wait_kmem())
    713  &tx->txreq,  (in build_verbs_ulp_payload())
    782  &tx->txreq,  (in build_verbs_tx_desc())
    796  &tx->txreq,  (in build_verbs_tx_desc())
    803  &tx->txreq,  (in build_verbs_tx_desc())
    823  ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,  (in build_verbs_tx_desc())
    865  if (!sdma_txreq_built(&tx->txreq)) {  (in hfi1_verbs_send_dma())
    894  ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, p…  (in hfi1_verbs_send_dma())
         [additional matches elided]
user_sdma.h
    225  * A single txreq could span up to 3 physical pages when the MTU
    231  /* Packet header for the txreq */
    233  struct sdma_txreq txreq;  (member)
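The user_sdma.h comment about a single txreq spanning "up to 3 physical pages" follows from simple arithmetic: a payload of at most two pages' worth of bytes that starts at an arbitrary offset inside its first page can straddle at most three pages. A small user-space check of that bound; the 4 KiB page size and the 8 KiB payload figure are assumptions for illustration, not values taken from the header.

```c
#include <stdio.h>

#define PAGE_SIZE 4096u   /* assumed 4 KiB pages */

/* Number of physical pages touched by a buffer of 'len' bytes that
 * starts 'offset' bytes into its first page (ceil((offset+len)/PAGE_SIZE)). */
static unsigned int pages_spanned(unsigned int offset, unsigned int len)
{
	return (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
	unsigned int payload = 2 * PAGE_SIZE;   /* illustrative two-page payload */

	/* Worst case: the payload begins one byte before a page boundary. */
	printf("aligned: %u pages, worst case: %u pages\n",
	       pages_spanned(0, payload),
	       pages_spanned(PAGE_SIZE - 1, payload));   /* prints 2 and 3 */
	return 0;
}
```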
verbs_txreq.c
     69  sdma_txclean(dd_from_dev(dev), &tx->txreq);  (in hfi1_put_txreq())
qp.c
    160  container_of(tx, struct verbs_txreq, txreq));  (in flush_list_head())
    484  struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);  (in iowait_sleep())
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
vnic_sdma.c
     21  * @txreq: sdma transmit request
     29  struct sdma_txreq txreq;  (member)
     38  static void vnic_sdma_complete(struct sdma_txreq *txreq,  (in vnic_sdma_complete(), argument)
     41  struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);  (in vnic_sdma_complete())
     44  sdma_txclean(vnic_sdma->dd, txreq);  (in vnic_sdma_complete())
     56  &tx->txreq,  (in build_vnic_ulp_payload())
     67  &tx->txreq,  (in build_vnic_ulp_payload())
     77  ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,  (in build_vnic_ulp_payload())
     93  &tx->txreq,  (in build_vnic_tx_desc())
    186  hfi1_vnic_sdma_sleep(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent)  (hfi1_vnic_sdma_sleep(), argument)
         [additional matches elided]
ipoib_tx.c
    100  * The size of the txreq ring is fixed at initialization.  (in hfi1_ipoib_check_queue_stopped())
    131  sdma_txclean(priv->dd, &tx->txreq);  (in hfi1_ipoib_free_tx())
    145  sdma_txclean(txq->priv->dd, &tx->txreq);  (in hfi1_ipoib_drain_tx_ring())
    188  static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)  (in hfi1_ipoib_sdma_complete(), argument)
    190  struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);  (in hfi1_ipoib_sdma_complete())
    203  struct sdma_txreq *txreq = &tx->txreq;  (in hfi1_ipoib_build_ulp_payload(), local)
    209  ret = sdma_txadd_kvaddr(dd, txreq, skb->data, skb_headlen(skb));  (in hfi1_ipoib_build_ulp_payload())
    218  txreq,  (in hfi1_ipoib_build_ulp_payload())
    234  struct sdma_txreq *txreq = &tx->txreq;  (hfi1_ipoib_build_tx_desc(), local)
    613  hfi1_ipoib_sdma_sleep(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent)  (hfi1_ipoib_sdma_sleep(), argument)
    772  struct sdma_txreq *txreq;  (hfi1_ipoib_drain_tx_list(), local)
         [additional matches elided]
verbs_txreq.h
     18  struct sdma_txreq txreq;  (member)
     53  tx->txreq.num_desc = 0;
     56  tx->txreq.flags = 0;
     66  return container_of(stx, struct verbs_txreq, txreq);  (in get_waiting_verbs_txreq())
user_sdma.c
     39  static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
     59  struct sdma_txreq *txreq,
     67  struct sdma_txreq *txreq,  (in defer_packet_queue())
     76  if (sdma_progress(sde, seq, txreq))  (in defer_packet_queue())
    148  snprintf(buf, 64, "txreq-kmem-cache-%u-%u-%u", dd->unit, uctxt->ctxt,  (in hfi1_user_sdma_alloc_queues())
    664  ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,  (in user_sdma_txadd_ahg())
    669  ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));  (in user_sdma_txadd_ahg())
    671  sdma_txclean(pq->dd, &tx->txreq);  (in user_sdma_txadd_ahg())
    784  ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +  (in user_sdma_send_pkts())
    810  list_add_tail(&tx->txreq…  (in user_sdma_send_pkts())
     64  defer_packet_queue(struct sdma_engine *sde, struct iowait_work *wait, struct sdma_txreq *txreq, uint seq, bool pkts_sent)  (defer_packet_queue(), argument)
   1154  user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)  (user_sdma_txreq_cb(), argument)
         [additional matches elided]
verbs.c
    106  /* Length of buffer to create verbs txreq cache name */
    590  container_of(cookie, struct verbs_txreq, txreq);  (in verbs_sdma_complete())
    637  list_add_tail(&ps->s_txreq->txreq.list,  (in wait_kmem())
    671  &tx->txreq,  (in build_verbs_ulp_payload())
    740  &tx->txreq,  (in build_verbs_tx_desc())
    754  &tx->txreq,  (in build_verbs_tx_desc())
    761  &tx->txreq,  (in build_verbs_tx_desc())
    781  ret = sdma_txadd_daddr(sde->dd, &tx->txreq, sde->dd->sdma_pad_phys,  (in build_verbs_tx_desc())
    823  if (!sdma_txreq_built(&tx->txreq)) {  (in hfi1_verbs_send_dma())
    852  ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, p…  (in hfi1_verbs_send_dma())
         [additional matches elided]
user_sdma.h
    179  * A single txreq could span up to 3 physical pages when the MTU
    185  /* Packet header for the txreq */
    187  struct sdma_txreq txreq;  (member)
ipoib.h
     48  * @txreq: sdma transmit request
     57  struct sdma_txreq txreq;  (member)
     70  * @shift: log2 of size for getting txreq
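The ipoib.h note that @shift holds the log2 of the per-entry size suggests the txreq ring looks a slot up by offsetting into one flat allocation: slot address = base + (index << shift). A hedged user-space sketch of that lookup; the structure and field names are invented, and only the shift arithmetic is the point.

```c
#include <stdint.h>
#include <stdlib.h>

struct txreq_slot { uint64_t scratch[8]; };   /* illustrative per-entry layout */

struct txreq_ring {
	uint8_t *base;        /* one contiguous allocation holding every entry */
	unsigned int shift;   /* log2 of the (power-of-two) entry size */
	unsigned long mask;   /* number of entries - 1 (entries is a power of two) */
};

/* Index into the flat allocation: base + (index << shift). */
static struct txreq_slot *ring_slot(struct txreq_ring *ring, unsigned long idx)
{
	return (struct txreq_slot *)(ring->base + ((idx & ring->mask) << ring->shift));
}

static int ring_init(struct txreq_ring *ring, unsigned long nentries)
{
	unsigned int shift = 0;

	/* Round the entry size up to a power of two so a shift can replace a multiply. */
	while ((1ul << shift) < sizeof(struct txreq_slot))
		shift++;

	ring->shift = shift;
	ring->mask = nentries - 1;
	ring->base = calloc(nentries, 1ul << shift);
	return ring->base ? 0 : -1;
}
```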
verbs_txreq.c
     27  sdma_txclean(dd_from_dev(dev), &tx->txreq);  (in hfi1_put_txreq())
qp.c
    118  container_of(tx, struct verbs_txreq, txreq));  (in flush_list_head())
    443  struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);  (in iowait_sleep())
pin_system.c
    325  ret = sdma_txadd_page(pq->dd, &tx->txreq,  (in add_mapping_to_sdma_packet())
    393  * Add up to pkt_data_remaining bytes to the txreq, starting at the current
/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
qib_sdma.c
    496  tx->txreq.start_idx = 0;  (in complete_sdma_err_req())
    497  tx->txreq.next_descq_idx = 0;  (in complete_sdma_err_req())
    498  list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);  (in complete_sdma_err_req())
    534  if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {  (in qib_sdma_verbs_send())
    544  make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);  (in qib_sdma_verbs_send())
    547  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  (in qib_sdma_verbs_send())
    563  tx->txreq.start_idx = tail;  (in qib_sdma_verbs_send())
    580  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  (in qib_sdma_verbs_send())
    601  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)  (in qib_sdma_verbs_send())
    603  if (tx->txreq…  (in qib_sdma_verbs_send())
         [additional matches elided]
qib_verbs.c
    575  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  (in __get_txreq())
    604  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  (in get_txreq())
    627  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {  (in qib_put_txreq())
    628  tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;  (in qib_put_txreq())
    630  tx->txreq.addr, tx->hdr_dwords << 2,  (in qib_put_txreq())
    638  list_add(&tx->txreq.list, &dev->txreq_free);  (in qib_put_txreq())
    686  if (qpp->s_tx->txreq.sg_count > avail)  (in qib_verbs_sdma_desc_avail())
    688  avail -= qpp->s_tx->txreq.sg_count;  (in qib_verbs_sdma_desc_avail())
    714  container_of(cookie, struct qib_verbs_txreq, txreq);  (in sdma_complete())
    724  if (tx->txreq…  (in sdma_complete())
         [additional matches elided]
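The qib_verbs.c matches show txreqs being taken from and returned to a free list headed at dev->txreq_free, with list_entry() mapping a list node back to its containing request. Below is a kernel-style sketch of such a get/put pair with invented names; only list_empty(), list_del(), list_entry(), list_add(), and the spinlock calls are the real kernel API, and it builds only against kernel headers.

```c
#include <linux/list.h>
#include <linux/spinlock.h>

struct my_txreq {
	struct list_head list;   /* links the request into the free list */
	/* ... per-request state ... */
};

struct my_dev {
	spinlock_t lock;               /* initialized with spin_lock_init() */
	struct list_head txreq_free;   /* initialized with INIT_LIST_HEAD() */
};

/* Pop one request off the free list, or return NULL if it is empty. */
static struct my_txreq *my_get_txreq(struct my_dev *dev)
{
	struct my_txreq *tx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		tx = list_entry(l, struct my_txreq, list);
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	return tx;
}

/* Return a request to the free list once its DMA has completed. */
static void my_put_txreq(struct my_dev *dev, struct my_txreq *tx)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	list_add(&tx->list, &dev->txreq_free);
	spin_unlock_irqrestore(&dev->lock, flags);
}
```

Recycling requests through a free list keeps allocation off the hot transmit path; list_entry() is simply container_of() applied to the embedded list node.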
qib.h
    248  struct qib_sdma_txreq txreq;  (member)
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/
qib_sdma.c
    496  tx->txreq.start_idx = 0;  (in complete_sdma_err_req())
    497  tx->txreq.next_descq_idx = 0;  (in complete_sdma_err_req())
    498  list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);  (in complete_sdma_err_req())
    534  if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {  (in qib_sdma_verbs_send())
    544  make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);  (in qib_sdma_verbs_send())
    547  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  (in qib_sdma_verbs_send())
    563  tx->txreq.start_idx = tail;  (in qib_sdma_verbs_send())
    580  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  (in qib_sdma_verbs_send())
    601  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)  (in qib_sdma_verbs_send())
    603  if (tx->txreq…  (in qib_sdma_verbs_send())
         [additional matches elided]
qib_verbs.c
    575  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  (in __get_txreq())
    604  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  (in get_txreq())
    627  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {  (in qib_put_txreq())
    628  tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;  (in qib_put_txreq())
    630  tx->txreq.addr, tx->hdr_dwords << 2,  (in qib_put_txreq())
    638  list_add(&tx->txreq.list, &dev->txreq_free);  (in qib_put_txreq())
    686  if (qpp->s_tx->txreq.sg_count > avail)  (in qib_verbs_sdma_desc_avail())
    688  avail -= qpp->s_tx->txreq.sg_count;  (in qib_verbs_sdma_desc_avail())
    714  container_of(cookie, struct qib_verbs_txreq, txreq);  (in sdma_complete())
    724  if (tx->txreq…  (in sdma_complete())
         [additional matches elided]
/kernel/linux/linux-5.10/drivers/net/xen-netback/
netback.c
    924  struct xen_netif_tx_request txreq;  (in xenvif_tx_build_gops(), local)
    951  RING_COPY_REQUEST(&queue->tx, idx, &txreq);  (in xenvif_tx_build_gops())
    954  if (txreq.size > queue->remaining_credit &&  (in xenvif_tx_build_gops())
    955  tx_credit_exceeded(queue, txreq.size))  (in xenvif_tx_build_gops())
    958  queue->remaining_credit -= txreq.size;  (in xenvif_tx_build_gops())
    965  if (txreq.flags & XEN_NETTXF_extra_info) {  (in xenvif_tx_build_gops())
    980  make_tx_response(queue, &txreq, extra_count,  (in xenvif_tx_build_gops())
    993  make_tx_response(queue, &txreq, extra_count,  (in xenvif_tx_build_gops())
    998  data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?  (in xenvif_tx_build_gops())
    999  XEN_NETBACK_TX_COPY_LEN : txreq…  (in xenvif_tx_build_gops())
         [additional matches elided]
/kernel/linux/linux-6.6/drivers/net/xen-netback/
netback.c
    924  struct xen_netif_tx_request txreq;  (in xenvif_tx_build_gops(), local)
    949  RING_COPY_REQUEST(&queue->tx, idx, &txreq);  (in xenvif_tx_build_gops())
    952  if (txreq.size > queue->remaining_credit &&  (in xenvif_tx_build_gops())
    953  tx_credit_exceeded(queue, txreq.size))  (in xenvif_tx_build_gops())
    956  queue->remaining_credit -= txreq.size;  (in xenvif_tx_build_gops())
    963  if (txreq.flags & XEN_NETTXF_extra_info) {  (in xenvif_tx_build_gops())
    978  make_tx_response(queue, &txreq, extra_count,  (in xenvif_tx_build_gops())
    991  make_tx_response(queue, &txreq, extra_count,  (in xenvif_tx_build_gops())
    996  data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?  (in xenvif_tx_build_gops())
    997  XEN_NETBACK_TX_COPY_LEN : txreq…  (in xenvif_tx_build_gops())
         [additional matches elided]
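Both xen-netback trees show the same bookkeeping around the copied guest request: the request size is checked against (and, when accepted, subtracted from) the queue's remaining transmit credit, and only the head of the packet, clamped to XEN_NETBACK_TX_COPY_LEN, is copied linearly. A small user-space sketch of those two steps; the queue structure, the 128-byte copy length, and the omission of the credit-replenish logic behind tx_credit_exceeded() are simplifying assumptions for illustration.

```c
#include <stdbool.h>
#include <stdio.h>

#define TX_COPY_LEN 128u   /* assumed copy threshold, standing in for XEN_NETBACK_TX_COPY_LEN */

struct tx_queue_stub {
	unsigned int remaining_credit;   /* bytes the guest may still transmit */
};

struct tx_request_stub {
	unsigned int size;               /* total packet size requested by the guest */
};

/* Returns false (and consumes no credit) when the request exceeds the budget;
 * the real code would also try to replenish the credit before deferring. */
static bool charge_credit(struct tx_queue_stub *q, const struct tx_request_stub *req)
{
	if (req->size > q->remaining_credit)
		return false;

	q->remaining_credit -= req->size;
	return true;
}

int main(void)
{
	struct tx_queue_stub q = { .remaining_credit = 1000 };
	struct tx_request_stub req = { .size = 300 };

	if (charge_credit(&q, &req)) {
		/* Only the head of the packet is copied; the rest is grant-mapped. */
		unsigned int data_len = req.size > TX_COPY_LEN ? TX_COPY_LEN : req.size;

		printf("copy %u of %u bytes, credit left %u\n",
		       data_len, req.size, q.remaining_credit);
	}
	return 0;
}
```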