Lines Matching refs:tx

231 * The tx request, once initialized, is manipulated with calls to
240 * in the tx. Memory locations added with sdma_txadd_page()
242 * to the tx and unmapped as part of the progress processing in the
246 * tx. An example of a use case would be a pre-allocated
253 * a tx to the ring after the appropriate number of
260 * long as the tx isn't in flight.
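Read together, lines 231-260 describe the txreq lifecycle: initialize the request, attach each disjoint memory location, then submit it to an engine; the completion callback runs after the progress code has unmapped whatever the txadd helpers mapped. A minimal sketch of that flow, assuming the hfi1 signatures for sdma_txinit() and sdma_txadd_kvaddr() (both matched further down this listing), a two-argument completion callback, and a submit through sdma_send_txreq(), whose trailing arguments differ between kernel versions; the tx must come from caller-owned storage that outlives the DMA, e.g. the pre-allocated structure hinted at by line 246:

    static void my_tx_done(struct sdma_txreq *tx, int status)
    {
            /* runs from SDMA progress handling (ISR/tasklet context, per
             * the comments above); buffers added with the txadd helpers
             * have already been unmapped by this point */
    }

    static int send_one(struct hfi1_devdata *dd, struct sdma_engine *sde,
                        struct iowait *wait, struct sdma_txreq *tx,
                        void *hdr, u16 hdrlen, void *data, u16 datalen)
    {
            int ret;

            ret = sdma_txinit(tx, 0, hdrlen + datalen, my_tx_done);
            if (ret)
                    return ret;     /* init rejects a bad total length */
            ret = sdma_txadd_kvaddr(dd, tx, hdr, hdrlen);
            if (!ret)
                    ret = sdma_txadd_kvaddr(dd, tx, data, datalen);
            if (ret)
                    return ret;     /* helpers clean the tx on map failure */
            return sdma_send_txreq(sde, wait, tx);
    }
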
481 struct sdma_txreq *tx,
489 * @tx: tx request to initialize
520 * being submitted. The callback will be provided this tx and a status.
540 struct sdma_txreq *tx,
553 tx->desc_limit = ARRAY_SIZE(tx->descs);
554 tx->descp = &tx->descs[0];
555 INIT_LIST_HEAD(&tx->list);
556 tx->num_desc = 0;
557 tx->flags = flags;
558 tx->complete = cb;
559 tx->coalesce_buf = NULL;
560 tx->wait = NULL;
561 tx->packet_len = tlen;
562 tx->tlen = tx->packet_len;
563 tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
564 tx->descs[0].qw[1] = 0;
566 tx->descs[0].qw[1] |=
572 _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
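Lines 553-564 reset the request and seed descriptor 0 with SDMA_DESC0_FIRST_DESC_FLAG; the AHG arguments only come into play when the flags request header generation (the _sdma_txreq_ahgadd() call at line 572). A hedged call-site sketch: the SDMA_TXREQ_F_USE_AHG flag exists in the driver, but tx, ahg_entry, ahg_hlen, my_tx_done, and the update words are purely illustrative here:

    u32 ahg_words[2] = { 0, 0 };           /* illustrative header-update words */

    ret = sdma_txinit_ahg(&tx,
                          SDMA_TXREQ_F_USE_AHG,  /* apply AHG header updates */
                          tlen,                  /* total packet length */
                          ahg_entry,             /* AHG table entry to use */
                          ARRAY_SIZE(ahg_words), /* number of update words */
                          ahg_words,
                          ahg_hlen,              /* header bytes built by HW */
                          my_tx_done);
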
578 * @tx: tx request to initialize
602 * The callback, if non-NULL, will be provided this tx and a status. The
608 struct sdma_txreq *tx,
613 return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
636 struct sdma_txreq *tx,
644 struct sdma_desc *desc = &tx->descp[tx->num_desc];
646 if (!tx->num_desc) {
667 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
673 static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
675 if (tx->num_desc)
676 __sdma_txclean(dd, tx);
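The wrapper at line 673 makes cleanup a no-op when no descriptor was ever added, so error paths can call it unconditionally; a hedged example of the idiom, where build_req() stands in for any hypothetical sequence of txadd calls:

    ret = build_req(dd, tx);        /* hypothetical descriptor-building step */
    if (ret)
            sdma_txclean(dd, tx);   /* safe even if build_req() added nothing */
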
681 struct sdma_txreq *tx)
683 u16 last_desc = tx->num_desc - 1;
685 tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
686 tx->descp[last_desc].qw[1] |= dd->default_desc1;
687 if (tx->flags & SDMA_TXREQ_F_URGENT)
688 tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
695 struct sdma_txreq *tx,
705 tx,
709 WARN_ON(len > tx->tlen);
710 tx->num_desc++;
711 tx->tlen -= len;
713 if (!tx->tlen) {
714 if (tx->packet_len & (sizeof(u32) - 1)) {
715 rval = _pad_sdma_tx_descs(dd, tx);
719 _sdma_close_tx(dd, tx);
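Lines 709-719 are the per-fragment bookkeeping: tlen counts down as memory locations are attached, and when it reaches zero the request is finished, either directly via _sdma_close_tx() (which stamps the last-descriptor flags shown at lines 685-688) or via _pad_sdma_tx_descs() when the total packet length is not a multiple of 4. An illustrative helper, not part of the driver, for the pad size that check implies:

    static inline u16 sdma_pad_bytes(u16 packet_len)
    {
            /* bytes needed to round packet_len up to a u32 boundary;
             * zero when (packet_len & (sizeof(u32) - 1)) is already zero */
            return -packet_len & (sizeof(u32) - 1);
    }
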
728 * @tx: tx request to which the page is added
751 struct sdma_txreq *tx,
762 if (unlikely(tx->num_desc == tx->desc_limit)) {
763 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
777 __sdma_txclean(dd, tx);
781 return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
788 * @tx: sdma_txreq to which the page is added
803 struct sdma_txreq *tx,
809 if (unlikely(tx->num_desc == tx->desc_limit)) {
810 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
816 return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
823 * @tx: sdma_txreq to which the page is added
838 struct sdma_txreq *tx,
845 if (unlikely(tx->num_desc == tx->desc_limit)) {
846 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
859 __sdma_txclean(dd, tx);
863 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
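The three add helpers (lines 751, 803, 838) differ mainly in mapping ownership: sdma_txadd_page() and sdma_txadd_kvaddr() create the DMA mapping at add time (SDMA_MAP_PAGE / SDMA_MAP_SINGLE) and undo it in progress processing, and both call __sdma_txclean() themselves if the mapping fails (lines 777, 859), while sdma_txadd_daddr() records a caller-mapped address (SDMA_MAP_NONE) and leaves unmapping to the caller. A hedged sketch, assuming the signatures from the driver version this listing was taken from:

    /* kernel-virtual buffer: mapped here, unmapped by progress handling */
    ret = sdma_txadd_kvaddr(dd, tx, hdr, hdrlen);

    /* page fragment: same ownership rules, addressed as page + offset */
    if (!ret)
            ret = sdma_txadd_page(dd, tx, page, offset, fraglen);

    /* pre-mapped bus address: recorded but never unmapped by the tx */
    if (!ret)
            ret = sdma_txadd_daddr(dd, tx, dma_addr, dmalen);
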
871 struct sdma_txreq *tx,
911 * @tx: txreq for which we need to check descriptor availability
922 struct sdma_txreq *tx)
926 if (tx->num_desc > sde->desc_avail)
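Line 926 is the resubmission test the sleep path relies on: if the engine's head moved since seq was sampled, the ring may now hold enough free descriptors for this tx, in which case the submit should be retried instead of parked. A hedged sketch of the callback shape that uses it; everything apart from sdma_progress() is an assumption about the surrounding driver code:

    static int my_sleep(struct sdma_engine *sde, struct iowait *wait,
                        struct sdma_txreq *tx, unsigned int seq)
    {
            /* 'seq' was sampled from the engine's head seqlock by the
             * submit path before deciding whether to park this tx */
            if (sdma_progress(sde, seq, tx))
                    return -EAGAIN; /* progress was made: retry the submit */
            /* otherwise queue 'wait' on the engine and sleep */
            return -EBUSY;
    }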