Lines Matching refs:tx

190 * The tx request, once initialized, is manipulated with calls to
199 * in the tx. Memory locations added with sdma_txadd_page()
201 * to the tx and unmapped as part of the progress processing in the
205 * tx. An example of a use case would be a pre-allocated
212 * a tx to the ring after the appropriate number of
219 * long as the tx isn't in flight.
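
To make the lifecycle described above concrete, here is a minimal caller
sketch. It is an illustration, not code from this header: struct
example_pkt, example_send(), and the field names are hypothetical, and
sdma_send_txreq()'s exact signature has drifted across kernel versions.

	/* Hypothetical container: a pre-allocated packet with an embedded
	 * tx, matching the pre-allocated use case mentioned above. */
	struct example_pkt {
		struct sdma_txreq txreq;
		void *hdr;
		u16 hdrlen;
	};

	static int example_send(struct hfi1_devdata *dd,
				struct sdma_engine *sde,
				struct example_pkt *pkt)
	{
		int ret;

		ret = sdma_txinit(&pkt->txreq, 0, pkt->hdrlen, NULL);
		if (ret)
			return ret;
		/* mapped now; unmapped later by the progress processing */
		ret = sdma_txadd_kvaddr(dd, &pkt->txreq, pkt->hdr,
					pkt->hdrlen);
		if (ret)
			return ret; /* failing txadd already cleaned the tx */
		/* commit to the ring; signature varies by kernel version */
		return sdma_send_txreq(sde, NULL, &pkt->txreq);
	}
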
440 struct sdma_txreq *tx,
448 * @tx: tx request to initialize
479 * being submitted. The callback will be provided this tx, a status, and a flag.
499 struct sdma_txreq *tx,
512 tx->desc_limit = ARRAY_SIZE(tx->descs);
513 tx->descp = &tx->descs[0];
514 INIT_LIST_HEAD(&tx->list);
515 tx->num_desc = 0;
516 tx->flags = flags;
517 tx->complete = cb;
518 tx->coalesce_buf = NULL;
519 tx->wait = NULL;
520 tx->packet_len = tlen;
521 tx->tlen = tx->packet_len;
522 tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG;
523 tx->descs[0].qw[1] = 0;
525 tx->descs[0].qw[1] |=
531 _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen);
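
A hedged sketch of a completion callback matching the description above.
The callback signature has changed across kernel versions (older ones
pass an extra "drained" flag, which is the flag the comment refers to);
the two-argument form and example_pkt_free() are assumptions here.

	/* Runs from ISR/tasklet/thread context, so it must not sleep. */
	static void example_complete(struct sdma_txreq *tx, int status)
	{
		struct example_pkt *pkt =
			container_of(tx, struct example_pkt, txreq);

		if (status != SDMA_TXREQ_S_OK)
			pr_warn("example: tx failed, status %d\n", status);
		example_pkt_free(pkt); /* hypothetical pool return */
	}
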
537 * @tx: tx request to initialize
561 * The callback, if non-NULL, will be provided this tx and a status. The
567 struct sdma_txreq *tx,
572 return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
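
As the delegation above shows, sdma_txinit() is simply sdma_txinit_ahg()
with the AHG arguments zeroed out. A one-line usage sketch (names
hypothetical):

	ret = sdma_txinit(&pkt->txreq, 0 /* flags */, tlen, example_complete);
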
595 struct sdma_txreq *tx,
603 struct sdma_desc *desc = &tx->descp[tx->num_desc];
605 if (!tx->num_desc) {
626 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
632 static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
634 if (tx->num_desc)
635 __sdma_txclean(dd, tx);
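
The inline wrapper above skips the call entirely when no descriptors were
ever added. A caller-side sketch of the abort path it supports
(build_more_fragments() is hypothetical):

	ret = build_more_fragments(dd, pkt);
	if (ret) {
		/* unmap whatever the txadds mapped and reset the tx */
		sdma_txclean(dd, &pkt->txreq);
		return ret;
	}
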
640 struct sdma_txreq *tx)
642 u16 last_desc = tx->num_desc - 1;
644 tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG;
645 tx->descp[last_desc].qw[1] |= dd->default_desc1;
646 if (tx->flags & SDMA_TXREQ_F_URGENT)
647 tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG |
654 struct sdma_txreq *tx,
664 tx,
668 WARN_ON(len > tx->tlen);
669 tx->num_desc++;
670 tx->tlen -= len;
672 if (!tx->tlen) {
673 if (tx->packet_len & (sizeof(u32) - 1)) {
674 rval = _pad_sdma_tx_descs(dd, tx);
678 _sdma_close_tx(dd, tx);
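
The alignment test above masks the low two bits of the packet length:
packets whose length is not a multiple of sizeof(u32) get a pad
descriptor before the tx is closed. A hypothetical helper showing the
same arithmetic:

	static inline bool needs_dword_pad(u16 packet_len)
	{
		/* 60 & 0x3 == 0 -> close directly; 61 & 0x3 == 1 -> pad */
		return packet_len & (sizeof(u32) - 1);
	}
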
687 * @tx: tx request to which the page is added
710 struct sdma_txreq *tx,
721 if (unlikely(tx->num_desc == tx->desc_limit)) {
722 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE,
736 __sdma_txclean(dd, tx);
740 return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len,
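
A usage sketch for the page variant, assuming the classic argument list
(newer kernels add pinning-context parameters):

	ret = sdma_txadd_page(dd, &pkt->txreq, page, offset, frag_len);
	if (ret)
		return ret; /* the failure path above already cleaned the tx */
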
747 * @tx: sdma_txreq to which the page is added
762 struct sdma_txreq *tx,
768 if (unlikely(tx->num_desc == tx->desc_limit)) {
769 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
775 return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len,
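
Note the SDMA_MAP_NONE here: the caller supplies an address it has
already DMA-mapped, so the engine records the fragment without mapping
it and will not unmap it on completion. Sketch:

	/* dma_addr was mapped by the caller and stays the caller's to unmap */
	ret = sdma_txadd_daddr(dd, &pkt->txreq, dma_addr, frag_len);
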
782 * @tx: sdma_txreq to which the page is added
797 struct sdma_txreq *tx,
804 if (unlikely(tx->num_desc == tx->desc_limit)) {
805 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
818 __sdma_txclean(dd, tx);
822 return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len,
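
Here SDMA_MAP_SINGLE means the kernel-virtual buffer is DMA-mapped as
part of the add and unmapped automatically during progress processing,
per the overview at the top of the file. Sketch:

	ret = sdma_txadd_kvaddr(dd, &pkt->txreq, payload_buf, payload_len);
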
830 struct sdma_txreq *tx,
870 * @tx: txreq for which we need to check descriptor availability
881 struct sdma_txreq *tx)
885 if (tx->num_desc > sde->desc_avail)
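
This availability test backs the sleep/retry pattern: a caller about to
queue on an iowait first samples the ring's head seqcount, then uses the
check to detect whether enough descriptors freed up in the meantime. A
loose sketch, assuming this is the sdma_progress() helper from this
header; the seqlock API shape shown is an assumption:

	seq = read_seqbegin(&sde->head_lock); /* assumed lock type/API */
	if (sdma_progress(sde, seq, tx))
		return -EAGAIN; /* ring advanced; retry rather than sleep */
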