Lines Matching refs:tx (references to the symbol tx)
83 struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
84 struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;
87 dev_kfree_skb_any(tx->skb);
88 kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
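The matches at lines 83-88 look like the SDMA completion callback in the hfi1 driver's VNIC transmit path (vnic_sdma.c): container_of() recovers the driver-private vnic_txreq from the embedded sdma_txreq, the skb is freed, and the request is returned to its slab cache. Below is a minimal sketch of that callback under that assumption; the function name, the status parameter, and the sdma_txclean() step are reconstructions rather than matched lines, and this and the later sketches assume the driver's internal sdma.h/vnic.h declarations plus the usual skbuff/slab headers.

static void vnic_sdma_complete(struct sdma_txreq *txreq, int status)
{
        /* line 83: recover the outer request from the embedded sdma_txreq */
        struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
        struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;             /* line 84 */

        /* unmap the descriptor chain before freeing (assumed step) */
        sdma_txclean(vnic_sdma->dd, txreq);
        dev_kfree_skb_any(tx->skb);                              /* line 87 */
        kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);    /* line 88 */
}

dev_kfree_skb_any() keeps the free safe whether the completion runs in interrupt or process context.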
92 struct vnic_txreq *tx)
98 &tx->txreq,
99 tx->skb->data,
100 skb_headlen(tx->skb));
104 for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
105 skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];
109 &tx->txreq,
118 if (tx->plen)
119 ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
120 tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
121 tx->plen);
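Lines 92-121 map one packet into SDMA descriptors: the skb's linear data via sdma_txadd_kvaddr(), one descriptor per page fragment in a loop over skb_shinfo(tx->skb)->nr_frags, and finally the trailing pad taken from the end of tx->pad when tx->plen is non-zero. A minimal sketch under the same assumptions; the sdma_txadd_page() call and the skb_frag_* accessors are reconstructions, since the matched lines only show the &tx->txreq argument inside the loop:

static int build_vnic_ulp_payload(struct sdma_engine *sde,
                                  struct vnic_txreq *tx)
{
        int i, ret = 0;

        /* lines 98-100: linear (headlen) portion of the skb */
        ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
                                tx->skb->data, skb_headlen(tx->skb));
        if (unlikely(ret))
                return ret;

        /* lines 104-109: one descriptor per page fragment */
        for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

                ret = sdma_txadd_page(sde->dd, &tx->txreq,
                                      skb_frag_page(frag),
                                      skb_frag_off(frag),
                                      skb_frag_size(frag));
                if (unlikely(ret))
                        return ret;
        }

        /* lines 118-121: trailing pad, taken from the tail of tx->pad */
        if (tx->plen)
                ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
                                        tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
                                        tx->plen);
        return ret;
}

Addressing the pad as tx->pad + HFI1_VNIC_MAX_PAD - tx->plen means only the last plen bytes of the fixed-size pad buffer are DMA'd, so the pad data always ends at the same offset regardless of its length.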
128 struct vnic_txreq *tx,
135 &tx->txreq,
137 hdrbytes + tx->skb->len + tx->plen,
147 tx->pbc_val = cpu_to_le64(pbc);
150 &tx->txreq,
151 &tx->pbc_val,
157 ret = build_vnic_ulp_payload(sde, tx);
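Lines 128-157 assemble the complete descriptor for a packet: the request is initialized with a total length of header + skb->len + pad, the PBC word is stored little-endian inside the txreq itself so the buffer stays valid for the duration of the DMA, and then the PBC and the ULP payload are appended. A sketch under the same assumptions; the function name, the hdrbytes constant, the zero/NULL AHG arguments to sdma_txinit_ahg(), and the completion callback are reconstructions:

static int build_vnic_tx_desc(struct sdma_engine *sde,
                              struct vnic_txreq *tx,
                              u64 pbc)
{
        u16 hdrbytes = 2 << 2;  /* 8-byte PBC header (assumed) */
        int ret;

        /* lines 135-137: total length covers PBC + skb + pad */
        ret = sdma_txinit_ahg(&tx->txreq, 0,
                              hdrbytes + tx->skb->len + tx->plen,
                              0, 0, NULL, 0, vnic_sdma_complete);
        if (unlikely(ret))
                return ret;

        /* lines 147-151: PBC lives in the txreq so it outlives this call */
        tx->pbc_val = cpu_to_le64(pbc);
        ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq, &tx->pbc_val, hdrbytes);
        if (unlikely(ret))
                return ret;

        /* line 157: append the ULP payload descriptors */
        return build_vnic_ulp_payload(sde, tx);
}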
174 struct vnic_txreq *tx;
183 tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
184 if (unlikely(!tx)) {
189 tx->sdma = vnic_sdma;
190 tx->skb = skb;
191 hfi1_vnic_update_pad(tx->pad, plen);
192 tx->plen = plen;
193 ret = build_vnic_tx_desc(sde, tx, pbc);
198 &tx->txreq, vnic_sdma->pkts_sent);
210 sdma_txclean(dd, &tx->txreq);
211 kmem_cache_free(dd->vnic.txreq_cache, tx);
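Lines 174-211 are the transmit path that ties the pieces together: a vnic_txreq is taken from the per-device slab cache with GFP_ATOMIC, the skb and pad bookkeeping are filled in, the descriptor chain is built, and the request is handed to the SDMA engine; on failure the descriptor mappings are undone and the request goes back to the cache. The sketch below uses a hypothetical name and parameter list, since the matched lines do not show the enclosing function's prototype; the plen derivation, the error handling, and the iowait argument to sdma_send_txreq() are likewise assumptions:

static int vnic_send_dma_sketch(struct hfi1_devdata *dd,
                                struct hfi1_vnic_sdma *vnic_sdma,
                                struct sdma_engine *sde,
                                struct sk_buff *skb, u64 pbc, u8 plen)
{
        struct vnic_txreq *tx;                  /* line 174 */
        int ret;

        /* line 183: atomic allocation, this can run from softirq context */
        tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
        if (unlikely(!tx))                      /* line 184 */
                return -ENOMEM;                 /* error code assumed */

        /* lines 189-192: attach the skb and record the pad length */
        tx->sdma = vnic_sdma;
        tx->skb = skb;
        hfi1_vnic_update_pad(tx->pad, plen);
        tx->plen = plen;

        /* line 193: build PBC + payload descriptors */
        ret = build_vnic_tx_desc(sde, tx, pbc);
        if (unlikely(ret))
                goto free_desc;

        /* line 198: queue on the SDMA engine; the iowait work argument is assumed */
        ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
                              &tx->txreq, vnic_sdma->pkts_sent);
        if (likely(!ret))
                return 0;

free_desc:
        /* lines 210-211: undo the mappings and free the request on error */
        sdma_txclean(dd, &tx->txreq);
        kmem_cache_free(dd->vnic.txreq_cache, tx);
        return ret;
}

Treating every sdma_send_txreq() failure as fatal keeps the sketch short; the matched lines only confirm that the error path runs sdma_txclean() followed by kmem_cache_free().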