Lines matching refs:tq
229 * @tq: the TX queue
233 static inline unsigned int txq_avail(const struct sge_txq *tq)
235 return tq->size - 1 - tq->in_use;
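The "- 1" here is the standard ring-buffer reservation: one descriptor is kept permanently unused so a completely full ring is never mistaken for an empty one. A self-contained sketch with a reduced stand-in for the queue structure (the struct below is an assumption inferred from the fields visible in this listing; the real driver structure has more members):

    /* Sketch: minimal stand-in for the driver's struct sge_txq. */
    struct sge_txq {
            unsigned int in_use;    /* descriptors handed to hardware */
            unsigned int size;      /* total descriptors in the ring */
    };

    /* One slot stays reserved, so "full" (size - 1 in use) is
     * distinguishable from "empty" (0 in use).
     */
    static inline unsigned int txq_avail(const struct sge_txq *tq)
    {
            return tq->size - 1 - tq->in_use;
    }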
307 const struct ulptx_sgl *sgl, const struct sge_txq *tq)
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
333 } else if ((u8 *)p == (u8 *)tq->stat) {
334 p = (const struct ulptx_sge_pair *)tq->desc;
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) {
337 const __be64 *addr = (const __be64 *)tq->desc;
345 const __be64 *addr = (const __be64 *)tq->desc;
357 if ((u8 *)p == (u8 *)tq->stat)
358 p = (const struct ulptx_sge_pair *)tq->desc;
359 addr = ((u8 *)p + 16 <= (u8 *)tq->stat
361 : *(const __be64 *)tq->desc);
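All three branches above (source lines 326, 333, 336) answer the same question: where does the next 16-byte ulptx_sge_pair sit relative to tq->stat, the status page that terminates the descriptor ring? A sketch of the layout these pointer comparisons assume (offsets inferred from the 8- and 16-byte arithmetic in the fragments):

    /*
     * Assumed ring layout behind the boundary checks:
     *
     *   tq->desc                               tq->stat
     *      |<-------- descriptor ring --------->|<-- status page -->|
     *      [ d0 ][ d1 ] ................[ dN-1 ]
     *
     * For a 16-byte ulptx_sge_pair p:
     *   (u8 *)(p + 1) <= (u8 *)tq->stat   whole pair fits before the
     *                                     status page: read it in place
     *   (u8 *)p == (u8 *)tq->stat         pair starts exactly at the
     *                                     boundary: it actually lives
     *                                     at tq->desc (full wrap)
     *   (u8 *)p + 8 == (u8 *)tq->stat     only the first 8-byte address
     *                                     fits; the second is the first
     *                                     8 bytes at tq->desc (split wrap)
     */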
370 * @tq: the TX queue to reclaim descriptors from
377 static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
381 unsigned int cidx = tq->cidx;
386 sdesc = &tq->sdesc[cidx];
394 unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
400 if (++cidx == tq->size) {
402 sdesc = tq->sdesc;
405 tq->cidx = cidx;
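Only fragments of this reclaim loop match refs:tq. A reconstruction under the assumption that the elided lines test and free the stashed skb (the sdesc->skb/sdesc->sgl fields are visible at source line 394; the skb-free call is an assumption consistent with the visible structure):

    static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
                             unsigned int n, bool unmap)
    {
            struct device *dev = adapter->pdev_dev;
            unsigned int cidx = tq->cidx;
            struct tx_sw_desc *sdesc;

            sdesc = &tq->sdesc[cidx];
            while (n--) {
                    if (sdesc->skb) {               /* assumed elided test */
                            if (unmap)
                                    unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
                            dev_consume_skb_any(sdesc->skb);
                            sdesc->skb = NULL;
                    }
                    sdesc++;
                    if (++cidx == tq->size) {       /* consumer index wraps */
                            cidx = 0;
                            sdesc = tq->sdesc;
                    }
            }
            tq->cidx = cidx;
    }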
411 static inline int reclaimable(const struct sge_txq *tq)
413 int hw_cidx = be16_to_cpu(tq->stat->cidx);
414 int reclaimable = hw_cidx - tq->cidx;
416 reclaimable += tq->size;
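hw_cidx comes from the status page the hardware writes back (tq->stat->cidx), so the difference against the software consumer index goes negative whenever the hardware index has wrapped; the one elided line presumably tests for that. A reconstruction under that assumption:

    static inline int reclaimable(const struct sge_txq *tq)
    {
            int hw_cidx = be16_to_cpu(tq->stat->cidx);
            int reclaimable = hw_cidx - tq->cidx;

            if (reclaimable < 0)    /* hardware cidx wrapped past ours */
                    reclaimable += tq->size;
            return reclaimable;
    }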
423 * @tq: the TX queue to reclaim completed descriptors from
431 struct sge_txq *tq,
434 int avail = reclaimable(tq);
444 free_tx_desc(adapter, tq, avail, unmap);
445 tq->in_use -= avail;
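The visible lines suggest the usual batched-reclaim pattern: ask how much the hardware has consumed, cap the batch so the TX lock is not held for an unbounded time, free, and shrink in_use. A sketch, with the cap constant (MAX_TX_RECLAIM here) named as an assumption:

    /* MAX_TX_RECLAIM: assumed per-call cap keeping lock hold time O(1). */
    static inline void reclaim_completed_tx(struct adapter *adapter,
                                            struct sge_txq *tq, bool unmap)
    {
            int avail = reclaimable(tq);

            if (avail) {
                    if (avail > MAX_TX_RECLAIM)
                            avail = MAX_TX_RECLAIM;
                    free_tx_desc(adapter, tq, avail, unmap);
                    tq->in_use -= avail;
            }
    }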
887 * @tq: the TX queue we are writing into
899 * wrap around, i.e., @end > @tq->stat.
901 static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
930 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
943 if (unlikely((u8 *)end > (u8 *)tq->stat)) {
944 unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
948 part1 = (u8 *)end - (u8 *)tq->stat;
949 memcpy(tq->desc, (u8 *)buf + part0, part1);
950 end = (void *)tq->desc + part1;
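Source lines 930 and 943-950 show the strategy: when the SGL would run past tq->stat, it is first built in an on-stack buffer (buf) and then copied in two parts, the tail of the ring followed by its head. The same split copy can be written as a standalone helper (the name ring_copy and its signature are illustrative, not the driver's):

    /* Copy len bytes to 'pos' in the ring, wrapping at the status page. */
    static void ring_copy(struct sge_txq *tq, void *pos, const void *src,
                          unsigned int len)
    {
            unsigned int part0 = (u8 *)tq->stat - (u8 *)pos;

            if (len <= part0) {
                    memcpy(pos, src, len);                  /* no wrap */
            } else {
                    memcpy(pos, src, part0);                /* up to tq->stat */
                    memcpy(tq->desc, (u8 *)src + part0,
                           len - part0);                    /* rest at base */
            }
    }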
959 * @tq: the TX queue
964 static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
975 if (unlikely(tq->bar2_addr == NULL)) {
979 QID_V(tq->cntxt_id) | val);
995 if (n == 1 && tq->bar2_qid == 0) {
996 unsigned int index = (tq->pidx
997 ? (tq->pidx - 1)
998 : (tq->size - 1));
999 __be64 *src = (__be64 *)&tq->desc[index];
1000 __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1024 writel(val | QID_V(tq->bar2_qid),
1025 tq->bar2_addr + SGE_UDB_KDOORBELL);
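Two doorbell paths are visible: a legacy register write keyed by the queue context id (source line 979), and a BAR2 user doorbell. The n == 1 special case (lines 995-1000) copies the just-written descriptor itself through the write-combined doorbell so the hardware need not DMA-fetch it. A condensed, commented sketch; macro and register names not present in the fragments (t4_write_reg, PIDX_V, PIDX_T5_V, SGE_UDB_WCDOORBELL, EQ_UNIT) are assumptions patterned on the visible QID_V/SGE_UDB naming:

    wmb();  /* commit descriptor writes before telling the hardware */

    if (tq->bar2_addr == NULL) {
            /* Legacy path: kick the kernel doorbell register. */
            t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
                         QID_V(tq->cntxt_id) | PIDX_V(n));
    } else if (n == 1 && tq->bar2_qid == 0) {
            /* Single descriptor: push it straight through the
             * write-combined doorbell.  Note the wrap when pidx is 0:
             * the last write went to the final slot of the ring.
             */
            unsigned int index = tq->pidx ? (tq->pidx - 1) : (tq->size - 1);
            __be64 *src = (__be64 *)&tq->desc[index];
            __be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
                                                     SGE_UDB_WCDOORBELL);
            unsigned int count = EQ_UNIT / sizeof(__be64);

            while (count--)
                    writeq((__force u64)*src++, dst++);
    } else {
            /* General BAR2 path: write the producer-index increment. */
            writel(PIDX_T5_V(n) | QID_V(tq->bar2_qid),
                   tq->bar2_addr + SGE_UDB_KDOORBELL);
    }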
1044 * @tq: the TX queue where the packet will be inlined
1052 static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1056 int left = (void *)tq->stat - pos;
1066 skb_copy_bits(skb, left, tq->desc, skb->len - left);
1067 pos = (void *)tq->desc + (skb->len - left);
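left at source line 1056 is the room remaining before the status page, and lines 1066-1067 show the two-part copy when an inlined packet crosses it. A reconstruction, with the non-matching fast path filled in as an assumption:

    static void inline_tx_skb(const struct sk_buff *skb,
                              const struct sge_txq *tq, void *pos)
    {
            int left = (void *)tq->stat - pos;

            if (likely(skb->len <= left)) {
                    /* Assumed fast path: the whole packet fits. */
                    skb_copy_bits(skb, 0, pos, skb->len);
                    pos += skb->len;
            } else {
                    /* Split copy across the end of the ring. */
                    skb_copy_bits(skb, 0, pos, left);
                    skb_copy_bits(skb, left, tq->desc, skb->len - left);
                    pos = (void *)tq->desc + (skb->len - left);
            }
    }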
1142 static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1144 tq->in_use += n;
1145 tq->pidx += n;
1146 if (tq->pidx >= tq->size)
1147 tq->pidx -= tq->size;
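txq_advance() is shown in full: it books n descriptors and wraps the producer index by subtraction, which works because n is bounded by the ring size and is cheaper than a modulo. In the transmit path it pairs with the doorbell write, roughly (a sketch using the txq/tq names from the fragment below):

    txq_advance(&txq->q, ndesc);         /* claim the descriptors ...  */
    ring_tx_db(adapter, &txq->q, ndesc); /* ... then tell the hardware */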
1417 struct sge_txq *tq = &txq->q;
1427 if (unlikely((void *)sgl == (void *)tq->stat)) {
1428 sgl = (void *)tq->desc;
1429 end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1432 write_sgl(skb, tq, sgl, end, 0, addr);
1435 last_desc = tq->pidx + ndesc - 1;
1436 if (last_desc >= tq->size)
1437 last_desc -= tq->size;
1438 tq->sdesc[last_desc].skb = skb;
1439 tq->sdesc[last_desc].sgl = sgl;
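Two pieces of ring bookkeeping meet here. First, if write_sgl() would start the SGL exactly on the status page, it is redirected to the ring base with end rebased by the same distance (lines 1427-1429). Second, the skb and its SGL pointer are recorded in the software descriptor of the packet's last hardware descriptor, so reclaim frees the packet only once all of its descriptors have been consumed. A commented sketch of that bookkeeping:

    /* A packet spanning ndesc descriptors is anchored on its last one;
     * earlier descriptors carry no skb, so free_tx_desc() passes over
     * them without unmapping or freeing anything.
     */
    last_desc = tq->pidx + ndesc - 1;
    if (last_desc >= tq->size)          /* producer side wraps too */
            last_desc -= tq->size;
    tq->sdesc[last_desc].skb = skb;     /* freed by free_tx_desc()   */
    tq->sdesc[last_desc].sgl = sgl;     /* walked by unmap_sgl()     */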
2511 static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2516 tq->size * sizeof(*tq->desc) + s->stat_len,
2517 tq->desc, tq->phys_addr);
2518 tq->cntxt_id = 0;
2519 tq->sdesc = NULL;
2520 tq->desc = NULL;
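The size freed at source lines 2516-2517 documents the geometry assumed everywhere above: tq->size descriptors plus s->stat_len bytes of status page, which is why tq->stat sits just past the last descriptor. The matching allocation would look roughly like this (a sketch only; the real driver likely allocates through a shared ring-allocation helper):

    /* Assumed allocation counterpart: ring plus trailing status page. */
    tq->desc = dma_alloc_coherent(adapter->pdev_dev,
                                  tq->size * sizeof(*tq->desc) + s->stat_len,
                                  &tq->phys_addr, GFP_KERNEL);
    if (!tq->desc)
            return -ENOMEM;
    tq->stat = (void *)tq->desc + tq->size * sizeof(*tq->desc);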