Lines matching refs: tq (drivers/net/vmxnet3/vmxnet3_drv.c)

103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
105 return tq->stopped;
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
112 tq->stopped = false;
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
120 tq->stopped = false;
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
128 tq->stopped = true;
129 tq->num_stop++;
130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
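
The four helpers above (vmxnet3_tq_stopped/_start/_wake/_stop, lines 103-130) wrap the networking core's per-subqueue flow control; the subqueue index is recovered by pointer arithmetic on adapter->tx_queue[]. A minimal reconstruction of the stop helper from the matched lines, assuming a static void signature (only the parameter list at line 126 is shown):

static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	tq->stopped = true;
	tq->num_stop++;	/* counts how often this queue had to be stopped */
	/* tq - adapter->tx_queue == this queue's index into tx_queue[] */
	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}

The _start and _wake helpers differ only in clearing stopped and calling netif_start_subqueue()/netif_wake_subqueue() instead, as lines 112-121 show.
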
196 "%s: tq[%d] error 0x%x\n",
329 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
336 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
337 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);
339 skb = tq->buf_info[eop_idx].skb;
341 tq->buf_info[eop_idx].skb = NULL;
343 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);
345 while (tq->tx_ring.next2comp != eop_idx) {
346 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
354 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
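
vmxnet3_unmap_pkt() (lines 329-354) reclaims one completed packet: the two BUG_ONs assert in-order completion and that eop_idx really carries the EOP bit, then every descriptor from the packet's SOP through its EOP is unmapped while next2comp advances. A sketch under those assumptions; the local declarations, the final skb free and the return value are not in the match list and are filled in here as assumptions:

static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
		  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
	struct sk_buff *skb;
	int entries = 0;

	/* completions must arrive in ring order, and eop_idx must be an EOP desc */
	BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
	BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

	skb = tq->buf_info[eop_idx].skb;
	tq->buf_info[eop_idx].skb = NULL;

	/* step past the EOP slot so the loop below covers SOP..EOP inclusive */
	VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

	while (tq->tx_ring.next2comp != eop_idx) {
		vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
				     pdev);
		vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
		entries++;
	}

	dev_kfree_skb_any(skb);	/* assumption: skb freed once fully unmapped */
	return entries;		/* assumption: caller sums reclaimed slots */
}
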
364 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
378 &gdesc->tcd), tq, adapter->pdev,
381 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
382 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
386 spin_lock(&tq->tx_lock);
387 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
388 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
389 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
391 vmxnet3_tq_wake(tq, adapter);
393 spin_unlock(&tq->tx_lock);
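
vmxnet3_tq_tx_complete() (lines 364-393) consumes completion descriptors for as long as their generation bit matches tq->comp_ring.gen, i.e. exactly up to the device's write position, handing each completed index to vmxnet3_unmap_pkt(). Afterwards it re-wakes the queue, but only under tq->tx_lock and only if the queue is stopped and enough command-ring slots were reclaimed. A sketch of that wake path; the final clause of the condition (line 390) is not in the match list, and the netif_carrier_ok() check shown here is an assumption:

spin_lock(&tq->tx_lock);
if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
	     vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
	     VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
	     netif_carrier_ok(adapter->netdev))) {	/* assumed final clause */
	vmxnet3_tq_wake(tq, adapter);
}
spin_unlock(&tq->tx_lock);
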
400 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
405 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
408 tbi = tq->buf_info + tq->tx_ring.next2comp;
415 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
419 for (i = 0; i < tq->tx_ring.size; i++) {
420 BUG_ON(tq->buf_info[i].skb != NULL ||
421 tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
424 tq->tx_ring.gen = VMXNET3_INIT_GEN;
425 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
427 tq->comp_ring.gen = VMXNET3_INIT_GEN;
428 tq->comp_ring.next2proc = 0;
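
vmxnet3_tq_cleanup() (lines 400-428) drains everything still in flight, i.e. the slots between next2comp and next2fill, verifies with the BUG_ON at lines 420-421 that no buffer leaked, and then resets both rings to index 0 and generation VMXNET3_INIT_GEN. A sketch of the drain loop; the unmap call and skb free inside it are assumptions, since only the tq-referencing lines appear above:

while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
	struct vmxnet3_tx_buf_info *tbi;	/* type name from the driver headers */

	tbi = tq->buf_info + tq->tx_ring.next2comp;

	vmxnet3_unmap_tx_buf(tbi, adapter->pdev);	/* assumed */
	if (tbi->skb) {					/* assumed */
		dev_kfree_skb_any(tbi->skb);
		tbi->skb = NULL;
	}
	vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
}
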
433 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
436 if (tq->tx_ring.base) {
437 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
439 tq->tx_ring.base, tq->tx_ring.basePA);
440 tq->tx_ring.base = NULL;
442 if (tq->data_ring.base) {
444 tq->data_ring.size * tq->txdata_desc_size,
445 tq->data_ring.base, tq->data_ring.basePA);
446 tq->data_ring.base = NULL;
448 if (tq->comp_ring.base) {
449 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
451 tq->comp_ring.base, tq->comp_ring.basePA);
452 tq->comp_ring.base = NULL;
454 if (tq->buf_info) {
456 tq->tx_ring.size * sizeof(tq->buf_info[0]),
457 tq->buf_info, tq->buf_info_pa);
458 tq->buf_info = NULL;
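
vmxnet3_tq_destroy() (lines 433-458) releases each coherent DMA ring and NULLs its base pointer, so it is safe to call on a partially created queue (it is the error path of vmxnet3_tq_create(), line 546). The idiom, shown here for the completion ring; the sizeof() element type is taken from the matching allocation at line 530:

if (tq->comp_ring.base) {
	dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
			  sizeof(struct Vmxnet3_TxCompDesc),
			  tq->comp_ring.base, tq->comp_ring.basePA);
	tq->comp_ring.base = NULL;	/* lets destroy run again harmlessly */
}
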
475 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
481 memset(tq->tx_ring.base, 0, tq->tx_ring.size *
483 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
484 tq->tx_ring.gen = VMXNET3_INIT_GEN;
486 memset(tq->data_ring.base, 0,
487 tq->data_ring.size * tq->txdata_desc_size);
490 memset(tq->comp_ring.base, 0, tq->comp_ring.size *
492 tq->comp_ring.next2proc = 0;
493 tq->comp_ring.gen = VMXNET3_INIT_GEN;
496 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
497 for (i = 0; i < tq->tx_ring.size; i++)
498 tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
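
vmxnet3_tq_init() (lines 475-498) returns a queue to its pristine state without reallocating anything: all three rings are zeroed, producer/consumer indices go back to 0, both generation bits to VMXNET3_INIT_GEN, and every buf_info slot is marked unmapped. A reconstruction from the matched lines, with the signature, locals and sizeof() element types (taken from the allocations at lines 514 and 530) filled in as assumptions:

static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	int i;

	/* zero the command ring and reset its state */
	memset(tq->tx_ring.base, 0, tq->tx_ring.size *
	       sizeof(struct Vmxnet3_TxDesc));
	tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
	tq->tx_ring.gen = VMXNET3_INIT_GEN;

	memset(tq->data_ring.base, 0,
	       tq->data_ring.size * tq->txdata_desc_size);

	/* zero the completion ring and reset its state */
	memset(tq->comp_ring.base, 0, tq->comp_ring.size *
	       sizeof(struct Vmxnet3_TxCompDesc));
	tq->comp_ring.next2proc = 0;
	tq->comp_ring.gen = VMXNET3_INIT_GEN;

	/* reset the bookkeeping array; no buffer is mapped yet */
	memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
	for (i = 0; i < tq->tx_ring.size; i++)
		tq->buf_info[i].map_type = VMXNET3_MAP_NONE;
}
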
505 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
510 BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
511 tq->comp_ring.base || tq->buf_info);
513 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
514 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
515 &tq->tx_ring.basePA, GFP_KERNEL);
516 if (!tq->tx_ring.base) {
521 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
522 tq->data_ring.size * tq->txdata_desc_size,
523 &tq->data_ring.basePA, GFP_KERNEL);
524 if (!tq->data_ring.base) {
529 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
530 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
531 &tq->comp_ring.basePA, GFP_KERNEL);
532 if (!tq->comp_ring.base) {
537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
538 tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
539 &tq->buf_info_pa, GFP_KERNEL);
540 if (!tq->buf_info)
546 vmxnet3_tq_destroy(tq, adapter);
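
vmxnet3_tq_create() (lines 505-546) allocates the command ring, data ring, completion ring and buf_info array with dma_alloc_coherent(), in that order; any failure drops into vmxnet3_tq_destroy(), which tolerates the partially built queue thanks to the NULL checks seen at lines 436-454. A sketch with the error label, return values and the (elided) error messages as assumptions:

static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
	size_t sz;

	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
	       tq->comp_ring.base || tq->buf_info);

	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
			&tq->tx_ring.basePA, GFP_KERNEL);
	if (!tq->tx_ring.base)
		goto err;

	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->data_ring.size * tq->txdata_desc_size,
			&tq->data_ring.basePA, GFP_KERNEL);
	if (!tq->data_ring.base)
		goto err;

	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
			tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
			&tq->comp_ring.basePA, GFP_KERNEL);
	if (!tq->comp_ring.base)
		goto err;

	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
	tq->buf_info = dma_alloc_coherent(&adapter->pdev->dev, sz,
					  &tq->buf_info_pa, GFP_KERNEL);
	if (!tq->buf_info)
		goto err;

	return 0;

err:
	vmxnet3_tq_destroy(tq, adapter);	/* frees whatever was allocated */
	return -ENOMEM;
}
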
677 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
689 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
691 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
696 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
697 tq->tx_ring.next2fill *
698 tq->txdata_desc_size);
702 tbi = tq->buf_info + tq->tx_ring.next2fill;
707 tq->tx_ring.next2fill,
710 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
713 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
730 tbi = tq->buf_info + tq->tx_ring.next2fill;
740 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
741 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
749 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
751 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
752 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
765 tbi = tq->buf_info + tq->tx_ring.next2fill;
782 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
783 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
791 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
793 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
794 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
805 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
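
vmxnet3_map_pkt() (lines 677-805) fills one command-ring descriptor per header/fragment piece. Note the generation handling: the SOP descriptor is staged with the inverted generation bit (gen ^ 0x1, line 689) so the device ignores the packet, while every later descriptor is written with the live bit (lines 713, 752, 794); the SOP bit is only flipped to the live value at the end of vmxnet3_tq_xmit(), once the whole chain is in the ring (that flip is not part of this match list). An illustrative fragment only, not the complete function; descriptor fill-in and the DMA-mapping calls are omitted:

/* stage the SOP descriptor with the stale generation bit */
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;

/* fill the SOP descriptor, then advance the producer index */
vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

/* all following descriptors carry the live generation bit */
dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
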
840 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
909 tq->txdata_desc_size,
921 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) {
922 tq->stats.oversized_hdr++;
943 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
949 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
950 tq->tx_ring.next2fill *
951 tq->txdata_desc_size);
956 ctx->copy_size, tq->tx_ring.next2fill);
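
vmxnet3_copy_hdr() (lines 943-956) copies the packet headers into the per-queue data ring rather than DMA-mapping them; the slot for ring index next2fill lives at data_ring.base + next2fill * txdata_desc_size, which is the same physical offset the SOP descriptor was pointed at in vmxnet3_map_pkt() (lines 696-698). Only headers that fit are copied: vmxnet3_parse_hdr() rejects anything larger than txdata_desc_size (lines 921-922). A sketch; the destination field name tdd->data is an assumption:

struct Vmxnet3_TxDataDesc *tdd;

tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base +
				    tq->tx_ring.next2fill *
				    tq->txdata_desc_size);
/* ctx->copy_size was computed by vmxnet3_parse_hdr() */
memcpy(tdd->data, skb->data, ctx->copy_size);	/* field name assumed */
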
1010 * Transmits a pkt thru a given tq
1018 * 2. tq stats may be updated accordingly
1023 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1048 tq->stats.drop_tso++;
1051 tq->stats.copy_skb_header++;
1065 tq->stats.drop_too_many_frags++;
1068 tq->stats.linearized++;
1075 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1082 tq->stats.drop_oversized_hdr++;
1090 tq->stats.drop_oversized_hdr++;
1096 tq->stats.drop_hdr_inspect_err++;
1100 spin_lock_irqsave(&tq->tx_lock, flags);
1102 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1103 tq->stats.tx_ring_full++;
1107 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1109 vmxnet3_tq_stop(tq, adapter);
1110 spin_unlock_irqrestore(&tq->tx_lock, flags);
1115 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1118 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1132 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1167 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1194 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
1197 spin_unlock_irqrestore(&tq->tx_lock, flags);
1199 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1200 tq->shared->txNumDeferred = 0;
1202 VMXNET3_REG_TXPROD + tq->qid * 8,
1203 tq->tx_ring.next2fill);
1209 spin_unlock_irqrestore(&tq->tx_lock, flags);
1211 tq->stats.drop_total++;
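
vmxnet3_tq_xmit() (lines 1010-1211) is the hot path: it counts the descriptors the skb needs, stops the queue and bails out when the command ring cannot hold them (lines 1102-1110), parses and copies the headers, maps the packet, and batches doorbell writes. txNumDeferred in the queue's shared area accumulates submitted packets (line 1167), and the TXPROD register is written only once the device-provided txThreshold is crossed. A sketch of that batching; the VMXNET3_WRITE_BAR0_REG() wrapper is the driver's usual register accessor, but only its arguments appear in the match list:

if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
	tq->shared->txNumDeferred = 0;
	/* tell the device how far next2fill has advanced for this queue */
	VMXNET3_WRITE_BAR0_REG(adapter,
			       VMXNET3_REG_TXPROD + tq->qid * 8,
			       tq->tx_ring.next2fill);
}
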
2003 struct vmxnet3_tx_queue *tq =
2005 vmxnet3_tq_tx_complete(tq, adapter);
2028 struct vmxnet3_tx_queue *tq = data;
2029 struct vmxnet3_adapter *adapter = tq->adapter;
2032 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2042 vmxnet3_tq_tx_complete(tq, adapter);
2044 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
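
Completions are reaped from two contexts: the NAPI poll path (lines 2003-2005) and, when MSI-X gives each TX queue its own vector, a dedicated handler (lines 2028-2044) that masks the queue's completion-ring vector, runs vmxnet3_tq_tx_complete() and unmasks again. A sketch of that handler; the function name, the guard around the disable, and the unmatched lines between 2032 and 2042 are assumptions or omissions here:

static irqreturn_t
vmxnet3_msix_tx(int irq, void *data)		/* handler name assumed */
{
	struct vmxnet3_tx_queue *tq = data;
	struct vmxnet3_adapter *adapter = tq->adapter;

	vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);

	vmxnet3_tq_tx_complete(tq, adapter);

	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
	return IRQ_HANDLED;
}
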
2540 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2543 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
2544 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2545 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2546 tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
2547 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
2548 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
2549 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
2550 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
2554 tqc->intrIdx = tq->comp_ring.intr_idx;
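
At activation time the driver describes each TX queue to the device by filling a Vmxnet3_TxQueueConf block in shared memory: ring base physical addresses, ring sizes, the data-ring descriptor size and the interrupt index of the completion ring (lines 2543-2554). A sketch of that fill; the tqc initialization from adapter->tqd_start[i] and any fields not matching tq are assumptions or omissions:

struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
struct Vmxnet3_TxQueueConf *tqc = &adapter->tqd_start[i].conf;	/* member name assumed */

tqc->txRingBasePA       = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA     = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA     = cpu_to_le64(tq->comp_ring.basePA);
tqc->ddPA               = cpu_to_le64(tq->buf_info_pa);
tqc->txRingSize         = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize       = cpu_to_le32(tq->data_ring.size);
tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size);
tqc->compRingSize       = cpu_to_le32(tq->comp_ring.size);
tqc->intrIdx            = tq->comp_ring.intr_idx;
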
2957 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2958 tq->tx_ring.size = tx_ring_size;
2959 tq->data_ring.size = tx_ring_size;
2960 tq->comp_ring.size = tx_ring_size;
2961 tq->txdata_desc_size = txdata_desc_size;
2962 tq->shared = &adapter->tqd_start[i].ctrl;
2963 tq->stopped = true;
2964 tq->adapter = adapter;
2965 tq->qid = i;
2966 err = vmxnet3_tq_create(tq, adapter);
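
Finally, the queue-setup loop around lines 2957-2966 sizes all three rings from the same tx_ring_size, points tq->shared at the device-visible control block in tqd_start[], and leaves each queue stopped until activation; only then does vmxnet3_tq_create() allocate its rings. A sketch of one iteration; the loop bounds and error handling are assumptions:

for (i = 0; i < adapter->num_tx_queues; i++) {
	struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];

	tq->tx_ring.size = tx_ring_size;
	tq->data_ring.size = tx_ring_size;
	tq->comp_ring.size = tx_ring_size;
	tq->txdata_desc_size = txdata_desc_size;
	tq->shared = &adapter->tqd_start[i].ctrl;	/* device-visible queue control */
	tq->stopped = true;		/* stays stopped until the device is activated */
	tq->adapter = adapter;
	tq->qid = i;
	err = vmxnet3_tq_create(tq, adapter);
	if (err)
		break;			/* unwind handling assumed */
}
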