Lines Matching refs:txq

135 #define      MVNETA_CPU_TXQ_ACCESS(txq)		 BIT(txq + 8)
773 /* Increment txq get counter */
774 static void mvneta_txq_inc_get(struct mvneta_tx_queue *txq)
776 txq->txq_get_index++;
777 if (txq->txq_get_index == txq->size)
778 txq->txq_get_index = 0;
781 /* Increment txq put counter */
782 static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
784 txq->txq_put_index++;
785 if (txq->txq_put_index == txq->size)
786 txq->txq_put_index = 0;
964 struct mvneta_tx_queue *txq,
969 pend_desc += txq->pending;
974 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
977 txq->pending = 0;
982 mvneta_txq_next_desc_get(struct mvneta_tx_queue *txq)
984 int tx_desc = txq->next_desc_to_proc;
986 txq->next_desc_to_proc = MVNETA_QUEUE_NEXT_DESC(txq, tx_desc);
987 return txq->descs + tx_desc;
993 static void mvneta_txq_desc_put(struct mvneta_tx_queue *txq)
995 if (txq->next_desc_to_proc == 0)
996 txq->next_desc_to_proc = txq->last_desc - 1;
998 txq->next_desc_to_proc--;
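
The helpers above (txq_get_index/txq_put_index at 773-786 and next_desc_to_proc at 982-998) all manage ring indices with wrap-around. A simplified, self-contained sketch of the hand-out/put-back pattern follows; the struct and names are hypothetical, not the driver's mvneta_tx_queue:

/* Hypothetical descriptor cursor, illustrating the wrap-around index
 * handling seen in mvneta_txq_inc_get()/mvneta_txq_inc_put() and
 * mvneta_txq_next_desc_get()/mvneta_txq_desc_put().
 */
struct desc_cursor {
	unsigned int next;	/* next descriptor index to hand out */
	unsigned int size;	/* number of descriptors in the ring */
};

/* Hand out the current descriptor index and advance, wrapping to 0
 * after the last slot.
 */
static unsigned int desc_get(struct desc_cursor *c)
{
	unsigned int cur = c->next;

	c->next = (cur + 1 == c->size) ? 0 : cur + 1;
	return cur;
}

/* Give the most recently handed-out descriptor back (e.g. after a DMA
 * mapping failure) by stepping the cursor one slot backwards.
 */
static void desc_put_back(struct desc_cursor *c)
{
	c->next = (c->next == 0) ? c->size - 1 : c->next - 1;
}
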
1260 struct mvneta_tx_queue *txq = &pp->txqs[queue];
1261 if (txq->descs)
1498 int rxq, txq;
1504 for (txq = 0; txq < txq_number; txq++)
1505 if ((txq % max_cpu) == cpu)
1506 txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
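
Combined with MVNETA_CPU_TXQ_ACCESS() at line 135, the loop at 1504-1506 spreads TX queues round-robin across CPUs and records the result as a bitmask starting at bit 8. A hedged, user-space sketch of that mask computation; the macro shape is copied from line 135, everything else is assumed:

#include <stdio.h>

/* Same shape as MVNETA_CPU_TXQ_ACCESS(txq) at line 135: TX-queue
 * access bits start at bit 8 of the per-CPU map register.
 */
#define CPU_TXQ_ACCESS(txq)	(1u << ((txq) + 8))

/* CPU 'cpu' owns every TX queue whose index is congruent to it modulo
 * the number of CPUs, mirroring the loop at lines 1504-1506.
 */
static unsigned int cpu_txq_map(int cpu, int num_cpus, int num_txqs)
{
	unsigned int map = 0;

	for (int txq = 0; txq < num_txqs; txq++)
		if ((txq % num_cpus) == cpu)
			map |= CPU_TXQ_ACCESS(txq);

	return map;
}

int main(void)
{
	/* Example: 8 queues over 2 CPUs -> CPU 0 gets queues 0, 2, 4, 6,
	 * i.e. bits 8, 10, 12 and 14 -> 0x5500.
	 */
	printf("0x%x\n", cpu_txq_map(0, 2, 8));
	return 0;
}
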
1709 struct mvneta_tx_queue *txq, u32 value)
1713 val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id));
1718 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val);
1735 struct mvneta_tx_queue *txq,
1743 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1748 mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
1753 struct mvneta_tx_queue *txq)
1758 val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id));
1769 struct mvneta_tx_queue *txq)
1774 sent_desc = mvneta_txq_sent_desc_num_get(pp, txq);
1778 mvneta_txq_sent_desc_dec(pp, txq, sent_desc);
1869 struct mvneta_tx_queue *txq, int num,
1881 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_get_index];
1882 struct mvneta_tx_desc *tx_desc = txq->descs +
1883 txq->txq_get_index;
1885 mvneta_txq_inc_get(txq);
1914 struct mvneta_tx_queue *txq)
1916 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
1919 tx_done = mvneta_txq_sent_desc_proc(pp, txq);
1923 mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
1925 txq->count -= tx_done;
1928 if (txq->count <= txq->tx_wake_threshold)
2091 mvneta_xdp_submit_frame(struct mvneta_port *pp, struct mvneta_tx_queue *txq,
2103 if (txq->count + num_frames >= txq->size)
2107 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2117 tx_desc = mvneta_txq_next_desc_get(txq);
2127 mvneta_txq_desc_put(txq);
2151 mvneta_txq_inc_put(txq);
2156 txq->pending += num_frames;
2157 txq->count += num_frames;
2163 mvneta_txq_desc_put(txq);
2164 tx_desc = txq->descs + txq->next_desc_to_proc;
2177 struct mvneta_tx_queue *txq;
2188 txq = &pp->txqs[cpu % txq_number];
2189 nq = netdev_get_tx_queue(pp->dev, txq->id);
2192 ret = mvneta_xdp_submit_frame(pp, txq, xdpf, &nxmit_byte, false);
2200 mvneta_txq_pend_desc_add(pp, txq, 0);
2219 struct mvneta_tx_queue *txq;
2229 txq = &pp->txqs[cpu % txq_number];
2230 nq = netdev_get_tx_queue(pp->dev, txq->id);
2234 ret = mvneta_xdp_submit_frame(pp, txq, frames[i], &nxmit_byte,
2243 mvneta_txq_pend_desc_add(pp, txq, 0);
2671 struct mvneta_tx_queue *txq)
2677 if (txq->tso_hdrs[i]) {
2679 txq->tso_hdrs[i],
2680 txq->tso_hdrs_phys[i]);
2681 txq->tso_hdrs[i] = NULL;
2687 struct mvneta_tx_queue *txq)
2692 num = DIV_ROUND_UP(txq->size, MVNETA_TSO_PER_PAGE);
2694 txq->tso_hdrs[i] = dma_alloc_coherent(dev, MVNETA_TSO_PAGE_SIZE,
2695 &txq->tso_hdrs_phys[i],
2697 if (!txq->tso_hdrs[i]) {
2698 mvneta_free_tso_hdrs(pp, txq);
2706 static char *mvneta_get_tso_hdr(struct mvneta_tx_queue *txq, dma_addr_t *dma)
2710 index = txq->txq_put_index / MVNETA_TSO_PER_PAGE;
2711 offset = (txq->txq_put_index % MVNETA_TSO_PER_PAGE) * TSO_HEADER_SIZE;
2713 *dma = txq->tso_hdrs_phys[index] + offset;
2715 return txq->tso_hdrs[index] + offset;
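
mvneta_get_tso_hdr() at 2706-2715 converts the put index into a (page, offset) pair: TSO headers are packed MVNETA_TSO_PER_PAGE to a DMA-coherent page, as allocated at 2687-2698. A small sketch of that addressing arithmetic; the header and page sizes below are assumptions for illustration, not the driver's definitions:

#include <stdio.h>

/* Assumed sizes for illustration only. */
#define HDR_SIZE	256			/* bytes reserved per TSO header */
#define PAGE_BYTES	4096
#define HDRS_PER_PAGE	(PAGE_BYTES / HDR_SIZE)	/* 16 headers per page */

/* Map a descriptor put index to the page and byte offset that hold its
 * TSO header, the same index/offset split as lines 2710-2713.
 */
static void tso_hdr_slot(unsigned int put_index,
			 unsigned int *page, unsigned int *offset)
{
	*page = put_index / HDRS_PER_PAGE;
	*offset = (put_index % HDRS_PER_PAGE) * HDR_SIZE;
}

int main(void)
{
	unsigned int page, offset;

	tso_hdr_slot(19, &page, &offset);
	/* 19 / 16 = page 1, (19 % 16) * 256 = offset 768 */
	printf("page %u offset %u\n", page, offset);
	return 0;
}
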
2718 static void mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq,
2721 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2727 hdr = mvneta_get_tso_hdr(txq, &hdr_phys);
2730 tx_desc = mvneta_txq_next_desc_get(txq);
2738 mvneta_txq_inc_put(txq);
2742 mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
2746 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2749 tx_desc = mvneta_txq_next_desc_get(txq);
2755 mvneta_txq_desc_put(txq);
2771 mvneta_txq_inc_put(txq);
2776 struct mvneta_tx_queue *txq,
2782 if (desc_idx >= txq->size)
2783 desc_idx -= txq->size;
2786 struct mvneta_tx_desc *tx_desc = txq->descs + desc_idx;
2787 struct mvneta_tx_buf *buf = &txq->buf[desc_idx];
2795 mvneta_txq_desc_put(txq);
2798 desc_idx = txq->size;
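
mvneta_release_descs() at 2776-2798 unwinds partially built frames (TSO or fragment paths) by walking the descriptors backwards from the last one written, wrapping at the ring boundary. A simplified sketch of that backwards walk under assumed names; the per-descriptor unmap/free work is elided and the loop shape is illustrative:

/* Release 'num + 1' descriptors ending at index 'first + num', walking
 * backwards and wrapping at 'ring_size', in the spirit of
 * mvneta_release_descs().
 */
static void release_descs(unsigned int first, int num,
			  unsigned int ring_size)
{
	unsigned int idx = first + num;

	if (idx >= ring_size)		/* the frame may have wrapped */
		idx -= ring_size;

	for (int i = num; i >= 0; i--) {
		/* ...dma-unmap and free descriptor 'idx' here... */

		if (idx == 0)		/* step backwards with wrap */
			idx = ring_size;
		idx--;
	}
}
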
2804 struct mvneta_tx_queue *txq)
2812 if ((txq->count + tso_count_descs(skb)) >= txq->size)
2820 first_desc = txq->txq_put_index;
2832 mvneta_tso_put_hdr(skb, txq, &tso, data_left, total_len == 0);
2840 if (mvneta_tso_put_data(dev, txq, skb,
2857 mvneta_release_descs(pp, txq, first_desc, desc_count - 1);
2863 struct mvneta_tx_queue *txq)
2867 int first_desc = txq->txq_put_index;
2870 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2874 tx_desc = mvneta_txq_next_desc_get(txq);
2883 mvneta_txq_desc_put(txq);
2897 mvneta_txq_inc_put(txq);
2906 mvneta_release_descs(pp, txq, first_desc, i - 1);
2915 struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
2916 struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
2926 frags = mvneta_tx_tso(skb, dev, txq);
2933 tx_desc = mvneta_txq_next_desc_get(txq);
2944 mvneta_txq_desc_put(txq);
2955 mvneta_txq_inc_put(txq);
2960 mvneta_txq_inc_put(txq);
2963 if (mvneta_tx_frag_process(pp, skb, txq)) {
2968 mvneta_txq_desc_put(txq);
2981 txq->count += frags;
2982 if (txq->count >= txq->tx_stop_threshold)
2986 txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
2987 mvneta_txq_pend_desc_add(pp, txq, frags);
2989 txq->pending += frags;
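
Lines 2986-2989 choose between kicking the hardware immediately (mvneta_txq_pend_desc_add()) and deferring: descriptors can be accumulated in txq->pending across a transmit batch, but only while the total still fits the hardware's pending-count field (MVNETA_TXQ_DEC_SENT_MASK). A hedged sketch of that decision; the field limit and all names here are assumptions:

#include <stdbool.h>

/* Assumed width of the hardware "add pending descriptors" field;
 * stands in for MVNETA_TXQ_DEC_SENT_MASK.
 */
#define PENDING_FIELD_MAX	255u

struct txq_sw {
	unsigned int pending;	/* descriptors queued but not yet kicked */
};

/* Decide whether to write the doorbell now or fold this frame into the
 * current batch. 'end_of_batch' stands for the conditions the driver
 * derives from the stack (no further packets coming, queue stopped).
 */
static bool txq_kick_or_defer(struct txq_sw *q, unsigned int frags,
			      bool end_of_batch)
{
	if (end_of_batch || q->pending + frags > PENDING_FIELD_MAX)
		return true;		/* caller kicks the hardware now */

	q->pending += frags;		/* defer, batching the update */
	return false;
}
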
3006 struct mvneta_tx_queue *txq)
3009 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3010 int tx_done = txq->count;
3012 mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
3014 /* reset txq */
3015 txq->count = 0;
3016 txq->txq_put_index = 0;
3017 txq->txq_get_index = 0;
3025 struct mvneta_tx_queue *txq;
3030 txq = mvneta_tx_done_policy(pp, cause_tx_done);
3032 nq = netdev_get_tx_queue(pp->dev, txq->id);
3035 if (txq->count)
3036 mvneta_txq_done(pp, txq);
3039 cause_tx_done &= ~((1 << txq->id));
3525 struct mvneta_tx_queue *txq)
3529 txq->size = pp->tx_ring_size;
3535 txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
3536 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
3539 txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
3540 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3541 &txq->descs_phys, GFP_KERNEL);
3542 if (!txq->descs)
3545 txq->last_desc = txq->size - 1;
3547 txq->buf = kmalloc_array(txq->size, sizeof(*txq->buf), GFP_KERNEL);
3548 if (!txq->buf)
3552 err = mvneta_alloc_tso_hdrs(pp, txq);
3560 cpu = txq->id % num_present_cpus();
3563 cpumask_set_cpu(cpu, &txq->affinity_mask);
3564 netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
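
Two pieces of arithmetic in mvneta_txq_sw_init() are worth spelling out: the stop/wake hysteresis at 3535-3536 (stop once fewer than a worst-case skb's descriptors remain, wake only when the in-flight count falls to half the stop threshold) and the XPS affinity at 3560-3564 (queue i is pinned to CPU i modulo the present CPUs). A small sketch with assumed example values; the ring size and MAX_SKB_DESCS below are illustrative, not the driver's numbers:

#include <stdio.h>

/* Assumed example values for illustration. */
#define TX_RING_SIZE	512	/* pp->tx_ring_size */
#define MAX_SKB_DESCS	42	/* worst-case descriptors for one skb */

int main(void)
{
	unsigned int stop = TX_RING_SIZE - MAX_SKB_DESCS;	/* 470 */
	unsigned int wake = stop / 2;				/* 235 */
	const int nr_cpus = 2, nr_txqs = 8;

	printf("stop tx at %u in-flight, wake again at %u\n", stop, wake);

	/* XPS mapping: each queue is served by CPU (id modulo nr_cpus). */
	for (int txq = 0; txq < nr_txqs; txq++)
		printf("txq %d -> cpu %d\n", txq, txq % nr_cpus);

	return 0;
}
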
3570 struct mvneta_tx_queue *txq)
3573 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
3574 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
3577 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
3578 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
3580 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
3585 struct mvneta_tx_queue *txq)
3589 ret = mvneta_txq_sw_init(pp, txq);
3593 mvneta_txq_hw_init(pp, txq);
3600 struct mvneta_tx_queue *txq)
3602 struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
3604 kfree(txq->buf);
3606 mvneta_free_tso_hdrs(pp, txq);
3607 if (txq->descs)
3609 txq->size * MVNETA_DESC_ALIGNED_SIZE,
3610 txq->descs, txq->descs_phys);
3614 txq->buf = NULL;
3615 txq->descs = NULL;
3616 txq->last_desc = 0;
3617 txq->next_desc_to_proc = 0;
3618 txq->descs_phys = 0;
3622 struct mvneta_tx_queue *txq)
3625 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
3626 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
3629 mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0);
3630 mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
3634 struct mvneta_tx_queue *txq)
3636 mvneta_txq_sw_deinit(pp, txq);
3637 mvneta_txq_hw_deinit(pp, txq);
3686 netdev_err(pp->dev, "%s: can't create txq=%d\n",
4691 struct mvneta_tx_queue *txq = &pp->txqs[queue];
4692 txq->done_pkts_coal = c->tx_max_coalesced_frames;
4693 mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
5221 int rxq, txq, tc, ret;
5269 for (txq = mqprio->qopt.offset[tc];
5270 txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
5271 txq++) {
5272 if (txq >= txq_number)
5275 ret = mvneta_setup_queue_rates(pp, txq,
5356 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5357 txq->id = queue;
5358 txq->size = pp->tx_ring_size;
5359 txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
5795 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5797 mvneta_txq_hw_deinit(pp, txq);
5848 struct mvneta_tx_queue *txq = &pp->txqs[queue];
5850 txq->next_desc_to_proc = 0;
5851 mvneta_txq_hw_init(pp, txq);