Lines matching references to txq in the mv643xx_eth driver

178 #define IS_TSO_HEADER(txq, addr) \
179 ((addr >= txq->tso_hdrs_dma) && \
180 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
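
The IS_TSO_HEADER macro above (lines 178-180) tells the reclaim path whether a descriptor's buffer address points into the per-queue TSO header area, which is allocated once and never needs a per-packet dma_unmap (see line 1088 below). A minimal userspace sketch of the same half-open range check, with an assumed header size and a toy struct standing in for struct tx_queue:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TSO_HEADER_SIZE 128             /* assumed size for the sketch */

    struct toy_txq {
            uint64_t tso_hdrs_dma;          /* DMA base of the header area */
            unsigned int tx_ring_size;      /* one header slot per descriptor */
    };

    /* Same half-open interval test as IS_TSO_HEADER */
    static bool is_tso_header(const struct toy_txq *txq, uint64_t addr)
    {
            return addr >= txq->tso_hdrs_dma &&
                   addr <  txq->tso_hdrs_dma +
                           (uint64_t)txq->tx_ring_size * TSO_HEADER_SIZE;
    }

    int main(void)
    {
            struct toy_txq q = { .tso_hdrs_dma = 0x10000, .tx_ring_size = 512 };

            printf("%d\n", is_tso_header(&q, 0x10000));             /* 1: first slot */
            printf("%d\n", is_tso_header(&q, 0x10000 + 512 * 128)); /* 0: one past the end */
            return 0;
    }
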
407 struct tx_queue txq[8];
439 /* rxq/txq helper functions *************************************************/
445 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
447 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
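
txq_to_mp (line 445) recovers the per-port private structure from a pointer to one entry of its embedded txq[8] array. The sketch below restates that pointer arithmetic in plain userspace C with toy types; note the driver passes the runtime index txq->index straight into container_of, which leans on compiler support for variable subscripts in offsetof, while the sketch steps back to txq[0] first to stay within ISO C.

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_txq { int index; };

    struct toy_priv {
            int port_num;
            struct toy_txq txq[8];
    };

    static struct toy_priv *txq_to_priv(struct toy_txq *txq)
    {
            /* Step back to &txq[0] using the stored index, then subtract the
             * member offset to reach the enclosing structure. The driver does
             * this in one container_of(txq, ..., txq[txq->index]) call.
             */
            return container_of(txq - txq->index, struct toy_priv, txq[0]);
    }

    int main(void)
    {
            struct toy_priv priv = { .port_num = 2 };

            for (int i = 0; i < 8; i++)
                    priv.txq[i].index = i;

            printf("%d\n", txq_to_priv(&priv.txq[5])->port_num);    /* 2 */
            return 0;
    }
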
466 static void txq_reset_hw_ptr(struct tx_queue *txq)
468 struct mv643xx_eth_private *mp = txq_to_mp(txq);
471 addr = (u32)txq->tx_desc_dma;
472 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
473 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
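
txq_reset_hw_ptr (lines 466-473) re-synchronises the hardware with software by writing the bus address of the current descriptor into TXQ_CURRENT_DESC_PTR. The address arithmetic is just ring base plus index times descriptor size; a sketch with the register write replaced by a printf and a toy 16-byte descriptor:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_tx_desc { uint32_t cmd_sts, byte_cnt, buf_ptr, next_desc_ptr; };

    struct toy_txq {
            uint64_t tx_desc_dma;           /* bus address of descriptor 0 */
            unsigned int tx_curr_desc;      /* next descriptor software will fill */
    };

    /* Bus address of the current descriptor, as written to TXQ_CURRENT_DESC_PTR */
    static uint64_t curr_desc_addr(const struct toy_txq *txq)
    {
            return txq->tx_desc_dma +
                   txq->tx_curr_desc * sizeof(struct toy_tx_desc);
    }

    int main(void)
    {
            struct toy_txq q = { .tx_desc_dma = 0x20000, .tx_curr_desc = 3 };

            printf("0x%llx\n", (unsigned long long)curr_desc_addr(&q)); /* 0x20030 */
            return 0;
    }
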
476 static void txq_enable(struct tx_queue *txq)
478 struct mv643xx_eth_private *mp = txq_to_mp(txq);
479 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
482 static void txq_disable(struct tx_queue *txq)
484 struct mv643xx_eth_private *mp = txq_to_mp(txq);
485 u8 mask = 1 << txq->index;
492 static void txq_maybe_wake(struct tx_queue *txq)
494 struct mv643xx_eth_private *mp = txq_to_mp(txq);
495 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
499 if (txq->tx_desc_count <= txq->tx_wake_threshold)
725 txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
733 tx_index = txq->tx_curr_desc++;
734 if (txq->tx_curr_desc == txq->tx_ring_size)
735 txq->tx_curr_desc = 0;
736 desc = &txq->tx_desc_area[tx_index];
737 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
744 memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
746 desc->buf_ptr = txq->tso_hdrs_dma
750 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
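
All of the submit paths listed here (lines 733-735, and again at 900-902 and 951-953 below) claim a descriptor the same way: take tx_curr_desc, post-increment it, and wrap it back to zero when it reaches tx_ring_size. A minimal sketch of that producer-side advance:

    #include <stdio.h>

    struct toy_txq {
            unsigned int tx_curr_desc;      /* producer index */
            unsigned int tx_ring_size;
    };

    /* Claim the current slot and advance, wrapping at the end of the ring */
    static unsigned int claim_desc(struct toy_txq *txq)
    {
            unsigned int tx_index = txq->tx_curr_desc++;

            if (txq->tx_curr_desc == txq->tx_ring_size)
                    txq->tx_curr_desc = 0;
            return tx_index;
    }

    int main(void)
    {
            struct toy_txq q = { .tx_curr_desc = 510, .tx_ring_size = 512 };

            for (int i = 0; i < 4; i++)
                    printf("%u ", claim_desc(&q));  /* 510 511 0 1 */
            printf("\n");
            return 0;
    }
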
773 txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
776 struct mv643xx_eth_private *mp = txq_to_mp(txq);
785 tx_index = txq->tx_curr_desc;
786 desc = &txq->tx_desc_area[tx_index];
797 desc->buf_ptr = txq->tso_hdrs_dma +
798 txq->tx_curr_desc * TSO_HEADER_SIZE;
810 txq->tx_curr_desc++;
811 if (txq->tx_curr_desc == txq->tx_ring_size)
812 txq->tx_curr_desc = 0;
815 static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
818 struct mv643xx_eth_private *mp = txq_to_mp(txq);
826 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
831 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
846 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
848 txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
856 ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
866 __skb_queue_tail(&txq->tx_skb, skb);
874 mp->work_tx_end &= ~(1 << txq->index);
878 txq_enable(txq);
879 txq->tx_desc_count += desc_count;
888 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
890 struct mv643xx_eth_private *mp = txq_to_mp(txq);
900 tx_index = txq->tx_curr_desc++;
901 if (txq->tx_curr_desc == txq->tx_ring_size)
902 txq->tx_curr_desc = 0;
903 desc = &txq->tx_desc_area[tx_index];
904 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
926 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
929 struct mv643xx_eth_private *mp = txq_to_mp(txq);
940 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
951 tx_index = txq->tx_curr_desc++;
952 if (txq->tx_curr_desc == txq->tx_ring_size)
953 txq->tx_curr_desc = 0;
954 desc = &txq->tx_desc_area[tx_index];
955 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
958 txq_submit_frag_skb(txq, skb);
970 __skb_queue_tail(&txq->tx_skb, skb);
979 mp->work_tx_end &= ~(1 << txq->index);
983 txq_enable(txq);
985 txq->tx_desc_count += nr_frags + 1;
994 struct tx_queue *txq;
998 txq = mp->txq + queue;
1010 ret = txq_submit_tso(txq, skb, dev);
1012 ret = txq_submit_skb(txq, skb, dev);
1014 txq->tx_bytes += length;
1015 txq->tx_packets++;
1017 if (txq->tx_desc_count >= txq->tx_stop_threshold)
1020 txq->tx_dropped++;
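
Lines 1017 and 499, together with the thresholds set up at 2039-2040, form the queue's flow-control hysteresis: transmission is stopped once tx_desc_count reaches tx_stop_threshold (the ring size minus the worst-case descriptor need of a single skb) and only woken again when reclaim drains it to tx_wake_threshold, half of that. A sketch of the two checks; the per-skb worst case is an assumed constant here:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SKB_DESCS 36        /* assumed worst case per skb, for the sketch */

    struct toy_txq {
            unsigned int tx_ring_size;
            unsigned int tx_desc_count;     /* descriptors currently in flight */
            unsigned int tx_stop_threshold;
            unsigned int tx_wake_threshold;
    };

    static void init_thresholds(struct toy_txq *txq)
    {
            txq->tx_stop_threshold = txq->tx_ring_size - MAX_SKB_DESCS;
            txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
    }

    /* After queuing an skb: stop the netdev queue if the next one might not fit */
    static bool should_stop(const struct toy_txq *txq)
    {
            return txq->tx_desc_count >= txq->tx_stop_threshold;
    }

    /* After reclaiming: wake the netdev queue once enough room has been freed */
    static bool should_wake(const struct toy_txq *txq)
    {
            return txq->tx_desc_count <= txq->tx_wake_threshold;
    }

    int main(void)
    {
            struct toy_txq q = { .tx_ring_size = 512 };

            init_thresholds(&q);
            q.tx_desc_count = 476;
            printf("stop=%d wake=%d\n", should_stop(&q), should_wake(&q)); /* 1 0 */
            q.tx_desc_count = 238;
            printf("stop=%d wake=%d\n", should_stop(&q), should_wake(&q)); /* 0 1 */
            return 0;
    }
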
1029 static void txq_kick(struct tx_queue *txq)
1031 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1032 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1038 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1041 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1042 expected_ptr = (u32)txq->tx_desc_dma +
1043 txq->tx_curr_desc * sizeof(struct tx_desc);
1046 txq_enable(txq);
1051 mp->work_tx_end &= ~(1 << txq->index);
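
txq_kick (lines 1029-1051) deals with a queue that has stopped: if the TXQ_COMMAND enable bit for the queue has already cleared, it compares the hardware's current-descriptor pointer with the address software expects and restarts the queue when the hardware stopped short of descriptors that were already queued. The direction of that comparison is a reading of the surrounding driver code, not of the matched lines themselves; a sketch with the register reads passed in as plain values:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_tx_desc { uint32_t cmd_sts, byte_cnt, buf_ptr, next_desc_ptr; };

    struct toy_txq {
            unsigned int index;
            uint64_t tx_desc_dma;
            unsigned int tx_curr_desc;
    };

    /* Decide whether the queue needs a restart. The driver reads TXQ_COMMAND
     * and TXQ_CURRENT_DESC_PTR(index) from registers; here they arrive as
     * arguments so the sketch stays self-contained.
     */
    static bool txq_should_kick(const struct toy_txq *txq,
                                uint32_t txq_command, uint64_t hw_desc_ptr)
    {
            uint64_t expected_ptr;

            if (txq_command & (1u << txq->index))
                    return false;           /* hardware is still running the queue */

            expected_ptr = txq->tx_desc_dma +
                           txq->tx_curr_desc * sizeof(struct toy_tx_desc);

            /* Restart only if hardware stopped before reaching what software queued */
            return hw_desc_ptr != expected_ptr;
    }

    int main(void)
    {
            struct toy_txq q = { .index = 0, .tx_desc_dma = 0x20000, .tx_curr_desc = 4 };

            /* Queue idle, hardware stuck at descriptor 2 (0x20020): kick it */
            return txq_should_kick(&q, 0, 0x20020) ? 0 : 1;
    }
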
1054 static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1056 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1057 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1063 while (reclaimed < budget && txq->tx_desc_count > 0) {
1069 tx_index = txq->tx_used_desc;
1070 desc = &txq->tx_desc_area[tx_index];
1071 desc_dma_map = txq->tx_desc_mapping[tx_index];
1081 txq->tx_used_desc = tx_index + 1;
1082 if (txq->tx_used_desc == txq->tx_ring_size)
1083 txq->tx_used_desc = 0;
1086 txq->tx_desc_count--;
1088 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
1103 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1119 mp->work_tx &= ~(1 << txq->index);
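
txq_reclaim (lines 1054-1119) is the consumer side of the ring: it walks tx_used_desc forward under a descriptor budget, drops tx_desc_count, and uses IS_TSO_HEADER (line 1088) to skip the DMA unmap for buffers that live in the shared tso_hdrs area. A sketch of one reclaim step, with the descriptor-ownership check and the actual unmap left out:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TSO_HEADER_SIZE 128             /* assumed, as in the earlier sketch */

    struct toy_txq {
            unsigned int tx_used_desc;      /* consumer index */
            unsigned int tx_ring_size;
            unsigned int tx_desc_count;     /* descriptors still in flight */
            uint64_t tso_hdrs_dma;
    };

    static bool is_tso_header(const struct toy_txq *txq, uint64_t addr)
    {
            return addr >= txq->tso_hdrs_dma &&
                   addr <  txq->tso_hdrs_dma +
                           (uint64_t)txq->tx_ring_size * TSO_HEADER_SIZE;
    }

    /* Retire one descriptor: advance the consumer index with wraparound, drop
     * the in-flight count, and report whether the buffer would need dma_unmap.
     */
    static bool reclaim_one(struct toy_txq *txq, uint64_t buf_ptr)
    {
            txq->tx_used_desc++;
            if (txq->tx_used_desc == txq->tx_ring_size)
                    txq->tx_used_desc = 0;
            txq->tx_desc_count--;

            return !is_tso_header(txq, buf_ptr);
    }

    int main(void)
    {
            struct toy_txq q = { .tx_used_desc = 511, .tx_ring_size = 512,
                                 .tx_desc_count = 2, .tso_hdrs_dma = 0x10000 };

            printf("%d\n", reclaim_one(&q, 0x10000));       /* 0: TSO header, no unmap */
            printf("%d\n", reclaim_one(&q, 0x90000));       /* 1: mapped data, unmap */
            printf("%u %u\n", q.tx_used_desc, q.tx_desc_count);     /* 1 0 */
            return 0;
    }
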
1162 static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
1164 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1176 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1177 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
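
txq_set_rate (lines 1162-1177) programs the per-queue shaper by packing two fields into each 32-bit register, as the shifts in lines 1176-1177 show. A sketch of the TXQ_BW_CONF packing; the 10-bit clamp on token_rate is inferred from the shift amount, not from the matched lines:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack bucket size and token rate the way line 1177 does: bucket size
     * above bit 10, token rate in the low bits. The clamp keeps token_rate
     * inside the 10-bit field implied by the shift (an assumption here).
     */
    static uint32_t txq_bw_conf(uint32_t bucket_size, uint32_t token_rate)
    {
            if (token_rate > 0x3ff)
                    token_rate = 0x3ff;
            return (bucket_size << 10) | token_rate;
    }

    int main(void)
    {
            printf("0x%08x\n", (unsigned)txq_bw_conf(3, 500));      /* 0x00000df4 */
            return 0;
    }
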
1180 static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1182 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1201 val |= 1 << txq->index;
1259 struct tx_queue *txq = mp->txq + i;
1261 tx_packets += txq->tx_packets;
1262 tx_bytes += txq->tx_bytes;
1263 tx_dropped += txq->tx_dropped;
2025 struct tx_queue *txq = mp->txq + index;
2031 txq->index = index;
2033 txq->tx_ring_size = mp->tx_ring_size;
2039 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2040 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
2042 txq->tx_desc_count = 0;
2043 txq->tx_curr_desc = 0;
2044 txq->tx_used_desc = 0;
2046 size = txq->tx_ring_size * sizeof(struct tx_desc);
2049 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2051 txq->tx_desc_dma = mp->tx_desc_sram_addr;
2053 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2054 size, &txq->tx_desc_dma,
2058 if (txq->tx_desc_area == NULL) {
2063 memset(txq->tx_desc_area, 0, size);
2065 txq->tx_desc_area_size = size;
2067 tx_desc = txq->tx_desc_area;
2068 for (i = 0; i < txq->tx_ring_size; i++) {
2073 if (nexti == txq->tx_ring_size)
2077 txd->next_desc_ptr = txq->tx_desc_dma +
2081 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
2083 if (!txq->tx_desc_mapping) {
2089 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2090 txq->tx_ring_size * TSO_HEADER_SIZE,
2091 &txq->tso_hdrs_dma, GFP_KERNEL);
2092 if (txq->tso_hdrs == NULL) {
2096 skb_queue_head_init(&txq->tx_skb);
2101 kfree(txq->tx_desc_mapping);
2104 iounmap(txq->tx_desc_area);
2106 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2107 txq->tx_desc_area, txq->tx_desc_dma);
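
The loop in txq_init at lines 2067-2077 links the freshly allocated descriptors into a circular list the hardware can follow: each next_desc_ptr gets the bus address of the following descriptor, and the last entry wraps back to the first. A sketch of that chaining over a toy four-entry ring (the allocation and the unlisted parts of the loop body are omitted):

    #include <stdint.h>
    #include <stdio.h>

    struct toy_tx_desc { uint32_t cmd_sts, byte_cnt, buf_ptr, next_desc_ptr; };

    /* Chain ring_size descriptors into a circle of bus addresses, as the
     * txq_init loop does. desc_dma is the bus address of descriptor 0.
     */
    static void chain_ring(struct toy_tx_desc *ring, unsigned int ring_size,
                           uint32_t desc_dma)
    {
            for (unsigned int i = 0; i < ring_size; i++) {
                    unsigned int nexti = i + 1;

                    if (nexti == ring_size)
                            nexti = 0;      /* wrap the tail back to the head */

                    ring[i].next_desc_ptr = desc_dma +
                                            nexti * sizeof(struct toy_tx_desc);
            }
    }

    int main(void)
    {
            struct toy_tx_desc ring[4] = { {0} };

            chain_ring(ring, 4, 0x20000);
            printf("0x%x 0x%x\n", (unsigned)ring[0].next_desc_ptr,
                   (unsigned)ring[3].next_desc_ptr);    /* 0x20010 0x20000 */
            return 0;
    }
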
2111 static void txq_deinit(struct tx_queue *txq)
2113 struct mv643xx_eth_private *mp = txq_to_mp(txq);
2115 txq_disable(txq);
2116 txq_reclaim(txq, txq->tx_ring_size, 1);
2118 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
2120 if (txq->index == 0 &&
2121 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2122 iounmap(txq->tx_desc_area);
2124 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2125 txq->tx_desc_area, txq->tx_desc_dma);
2126 kfree(txq->tx_desc_mapping);
2128 if (txq->tso_hdrs)
2130 txq->tx_ring_size * TSO_HEADER_SIZE,
2131 txq->tso_hdrs, txq->tso_hdrs_dma);
2201 struct tx_queue *txq = mp->txq + i;
2203 txq_reclaim(txq, txq->tx_ring_size, 1);
2204 txq_reset_hw_ptr(txq);
2277 txq_kick(mp->txq + queue);
2279 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2280 txq_maybe_wake(mp->txq + queue);
2344 struct tx_queue *txq = mp->txq + i;
2346 txq_reset_hw_ptr(txq);
2347 txq_set_rate(txq, 1000000000, 16777216);
2348 txq_set_fixed_prio_mode(txq);
2455 txq_deinit(mp->txq + i);
2488 txq_disable(mp->txq + i);
2532 txq_deinit(mp->txq + i);