Lines matching refs: txq (transmit-queue references in the mv643xx_eth driver; each entry shows the source line number followed by the matching line)
179 #define IS_TSO_HEADER(txq, addr) \
180 ((addr >= txq->tso_hdrs_dma) && \
181 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
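IS_TSO_HEADER() is a plain address-range test: a descriptor whose buf_ptr falls inside the queue's preallocated TSO header block was never handed to dma_map_single(), so reclaim must not unmap it (see line 1091 below). A minimal standalone sketch of the same test, using mock types, a stand-in TSO_HEADER_SIZE value and made-up addresses rather than the driver's structures:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the TSO_HEADER_SIZE constant from net/tso.h; the exact
     * value does not matter for the range test itself. */
    #define TSO_HEADER_SIZE 128

    /* Mock of the two fields the macro reads. */
    struct tx_queue {
        uintptr_t tso_hdrs_dma;   /* bus address of the per-queue header block */
        int tx_ring_size;
    };

    /* Same range test as IS_TSO_HEADER(txq, addr) at lines 179-181. */
    static bool is_tso_header(const struct tx_queue *txq, uintptr_t addr)
    {
        return addr >= txq->tso_hdrs_dma &&
               addr < txq->tso_hdrs_dma +
                      (uintptr_t)txq->tx_ring_size * TSO_HEADER_SIZE;
    }

    int main(void)
    {
        struct tx_queue txq = { .tso_hdrs_dma = 0x10000, .tx_ring_size = 512 };

        /* Address inside the header block: reclaim must not dma-unmap it. */
        printf("%d\n", is_tso_header(&txq, 0x10000 + 3 * TSO_HEADER_SIZE));
        /* Address outside the block: an ordinarily mapped payload buffer. */
        printf("%d\n", is_tso_header(&txq, 0x90000));
        return 0;
    }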
408 struct tx_queue txq[8];
440 /* rxq/txq helper functions *************************************************/
446 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
448 return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
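txq_to_mp() recovers the owning private struct from a pointer to one element of its embedded txq[] array. A standalone sketch of the same container_of() arithmetic with reduced mock types (eth_private and txq_to_priv here are stand-ins, not the driver's names); indexing the array member inside offsetof() compiles with GCC/Clang, which is the construct the driver itself relies on:

    #include <stddef.h>
    #include <stdio.h>

    struct tx_queue { int index; };
    struct eth_private { int port_num; struct tx_queue txq[8]; };

    /* Kernel-style container_of(): step back from a member's address to the
     * start of the structure that embeds it. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct eth_private *txq_to_priv(struct tx_queue *txq)
    {
        /* Same shape as txq_to_mp() at line 448. */
        return container_of(txq, struct eth_private, txq[txq->index]);
    }

    int main(void)
    {
        struct eth_private mp = { .port_num = 2 };

        mp.txq[5].index = 5;
        /* From &mp.txq[5] we land back on &mp. */
        printf("port %d\n", txq_to_priv(&mp.txq[5])->port_num);
        return 0;
    }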
467 static void txq_reset_hw_ptr(struct tx_queue *txq)
469 struct mv643xx_eth_private *mp = txq_to_mp(txq);
472 addr = (u32)txq->tx_desc_dma;
473 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
474 wrlp(mp, TXQ_CURRENT_DESC_PTR(txq->index), addr);
477 static void txq_enable(struct tx_queue *txq)
479 struct mv643xx_eth_private *mp = txq_to_mp(txq);
480 wrlp(mp, TXQ_COMMAND, 1 << txq->index);
483 static void txq_disable(struct tx_queue *txq)
485 struct mv643xx_eth_private *mp = txq_to_mp(txq);
486 u8 mask = 1 << txq->index;
493 static void txq_maybe_wake(struct tx_queue *txq)
495 struct mv643xx_eth_private *mp = txq_to_mp(txq);
496 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
500 if (txq->tx_desc_count <= txq->tx_wake_threshold)
727 txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
735 tx_index = txq->tx_curr_desc++;
736 if (txq->tx_curr_desc == txq->tx_ring_size)
737 txq->tx_curr_desc = 0;
738 desc = &txq->tx_desc_area[tx_index];
739 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
746 memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
748 desc->buf_ptr = txq->tso_hdrs_dma
752 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
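The claim-and-advance step at lines 735-737 is the same producer-index pattern used in every submit path (813-815, 903-905, 954-956): take the current slot, bump tx_curr_desc, and wrap it to zero at the end of the ring. A tiny standalone sketch of that pattern with a made-up ring size:

    #include <stdio.h>

    #define RING_SIZE 8   /* small ring for the demo; the driver uses tx_ring_size */

    /* Claim the next descriptor slot and advance the producer index,
     * wrapping to 0 at the end of the ring. */
    static int claim_slot(int *curr)
    {
        int slot = (*curr)++;

        if (*curr == RING_SIZE)
            *curr = 0;
        return slot;
    }

    int main(void)
    {
        int curr = 6;

        for (int i = 0; i < 4; i++)
            printf("claimed slot %d\n", claim_slot(&curr));
        /* Prints 6, 7, 0, 1: the index wraps instead of running off the ring. */
        return 0;
    }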
775 txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length,
778 struct mv643xx_eth_private *mp = txq_to_mp(txq);
787 tx_index = txq->tx_curr_desc;
788 desc = &txq->tx_desc_area[tx_index];
800 desc->buf_ptr = txq->tso_hdrs_dma +
801 txq->tx_curr_desc * TSO_HEADER_SIZE;
813 txq->tx_curr_desc++;
814 if (txq->tx_curr_desc == txq->tx_ring_size)
815 txq->tx_curr_desc = 0;
818 static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
821 struct mv643xx_eth_private *mp = txq_to_mp(txq);
829 if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
834 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc];
849 hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
851 txq_put_hdr_tso(skb, txq, data_left, &first_cmd_sts,
859 ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
869 __skb_queue_tail(&txq->tx_skb, skb);
877 mp->work_tx_end &= ~(1 << txq->index);
881 txq_enable(txq);
882 txq->tx_desc_count += desc_count;
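txq_submit_tso() first checks that the whole request fits in the ring (line 829) and then walks the payload segment by segment, emitting one header descriptor (txq_put_hdr_tso) plus data descriptors (txq_put_data_tso) per segment. A rough standalone sketch of just the segmentation arithmetic, with example payload and MSS values that are not taken from the driver:

    #include <stdio.h>

    /* Sketch of carving a TSO payload into per-segment chunks, as in the loop
     * driving the calls around lines 849-859.  Descriptor setup, headers and
     * DMA mapping are all omitted. */
    int main(void)
    {
        int total = 5000;   /* example TCP payload bytes in the skb */
        int mss = 1448;     /* example bytes carried per segment */
        int seg = 0;

        while (total > 0) {
            int data_left = total < mss ? total : mss;

            /* One header descriptor plus data descriptor(s) per segment. */
            printf("segment %d: header + %d payload bytes\n", seg++, data_left);
            total -= data_left;
        }
        return 0;
    }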
891 static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
893 struct mv643xx_eth_private *mp = txq_to_mp(txq);
903 tx_index = txq->tx_curr_desc++;
904 if (txq->tx_curr_desc == txq->tx_ring_size)
905 txq->tx_curr_desc = 0;
906 desc = &txq->tx_desc_area[tx_index];
907 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
929 static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
932 struct mv643xx_eth_private *mp = txq_to_mp(txq);
943 if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
954 tx_index = txq->tx_curr_desc++;
955 if (txq->tx_curr_desc == txq->tx_ring_size)
956 txq->tx_curr_desc = 0;
957 desc = &txq->tx_desc_area[tx_index];
958 txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
961 txq_submit_frag_skb(txq, skb);
973 __skb_queue_tail(&txq->tx_skb, skb);
982 mp->work_tx_end &= ~(1 << txq->index);
986 txq_enable(txq);
988 txq->tx_desc_count += nr_frags + 1;
997 struct tx_queue *txq;
1001 txq = mp->txq + queue;
1013 ret = txq_submit_tso(txq, skb, dev);
1015 ret = txq_submit_skb(txq, skb, dev);
1017 txq->tx_bytes += length;
1018 txq->tx_packets++;
1020 if (txq->tx_desc_count >= txq->tx_stop_threshold)
1023 txq->tx_dropped++;
1032 static void txq_kick(struct tx_queue *txq)
1034 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1035 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1041 if (rdlp(mp, TXQ_COMMAND) & (1 << txq->index))
1044 hw_desc_ptr = rdlp(mp, TXQ_CURRENT_DESC_PTR(txq->index));
1045 expected_ptr = (u32)txq->tx_desc_dma +
1046 txq->tx_curr_desc * sizeof(struct tx_desc);
1049 txq_enable(txq);
1054 mp->work_tx_end &= ~(1 << txq->index);
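txq_kick() re-enables a queue that disabled itself before the hardware reached the last descriptor software queued: if the queue's TXQ_COMMAND bit is clear but its current-descriptor pointer differs from where tx_curr_desc says it should be, the queue is enabled again (lines 1041-1049). A sketch of that decision with the register reads replaced by plain parameters:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Decide whether a stalled queue needs a kick.  Register access is faked
     * with plain variables; locking and work flags are omitted. */
    static bool needs_kick(uint32_t txq_command, unsigned int index,
                           uint32_t hw_desc_ptr, uint32_t expected_ptr)
    {
        if (txq_command & (1u << index))
            return false;                   /* queue is still running */
        return hw_desc_ptr != expected_ptr; /* stopped short of the queued tail */
    }

    int main(void)
    {
        printf("%d\n", needs_kick(0x0, 2, 0x1000, 0x1020)); /* 1: re-enable */
        printf("%d\n", needs_kick(0x4, 2, 0x1000, 0x1020)); /* 0: still active */
        return 0;
    }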
1057 static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1059 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1060 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
1066 while (reclaimed < budget && txq->tx_desc_count > 0) {
1072 tx_index = txq->tx_used_desc;
1073 desc = &txq->tx_desc_area[tx_index];
1074 desc_dma_map = txq->tx_desc_mapping[tx_index];
1084 txq->tx_used_desc = tx_index + 1;
1085 if (txq->tx_used_desc == txq->tx_ring_size)
1086 txq->tx_used_desc = 0;
1089 txq->tx_desc_count--;
1091 if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
1106 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1122 mp->work_tx &= ~(1 << txq->index);
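txq_reclaim() frees completed descriptors under a budget, advancing the consumer index tx_used_desc with the usual wrap and decrementing tx_desc_count (lines 1066-1089); the real loop also checks descriptor status/ownership, skips the dma-unmap for TSO header buffers (line 1091) and dequeues the finished skb, all of which this bare-bones sketch of the budgeted loop omits:

    #include <stdio.h>

    #define RING_SIZE 8

    /* Release up to 'budget' descriptors, advancing the consumer index with
     * the same wrap as tx_used_desc. */
    static int reclaim(int *used, int *count, int budget)
    {
        int reclaimed = 0;

        while (reclaimed < budget && *count > 0) {
            int slot = *used;

            *used = slot + 1;
            if (*used == RING_SIZE)
                *used = 0;

            (*count)--;
            reclaimed++;
            printf("released slot %d\n", slot);
        }
        return reclaimed;
    }

    int main(void)
    {
        int used = 6, count = 5;

        /* NAPI-style budget: only part of the ring is cleaned per call. */
        printf("reclaimed %d\n", reclaim(&used, &count, 3));
        printf("reclaimed %d\n", reclaim(&used, &count, 16));
        return 0;
    }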
1165 static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
1167 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1179 wrlp(mp, TXQ_BW_TOKENS(txq->index), token_rate << 14);
1180 wrlp(mp, TXQ_BW_CONF(txq->index), (bucket_size << 10) | token_rate);
1183 static void txq_set_fixed_prio_mode(struct tx_queue *txq)
1185 struct mv643xx_eth_private *mp = txq_to_mp(txq);
1204 val |= 1 << txq->index;
1262 struct tx_queue *txq = mp->txq + i;
1264 tx_packets += txq->tx_packets;
1265 tx_bytes += txq->tx_bytes;
1266 tx_dropped += txq->tx_dropped;
2036 struct tx_queue *txq = mp->txq + index;
2042 txq->index = index;
2044 txq->tx_ring_size = mp->tx_ring_size;
2050 txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
2051 txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
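Lines 2050-2051 set up hysteresis between stopping and waking the queue: transmission stops once too few free slots remain for a worst-case skb (line 1020), and the queue is only woken again after the in-use count drops to half of that threshold (line 500), so it does not flap on and off. A standalone sketch with a stand-in value for MV643XX_MAX_SKB_DESCS:

    #include <stdbool.h>
    #include <stdio.h>

    /* MAX_SKB_DESCS is a stand-in for MV643XX_MAX_SKB_DESCS, not its real value. */
    #define MAX_SKB_DESCS 20
    #define RING_SIZE     512

    static const int stop_threshold = RING_SIZE - MAX_SKB_DESCS;
    /* i.e. stop_threshold / 2, as at line 2051 */
    static const int wake_threshold = (RING_SIZE - MAX_SKB_DESCS) / 2;

    /* Stop check as at line 1020, wake check as at line 500. */
    static bool should_stop(int desc_count) { return desc_count >= stop_threshold; }
    static bool should_wake(int desc_count) { return desc_count <= wake_threshold; }

    int main(void)
    {
        printf("stop at %d in use: %d\n", 495, should_stop(495));
        printf("wake at %d in use: %d\n", 400, should_wake(400));
        printf("wake at %d in use: %d\n", 246, should_wake(246));
        return 0;
    }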
2053 txq->tx_desc_count = 0;
2054 txq->tx_curr_desc = 0;
2055 txq->tx_used_desc = 0;
2057 size = txq->tx_ring_size * sizeof(struct tx_desc);
2060 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
2062 txq->tx_desc_dma = mp->tx_desc_sram_addr;
2064 txq->tx_desc_area = dma_alloc_coherent(mp->dev->dev.parent,
2065 size, &txq->tx_desc_dma,
2069 if (txq->tx_desc_area == NULL) {
2074 memset(txq->tx_desc_area, 0, size);
2076 txq->tx_desc_area_size = size;
2078 tx_desc = txq->tx_desc_area;
2079 for (i = 0; i < txq->tx_ring_size; i++) {
2084 if (nexti == txq->tx_ring_size)
2088 txd->next_desc_ptr = txq->tx_desc_dma +
2092 txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
2094 if (!txq->tx_desc_mapping) {
2100 txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
2101 txq->tx_ring_size * TSO_HEADER_SIZE,
2102 &txq->tso_hdrs_dma, GFP_KERNEL);
2103 if (txq->tso_hdrs == NULL) {
2107 skb_queue_head_init(&txq->tx_skb);
2112 kfree(txq->tx_desc_mapping);
2115 iounmap(txq->tx_desc_area);
2117 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2118 txq->tx_desc_area, txq->tx_desc_dma);
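txq_init() allocates the descriptor area (from SRAM or coherent DMA memory) and then links each descriptor's next_desc_ptr to the bus address of the following descriptor, with the last one pointing back at the first, so the hardware sees a circular list (lines 2078-2088). A standalone sketch of that chaining with a reduced descriptor and a made-up bus address:

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 4

    /* Reduced descriptor: only the chaining field of struct tx_desc matters here. */
    struct tx_desc_sketch {
        uint32_t next_desc_ptr;
    };

    int main(void)
    {
        struct tx_desc_sketch ring[RING_SIZE];
        uint32_t ring_dma = 0x1000;   /* pretend bus address of the descriptor area */

        /* Link each descriptor to the next, and the last back to the first,
         * mirroring the loop at lines 2079-2088. */
        for (int i = 0; i < RING_SIZE; i++) {
            int nexti = i + 1;

            if (nexti == RING_SIZE)
                nexti = 0;
            ring[i].next_desc_ptr = ring_dma +
                        nexti * (uint32_t)sizeof(struct tx_desc_sketch);
        }

        for (int i = 0; i < RING_SIZE; i++)
            printf("desc %d -> 0x%x\n", i, (unsigned)ring[i].next_desc_ptr);
        return 0;
    }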
2122 static void txq_deinit(struct tx_queue *txq)
2124 struct mv643xx_eth_private *mp = txq_to_mp(txq);
2126 txq_disable(txq);
2127 txq_reclaim(txq, txq->tx_ring_size, 1);
2129 BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
2131 if (txq->index == 0 &&
2132 txq->tx_desc_area_size <= mp->tx_desc_sram_size)
2133 iounmap(txq->tx_desc_area);
2135 dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
2136 txq->tx_desc_area, txq->tx_desc_dma);
2137 kfree(txq->tx_desc_mapping);
2139 if (txq->tso_hdrs)
2141 txq->tx_ring_size * TSO_HEADER_SIZE,
2142 txq->tso_hdrs, txq->tso_hdrs_dma);
2212 struct tx_queue *txq = mp->txq + i;
2214 txq_reclaim(txq, txq->tx_ring_size, 1);
2215 txq_reset_hw_ptr(txq);
2288 txq_kick(mp->txq + queue);
2290 work_done += txq_reclaim(mp->txq + queue, work_tbd, 0);
2291 txq_maybe_wake(mp->txq + queue);
2355 struct tx_queue *txq = mp->txq + i;
2357 txq_reset_hw_ptr(txq);
2358 txq_set_rate(txq, 1000000000, 16777216);
2359 txq_set_fixed_prio_mode(txq);
2466 txq_deinit(mp->txq + i);
2499 txq_disable(mp->txq + i);
2543 txq_deinit(mp->txq + i);