Lines matching references to `txp` (cross-reference listing for drivers/net/ifb.c)

91 	struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
95 txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
96 skb = skb_peek(&txp->tq);
100 skb_queue_splice_tail_init(&txp->rq, &txp->tq);
104 while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
112 ifb_update_q_stats(&txp->tx_stats, skb->len);
115 skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
119 txp->dev->stats.tx_dropped++;
120 if (skb_queue_len(&txp->tq) != 0)
125 skb->skb_iif = txp->dev->ifindex;
136 skb = skb_peek(&txp->rq);
138 txp->tasklet_pending = 0;
148 txp->tasklet_pending = 1;
149 tasklet_schedule(&txp->ifb_tasklet);
158 struct ifb_q_private *txp = dp->tx_private;
163 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
165 start = u64_stats_fetch_begin(&txp->rx_stats.sync);
166 packets = txp->rx_stats.packets;
167 bytes = txp->rx_stats.bytes;
168 } while (u64_stats_fetch_retry(&txp->rx_stats.sync, start));
173 start = u64_stats_fetch_begin(&txp->tx_stats.sync);
174 packets = txp->tx_stats.packets;
175 bytes = txp->tx_stats.bytes;
176 } while (u64_stats_fetch_retry(&txp->tx_stats.sync, start));
187 struct ifb_q_private *txp;
190 txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
191 if (!txp)
193 dp->tx_private = txp;
194 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
195 txp->txqnum = i;
196 txp->dev = dev;
197 __skb_queue_head_init(&txp->rq);
198 __skb_queue_head_init(&txp->tq);
199 u64_stats_init(&txp->rx_stats.sync);
200 u64_stats_init(&txp->tx_stats.sync);
201 tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
262 struct ifb_q_private *txp;
266 txp = dp->tx_private + i;
267 ifb_fill_stats_data(&data, &txp->rx_stats);
271 txp = dp->tx_private + i;
272 ifb_fill_stats_data(&data, &txp->tx_stats);
299 struct ifb_q_private *txp = dp->tx_private;
302 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
303 tasklet_kill(&txp->ifb_tasklet);
304 __skb_queue_purge(&txp->rq);
305 __skb_queue_purge(&txp->tq);
341 struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
343 ifb_update_q_stats(&txp->rx_stats, skb->len);
351 if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
352 netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
354 __skb_queue_tail(&txp->rq, skb);
355 if (!txp->tasklet_pending) {
356 txp->tasklet_pending = 1;
357 tasklet_schedule(&txp->ifb_tasklet);