Lines Matching refs:tx_queue
34 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
37 unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
39 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
44 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
52 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
57 return efx_tx_get_copy_buffer(tx_queue, buffer);
99 static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
109 buffer = efx_tx_queue_get_insert_buffer(tx_queue);
111 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
122 ++tx_queue->insert_count;
220 static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
224 efx_tx_queue_get_insert_buffer(tx_queue);
225 u8 __iomem *piobuf = tx_queue->piobuf;
240 efx_skb_copy_bits_to_pio(tx_queue->efx, skb,
242 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf);
250 __iowrite64_copy(tx_queue->piobuf, skb->data,
263 tx_queue->piobuf_offset);
264 ++tx_queue->insert_count;
276 static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
278 struct efx_channel *channel = tx_queue->channel;
280 if (!tx_queue->piobuf)
285 efx_for_each_channel_tx_queue(tx_queue, channel)
286 if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
322 netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
324 unsigned int old_insert_count = tx_queue->insert_count;
341 switch (tx_queue->tso_version) {
343 rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
346 rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
354 rc = efx_tx_tso_fallback(tx_queue, skb);
355 tx_queue->tso_fallbacks++;
363 efx_tx_may_pio(tx_queue)) {
365 if (efx_enqueue_skb_pio(tx_queue, skb))
367 tx_queue->pio_packets++;
372 if (efx_enqueue_skb_copy(tx_queue, skb))
374 tx_queue->cb_packets++;
379 if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments)))
382 efx_tx_maybe_stop_queue(tx_queue);
384 tx_queue->xmit_pending = true;
387 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
388 efx_tx_send_pending(tx_queue->channel);
391 tx_queue->tso_bursts++;
392 tx_queue->tso_packets += segments;
393 tx_queue->tx_packets += segments;
395 tx_queue->tx_packets++;
402 efx_enqueue_unwind(tx_queue, old_insert_count);
410 efx_tx_send_pending(tx_queue->channel);
425 struct efx_tx_queue *tx_queue;
442 tx_queue = efx->xdp_tx_queues[cpu];
443 if (unlikely(!tx_queue))
446 if (!tx_queue->initialised)
450 HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
456 if (netif_tx_queue_stopped(tx_queue->core_txq))
458 efx_tx_maybe_stop_queue(tx_queue);
465 tx_queue->read_count - tx_queue->insert_count;
474 prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
486 tx_buffer = efx_tx_map_chunk(tx_queue, dma_addr, len);
492 tx_queue->tx_packets++;
497 efx_nic_push_buffers(tx_queue);
501 HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
516 struct efx_tx_queue *tx_queue;
535 tx_queue = efx_get_tx_queue(efx, index, type);
536 if (WARN_ON_ONCE(!tx_queue)) {
551 return __efx_enqueue_skb(tx_queue, skb);
554 void efx_xmit_done_single(struct efx_tx_queue *tx_queue)
561 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
564 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
567 struct efx_nic *efx = tx_queue->efx;
571 tx_queue->queue);
579 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl,
582 ++tx_queue->read_count;
583 read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
586 tx_queue->pkts_compl += pkts_compl;
587 tx_queue->bytes_compl += bytes_compl;
591 efx_xmit_done_check_empty(tx_queue);
594 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
596 struct efx_nic *efx = tx_queue->efx;
599 tx_queue->core_txq =
601 tx_queue->channel->channel);
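
Taken together, the matches above trace what appears to be the sfc driver's transmit enqueue path (tx.c): __efx_enqueue_skb() first dispatches on tx_queue->tso_version (efx_enqueue_skb_tso(), efx_ef10_tx_tso_desc(), with efx_tx_tso_fallback() as the fallback), then tries PIO when a PIO buffer is assigned and the channel's queues are empty (efx_tx_may_pio(), efx_enqueue_skb_pio()), then the shared copy buffer (efx_enqueue_skb_copy()), and finally plain DMA mapping (efx_tx_map_data()). The standalone sketch below models only that decision order. Every name in it (fake_queue, choose_tx_path) and both size thresholds are hypothetical simplifications for illustration, not the driver's real structures, limits, or conditions.

/*
 * Illustrative sketch only: a self-contained model of the enqueue
 * decision order suggested by the matched lines (TSO by hardware
 * version, then PIO on an empty queue, then the copy buffer, then
 * normal DMA mapping). All types, helpers, and thresholds here are
 * made up for the example; they are not the sfc driver's code.
 */
#include <stdbool.h>
#include <stdio.h>

enum tx_path { TX_TSO, TX_PIO, TX_COPY_BUF, TX_DMA_MAP };

struct fake_queue {
	int  tso_version;     /* 0 = no hardware TSO offload */
	bool piobuf_assigned; /* a PIO region is mapped for this queue */
	bool queue_empty;     /* no descriptors outstanding on the channel */
};

/* Thresholds chosen only for the example; the real driver's limits differ. */
#define PIO_MAX_LEN  256
#define COPY_MAX_LEN 192

static enum tx_path choose_tx_path(const struct fake_queue *q,
				   unsigned int pkt_len, bool gso)
{
	if (gso && q->tso_version)
		return TX_TSO;
	if (pkt_len <= PIO_MAX_LEN && q->piobuf_assigned && q->queue_empty)
		return TX_PIO;
	if (pkt_len <= COPY_MAX_LEN)
		return TX_COPY_BUF;
	return TX_DMA_MAP;
}

static const char *const path_name[] = {
	[TX_TSO]      = "TSO",
	[TX_PIO]      = "PIO",
	[TX_COPY_BUF] = "copy buffer",
	[TX_DMA_MAP]  = "DMA map",
};

int main(void)
{
	struct fake_queue q = { .tso_version = 2,
				.piobuf_assigned = true,
				.queue_empty = true };

	printf("64-byte packet   -> %s\n", path_name[choose_tx_path(&q, 64, false)]);
	printf("1500-byte packet -> %s\n", path_name[choose_tx_path(&q, 1500, false)]);
	printf("GSO super-packet -> %s\n", path_name[choose_tx_path(&q, 64000, true)]);
	return 0;
}

In the real driver the PIO and copy-buffer checks also depend on conditions not visible in these matches (for example, whether the skb data is linear), so the sketch should be read only as a map of the branch order, not of the exact predicates.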