// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "workarounds.h"

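/* Per-descriptor copy buffers: each TX ring entry owns a slot of
 * EF4_TX_CB_SIZE (1 << EF4_TX_CB_ORDER) bytes carved out of a shared
 * page.  The page index is insert_index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)
 * and the offset within that page is (insert_index << EF4_TX_CB_ORDER)
 * plus NET_IP_ALIGN, wrapped at PAGE_SIZE.  Pages are allocated lazily
 * on first use.
 */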
static inline u8 *ef4_tx_get_copy_buffer(struct ef4_tx_queue *tx_queue,
					 struct ef4_tx_buffer *buffer)
{
	unsigned int index = ef4_tx_queue_get_insert_index(tx_queue);
	struct ef4_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EF4_TX_CB_ORDER)];
	unsigned int offset =
		((index << EF4_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    ef4_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				 GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}

u8 *ef4_tx_get_copy_buffer_limited(struct ef4_tx_queue *tx_queue,
				   struct ef4_tx_buffer *buffer, size_t len)
{
	if (len > EF4_TX_CB_SIZE)
		return NULL;
	return ef4_tx_get_copy_buffer(tx_queue, buffer);
}

static void ef4_dequeue_buffer(struct ef4_tx_queue *tx_queue,
			       struct ef4_tx_buffer *buffer,
			       unsigned int *pkts_compl,
			       unsigned int *bytes_compl)
{
	if (buffer->unmap_len) {
		struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
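		/* buffer->dma_addr may point into the middle of the
		 * mapping; subtracting dma_offset recovers the address
		 * originally returned by dma_map_single()/dma_map_page().
		 */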
		dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
		if (buffer->flags & EF4_TX_BUF_MAP_SINGLE)
			dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
				       DMA_TO_DEVICE);
		buffer->unmap_len = 0;
	}

	if (buffer->flags & EF4_TX_BUF_SKB) {
		(*pkts_compl)++;
		(*bytes_compl) += buffer->skb->len;
		dev_consume_skb_any((struct sk_buff *)buffer->skb);
		netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
			   "TX queue %d transmission id %x complete\n",
			   tx_queue->queue, tx_queue->read_count);
	}

	buffer->len = 0;
	buffer->flags = 0;
}

unsigned int ef4_tx_max_skb_descs(struct ef4_nic *efx)
{
	/* This is probably too much, since this driver has no TSO
	 * support; it is a leftover from when we had software TSO.  But
	 * it is safer to leave it as-is than to try to determine a new
	 * bound.
	 */
	/* Header and payload descriptor for each output segment, plus
	 * one for every input fragment boundary within a segment
	 */
	unsigned int max_descs = EF4_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

	/* Possibly one more per segment for the alignment workaround,
	 * or for option descriptors
	 */
	if (EF4_WORKAROUND_5391(efx))
		max_descs += EF4_TSO_MAX_SEGS;

	/* Possibly more for PCIe page boundaries within input fragments */
	if (PAGE_SIZE > EF4_PAGE_SIZE)
		max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
				   DIV_ROUND_UP(GSO_MAX_SIZE, EF4_PAGE_SIZE));
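	/* Assuming the usual EF4_TSO_MAX_SEGS of 100 and MAX_SKB_FRAGS
	 * of 17, this bound works out to a few hundred descriptors, far
	 * more than any non-TSO skb can actually use; the bound
	 * deliberately errs on the safe side.
	 */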

	return max_descs;
}

static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1)
{
	/* We need to consider both queues that the net core sees as one */
	struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(txq1);
	struct ef4_nic *efx = txq1->efx;
	unsigned int fill_level;

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries).  Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
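	/* This barrier pairs with the smp_mb() in ef4_xmit_done(): it
	 * orders the queue stop above against the read_count reads
	 * below, so a completion cannot slip between them unobserved.
	 */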
	smp_mb();
	txq1->old_read_count = READ_ONCE(txq1->read_count);
	txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = max(txq1->insert_count - txq1->old_read_count,
			 txq2->insert_count - txq2->old_read_count);
	EF4_BUG_ON_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}

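/* Copy a short skb into a per-ring copy buffer, zero-padding it up to
 * tx_queue->tx_min_size where necessary.  This avoids the cost of a
 * DMA mapping for tiny or heavily fragmented packets.
 */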
static int ef4_enqueue_skb_copy(struct ef4_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int min_len = tx_queue->tx_min_size;
	unsigned int copy_len = skb->len;
	struct ef4_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EF4_BUG_ON_PARANOID(copy_len > EF4_TX_CB_SIZE);

	buffer = ef4_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = ef4_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EF4_WARN_ON_PARANOID(rc);
	if (unlikely(copy_len < min_len)) {
		memset(copy_buffer + copy_len, 0, min_len - copy_len);
		buffer->len = min_len;
	} else {
		buffer->len = copy_len;
	}

	buffer->skb = skb;
	buffer->flags = EF4_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

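/* Add descriptors for one DMA-mapped region, splitting it wherever the
 * NIC-type's tx_limit_len operation requires (e.g. at boundaries the
 * DMA engine cannot cross).  Returns the buffer used for the final
 * descriptor of the region.
 */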
static struct ef4_tx_buffer *ef4_tx_map_chunk(struct ef4_tx_queue *tx_queue,
					      dma_addr_t dma_addr,
					      size_t len)
{
	const struct ef4_nic_type *nic_type = tx_queue->efx->type;
	struct ef4_tx_buffer *buffer;
	unsigned int dma_len;

	/* Map the fragment taking account of NIC-dependent DMA limits. */
	do {
		buffer = ef4_tx_queue_get_insert_buffer(tx_queue);
		dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);

		buffer->len = dma_len;
		buffer->dma_addr = dma_addr;
		buffer->flags = EF4_TX_BUF_CONT;
		len -= dma_len;
		dma_addr += dma_len;
		++tx_queue->insert_count;
	} while (len);

	return buffer;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
static int ef4_tx_map_data(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	struct ef4_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data. */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EF4_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	/* Add descriptors for each fragment. */
	do {
		struct ef4_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = ef4_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EF4_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EF4_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment,
				0, len, DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}

/* Remove buffers put into a tx_queue.  None of the buffers must have
 * an skb attached.
 */
static void ef4_enqueue_unwind(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	/* Work backwards until we hit the original insert pointer value */
	while (tx_queue->insert_count != tx_queue->write_count) {
		--tx_queue->insert_count;
		buffer = __ef4_tx_queue_get_insert_buffer(tx_queue);
		ef4_dequeue_buffer(tx_queue, buffer, NULL, NULL);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue.  The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, all mapped fragments will be unmapped and
 * the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from ef4_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t ef4_enqueue_skb(struct ef4_tx_queue *tx_queue, struct sk_buff *skb)
{
	bool data_mapped = false;
	unsigned int skb_len;

	skb_len = skb->len;
	EF4_WARN_ON_PARANOID(skb_is_gso(skb));

	if (skb_len < tx_queue->tx_min_size ||
			(skb->data_len && skb_len <= EF4_TX_CB_SIZE)) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (ef4_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (ef4_tx_map_data(tx_queue, skb)))
		goto err;

	/* Update BQL */
	netdev_tx_sent_queue(tx_queue->core_txq, skb_len);

	/* Pass off to hardware */
	if (!netdev_xmit_more() || netif_xmit_stopped(tx_queue->core_txq)) {
		struct ef4_tx_queue *txq2 = ef4_tx_queue_partner(tx_queue);

		/* There could be packets left on the partner queue if those
		 * SKBs had skb->xmit_more set. If we do not push those they
		 * could be left for a long time and cause a netdev watchdog.
		 */
		if (txq2->xmit_more_available)
			ef4_nic_push_buffers(txq2);

		ef4_nic_push_buffers(tx_queue);
	} else {
		tx_queue->xmit_more_available = netdev_xmit_more();
	}

	tx_queue->tx_packets++;

	ef4_tx_maybe_stop_queue(tx_queue);

	return NETDEV_TX_OK;
err:
	ef4_enqueue_unwind(tx_queue);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
 */
static void ef4_dequeue_buffers(struct ef4_tx_queue *tx_queue,
				unsigned int index,
				unsigned int *pkts_compl,
				unsigned int *bytes_compl)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int stop_index, read_ptr;

	stop_index = (index + 1) & tx_queue->ptr_mask;
	read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

	while (read_ptr != stop_index) {
		struct ef4_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

		if (!(buffer->flags & EF4_TX_BUF_OPTION) &&
		    unlikely(buffer->len == 0)) {
			netif_err(efx, tx_err, efx->net_dev,
				  "TX queue %d spurious TX completion id %x\n",
				  tx_queue->queue, read_ptr);
			ef4_schedule_reset(efx, RESET_TYPE_TX_SKIP);
			return;
		}

		ef4_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

		++tx_queue->read_count;
		read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
	}
}

/* Initiate a packet transmission.  We use one channel per CPU
 * (sharing when we have more CPUs than channels).  On Falcon, the TX
 * completion events will be directed back to the CPU that transmitted
 * the packet, which should be cache-efficient.
 *
 * Context: non-blocking.
 * Note that returning anything other than NETDEV_TX_OK will cause the
 * OS to free the skb.
 */
netdev_tx_t ef4_hard_start_xmit(struct sk_buff *skb,
				struct net_device *net_dev)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct ef4_tx_queue *tx_queue;
	unsigned index, type;

	EF4_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = skb->ip_summed == CHECKSUM_PARTIAL ? EF4_TXQ_TYPE_OFFLOAD : 0;
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EF4_TXQ_TYPE_HIGHPRI;
	}
	tx_queue = ef4_get_tx_queue(efx, index, type);

	return ef4_enqueue_skb(tx_queue, skb);
}

void ef4_init_tx_queue_core_txq(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in ef4_hard_start_xmit() */
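	/* tx_queue->queue encodes channel * EF4_TXQ_TYPES + type, so
	 * dividing by EF4_TXQ_TYPES recovers the channel index; the
	 * high-priority queues occupy a second block of core TX queues
	 * after the first n_tx_channels.
	 */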
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->queue / EF4_TXQ_TYPES +
				    ((tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}

int ef4_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		 void *type_data)
{
	struct ef4_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	unsigned tc, num_tc;
	int rc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0 || num_tc > EF4_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

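	/* Each traffic class maps to its own contiguous block of
	 * n_tx_channels core TX queues.
	 */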
	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	if (num_tc > net_dev->num_tc) {
		/* Initialise high-priority queues as necessary */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_possible_channel_tx_queue(tx_queue,
							       channel) {
				if (!(tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI))
					continue;
				if (!tx_queue->buffer) {
					rc = ef4_probe_tx_queue(tx_queue);
					if (rc)
						return rc;
				}
				if (!tx_queue->initialised)
					ef4_init_tx_queue(tx_queue);
				ef4_init_tx_queue_core_txq(tx_queue);
			}
		}
	} else {
		/* Reduce number of classes before number of queues */
		net_dev->num_tc = num_tc;
	}

	rc = netif_set_real_num_tx_queues(net_dev,
					  max_t(int, num_tc, 1) *
					  efx->n_tx_channels);
	if (rc)
		return rc;

	/* Do not destroy high-priority queues when they become
	 * unused.  We would have to flush them first, and it is
	 * fairly difficult to flush a subset of TX queues.  Leave
	 * it to ef4_fini_channels().
	 */

	net_dev->num_tc = num_tc;
	return 0;
}

void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index)
{
	unsigned fill_level;
	struct ef4_nic *efx = tx_queue->efx;
	struct ef4_tx_queue *txq2;
	unsigned int pkts_compl = 0, bytes_compl = 0;

	EF4_BUG_ON_PARANOID(index > tx_queue->ptr_mask);

	ef4_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
	tx_queue->pkts_compl += pkts_compl;
	tx_queue->bytes_compl += bytes_compl;

	if (pkts_compl > 1)
		++tx_queue->merge_events;

	/* See if we need to restart the netif queue.  This memory
	 * barrier ensures that we write read_count (inside
	 * ef4_dequeue_buffers()) before reading the queue status.
	 */
	smp_mb();
	if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
	    likely(efx->port_enabled) &&
	    likely(netif_device_present(efx->net_dev))) {
		txq2 = ef4_tx_queue_partner(tx_queue);
		fill_level = max(tx_queue->insert_count - tx_queue->read_count,
				 txq2->insert_count - txq2->read_count);
		if (fill_level <= efx->txq_wake_thresh)
			netif_tx_wake_queue(tx_queue->core_txq);
	}

	/* Check whether the hardware queue is now empty */
	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
		tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
		if (tx_queue->read_count == tx_queue->old_write_count) {
			smp_mb();
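			/* Record the empty state, ordered after the
			 * write_count re-read by the barrier above; the
			 * transmit path uses this to decide when a
			 * descriptor can be pushed directly.
			 */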
			tx_queue->empty_read_count =
				tx_queue->read_count | EF4_EMPTY_COUNT_VALID;
		}
	}
}

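/* Number of copy-buffer pages needed so that every ring entry has its
 * own EF4_TX_CB_SIZE slot.
 */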
static unsigned int ef4_tx_cb_page_count(struct ef4_tx_queue *tx_queue)
{
	return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EF4_TX_CB_ORDER);
}

int ef4_probe_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->txq_entries), EF4_MIN_DMAQ_SIZE);
	EF4_BUG_ON_PARANOID(entries > EF4_MAX_DMAQ_SIZE);
	tx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating TX queue %d size %#x mask %#x\n",
		  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

	/* Allocate software ring */
	tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
				   GFP_KERNEL);
	if (!tx_queue->buffer)
		return -ENOMEM;

	tx_queue->cb_page = kcalloc(ef4_tx_cb_page_count(tx_queue),
				    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
	if (!tx_queue->cb_page) {
		rc = -ENOMEM;
		goto fail1;
	}

	/* Allocate hardware ring */
	rc = ef4_nic_probe_tx(tx_queue);
	if (rc)
		goto fail2;

	return 0;

fail2:
	kfree(tx_queue->cb_page);
	tx_queue->cb_page = NULL;
fail1:
	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
	return rc;
}

void ef4_init_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;

	netif_dbg(efx, drv, efx->net_dev,
		  "initialising TX queue %d\n", tx_queue->queue);

	tx_queue->insert_count = 0;
	tx_queue->write_count = 0;
	tx_queue->old_write_count = 0;
	tx_queue->read_count = 0;
	tx_queue->old_read_count = 0;
	tx_queue->empty_read_count = 0 | EF4_EMPTY_COUNT_VALID;
	tx_queue->xmit_more_available = false;

	/* Some older hardware requires TX writes larger than 32 bytes. */
	tx_queue->tx_min_size = EF4_WORKAROUND_15592(efx) ? 33 : 0;

	/* Set up TX descriptor ring */
	ef4_nic_init_tx(tx_queue);

	tx_queue->initialised = true;
}

void ef4_fini_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "shutting down TX queue %d\n", tx_queue->queue);

	if (!tx_queue->buffer)
		return;

	/* Free any buffers left in the ring */
	while (tx_queue->read_count != tx_queue->write_count) {
		unsigned int pkts_compl = 0, bytes_compl = 0;
		buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
		ef4_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

		++tx_queue->read_count;
	}
	tx_queue->xmit_more_available = false;
	netdev_tx_reset_queue(tx_queue->core_txq);
}

void ef4_remove_tx_queue(struct ef4_tx_queue *tx_queue)
{
	int i;

	if (!tx_queue->buffer)
		return;

	netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
		  "destroying TX queue %d\n", tx_queue->queue);
	ef4_nic_remove_tx(tx_queue);

	if (tx_queue->cb_page) {
		for (i = 0; i < ef4_tx_cb_page_count(tx_queue); i++)
			ef4_nic_free_buffer(tx_queue->efx,
					    &tx_queue->cb_page[i]);
		kfree(tx_queue->cb_page);
		tx_queue->cb_page = NULL;
	}

	kfree(tx_queue->buffer);
	tx_queue->buffer = NULL;
}