// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/pci.h>
#include <linux/tcp.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/ipv6.h>
#include <linux/if_ether.h>
#include <linux/highmem.h>
#include <linux/cache.h>
#include "net_driver.h"
#include "efx.h"
#include "io.h"
#include "nic.h"
#include "tx.h"
#include "tx_common.h"
#include "workarounds.h"

static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
					 struct efx_tx_buffer *buffer)
{
	unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
	struct efx_buffer *page_buf =
		&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
	unsigned int offset =
		((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);

	if (unlikely(!page_buf->addr) &&
	    efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
				   GFP_ATOMIC))
		return NULL;
	buffer->dma_addr = page_buf->dma_addr + offset;
	buffer->unmap_len = 0;
	return (u8 *)page_buf->addr + offset;
}
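
/* A worked example of the copy-buffer addressing above (a sketch; the exact
 * values depend on the build configuration): assuming PAGE_SHIFT = 12 (4 KiB
 * pages), EFX_TX_CB_ORDER = 7 (128-byte copy buffers) and NET_IP_ALIGN = 2,
 * each page holds 4096 >> 7 = 32 copy buffers. Insert index 37 then selects
 * cb_page[37 >> 5] = cb_page[1], at offset ((37 << 7) + 2) & 4095 = 642
 * within that page, so consecutive small packets share one DMA-mapped page.
 */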

static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
	/* We need to consider all queues that the net core sees as one */
	struct efx_nic *efx = txq1->efx;
	struct efx_tx_queue *txq2;
	unsigned int fill_level;

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	if (likely(fill_level < efx->txq_stop_thresh))
		return;

	/* We used the stale old_read_count above, which gives us a
	 * pessimistic estimate of the fill level (which may even
	 * validly be >= efx->txq_entries). Now try again using
	 * read_count (more likely to be a cache miss).
	 *
	 * If we read read_count and then conditionally stop the
	 * queue, it is possible for the completion path to race with
	 * us and complete all outstanding descriptors in the middle,
	 * after which there will be no more completions to wake it.
	 * Therefore we stop the queue first, then read read_count
	 * (with a memory barrier to ensure the ordering), then
	 * restart the queue if the fill level turns out to be low
	 * enough.
	 */
	netif_tx_stop_queue(txq1->core_txq);
	smp_mb();
	efx_for_each_channel_tx_queue(txq2, txq1->channel)
		txq2->old_read_count = READ_ONCE(txq2->read_count);

	fill_level = efx_channel_tx_old_fill_level(txq1->channel);
	EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
	if (likely(fill_level < efx->txq_stop_thresh)) {
		smp_mb();
		if (likely(!efx->loopback_selftest))
			netif_tx_start_queue(txq1->core_txq);
	}
}
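
/* An illustrative interleaving of the race described in
 * efx_tx_maybe_stop_queue() above (a sketch, not driver code). Checking
 * first and stopping second can lose the wakeup:
 *
 *	xmit path			completion path
 *	---------			---------------
 *	read read_count (queue full)
 *					complete all descriptors
 *					queue not stopped -> no wake
 *	netif_tx_stop_queue()
 *	(nothing left to wake the queue)
 *
 * Stopping first and re-reading read_count after smp_mb() closes this
 * window: either the completion path sees the stopped queue and wakes it,
 * or this path sees the updated read_count and restarts the queue itself.
 */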

static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
				struct sk_buff *skb)
{
	unsigned int copy_len = skb->len;
	struct efx_tx_buffer *buffer;
	u8 *copy_buffer;
	int rc;

	EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);

	buffer = efx_tx_queue_get_insert_buffer(tx_queue);

	copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
	if (unlikely(!copy_buffer))
		return -ENOMEM;

	rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
	EFX_WARN_ON_PARANOID(rc);
	buffer->len = copy_len;

	buffer->skb = skb;
	buffer->flags = EFX_TX_BUF_SKB;

	++tx_queue->insert_count;
	return rc;
}

/* Send any pending traffic for a channel. xmit_more is shared across all
 * queues for a channel, so we must check all of them.
 */
static void efx_tx_send_pending(struct efx_channel *channel)
{
	struct efx_tx_queue *q;

	efx_for_each_channel_tx_queue(q, channel) {
		if (q->xmit_pending)
			efx_nic_push_buffers(q);
	}
}

/*
 * Add a socket buffer to a TX queue
 *
 * This maps all fragments of a socket buffer for DMA and adds them to
 * the TX queue. The queue's insert pointer will be incremented by
 * the number of fragments in the socket buffer.
 *
 * If any DMA mapping fails, any mapped fragments will be unmapped,
 * and the queue's insert pointer will be restored to its original value.
 *
 * This function is split out from efx_siena_hard_start_xmit to allow the
 * loopback test to direct packets via specific TX queues.
 *
 * Returns NETDEV_TX_OK.
 * You must hold netif_tx_lock() to call this function.
 */
netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
				    struct sk_buff *skb)
{
	unsigned int old_insert_count = tx_queue->insert_count;
	bool xmit_more = netdev_xmit_more();
	bool data_mapped = false;
	unsigned int segments;
	unsigned int skb_len;
	int rc;

	skb_len = skb->len;
	segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
	if (segments == 1)
		segments = 0; /* Don't use TSO for a single segment. */

	/* Handle TSO first - it's *possible* (although unlikely) that we might
	 * be passed a packet to segment that's smaller than the copybreak/PIO
	 * size limit.
	 */
	if (segments) {
		rc = efx_siena_tx_tso_fallback(tx_queue, skb);
		tx_queue->tso_fallbacks++;
		if (rc == 0)
			return 0;
		goto err;
	} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
		/* Pad short packets or coalesce short fragmented packets. */
		if (efx_enqueue_skb_copy(tx_queue, skb))
			goto err;
		tx_queue->cb_packets++;
		data_mapped = true;
	}

	/* Map for DMA and create descriptors if we haven't done so already. */
	if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
		goto err;

	efx_tx_maybe_stop_queue(tx_queue);

	tx_queue->xmit_pending = true;

	/* Pass off to hardware */
	if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
		efx_tx_send_pending(tx_queue->channel);

	tx_queue->tx_packets++;
	return NETDEV_TX_OK;

err:
	efx_siena_enqueue_unwind(tx_queue, old_insert_count);
	dev_kfree_skb_any(skb);

	/* If we're not expecting another transmit and we had something to push
	 * on this queue or a partner queue then we need to push here to get the
	 * previous packets out.
	 */
	if (!xmit_more)
		efx_tx_send_pending(tx_queue->channel);

	return NETDEV_TX_OK;
}
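
/* A note on the doorbell batching above (grounded in the core API, but a
 * sketch rather than a specification): __netdev_tx_sent_queue() accounts the
 * bytes to BQL and returns true when the doorbell should be written now,
 * i.e. when xmit_more is false or the stack has stopped the queue. While it
 * keeps returning false, descriptors accumulate with xmit_pending set, and a
 * later packet (or the error path above) rings the doorbell for the whole
 * batch via efx_tx_send_pending().
 */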

/* Transmit a packet from an XDP buffer
 *
 * Returns number of packets sent on success, error code otherwise.
 * Runs in NAPI context, either in our poll (for XDP TX) or another NIC's
 * poll (for XDP redirect).
 */
int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
			     bool flush)
{
	struct efx_tx_buffer *tx_buffer;
	struct efx_tx_queue *tx_queue;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
	unsigned int len;
	int space;
	int cpu;
	int i = 0;

	if (unlikely(n && !xdpfs))
		return -EINVAL;
	if (unlikely(!n))
		return 0;

	cpu = raw_smp_processor_id();
	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return -EINVAL;

	tx_queue = efx->xdp_tx_queues[cpu];
	if (unlikely(!tx_queue))
		return -EINVAL;

	if (!tx_queue->initialised)
		return -EINVAL;

	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);

	/* If we're borrowing net stack queues we have to handle stop-restart
	 * or we might block the queue and it will be considered frozen.
	 */
	if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
		if (netif_tx_queue_stopped(tx_queue->core_txq))
			goto unlock;
		efx_tx_maybe_stop_queue(tx_queue);
	}

	/* Check for available space. We should never need multiple
	 * descriptors per frame.
	 */
	space = efx->txq_entries +
		tx_queue->read_count - tx_queue->insert_count;

	for (i = 0; i < n; i++) {
		xdpf = xdpfs[i];

		if (i >= space)
			break;

		/* We'll want a descriptor for this tx. */
		prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));

		len = xdpf->len;

		/* Map for DMA. */
		dma_addr = dma_map_single(&efx->pci_dev->dev,
					  xdpf->data, len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
			break;

		/* Create descriptor and set up for unmapping DMA. */
		tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
		tx_buffer->xdpf = xdpf;
		tx_buffer->flags = EFX_TX_BUF_XDP |
				   EFX_TX_BUF_MAP_SINGLE;
		tx_buffer->dma_offset = 0;
		tx_buffer->unmap_len = len;
		tx_queue->tx_packets++;
	}

	/* Pass mapped frames to hardware. */
	if (flush && i > 0)
		efx_nic_push_buffers(tx_queue);

unlock:
	if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
		HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);

	return i == 0 ? -EIO : i;
}
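
/* A worked example of the space check above (a sketch): read_count and
 * insert_count are free-running counters, so with txq_entries = 1024,
 * read_count = 70000 and insert_count = 70900, space = 1024 + 70000 - 70900
 * = 124 free descriptors. Because the difference is taken modulo 2^32, the
 * result stays correct even after the counters wrap.
 */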

/* Initiate a packet transmission. We use one channel per CPU
 * (sharing when we have more CPUs than channels).
 *
 * Context: non-blocking.
 * Should always return NETDEV_TX_OK and consume the skb.
 */
netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
				      struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_tx_queue *tx_queue;
	unsigned index, type;

	EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));

	index = skb_get_queue_mapping(skb);
	type = efx_tx_csum_type_skb(skb);
	if (index >= efx->n_tx_channels) {
		index -= efx->n_tx_channels;
		type |= EFX_TXQ_TYPE_HIGHPRI;
	}

	/* PTP "event" packet */
	if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
	    ((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
	     unlikely(efx_siena_ptp_is_ptp_tx(efx, skb)))) {
		/* There may be existing transmits on the channel that are
		 * waiting for this packet to trigger the doorbell write.
		 * We need to send the packets at this point.
		 */
		efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return efx_siena_ptp_tx(efx, skb);
	}

	tx_queue = efx_get_tx_queue(efx, index, type);
	if (WARN_ON_ONCE(!tx_queue)) {
		/* We don't have a TXQ of the right type.
		 * This should never happen, as we don't advertise offload
		 * features unless we can support them.
		 */
		dev_kfree_skb_any(skb);
		/* If we're not expecting another transmit and we had something
		 * to push on this queue or a partner queue then we need to
		 * push here to get the previous packets out.
		 */
		if (!netdev_xmit_more())
			efx_tx_send_pending(efx_get_tx_channel(efx, index));
		return NETDEV_TX_OK;
	}

	return __efx_siena_enqueue_skb(tx_queue, skb);
}

void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;

	/* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */
	tx_queue->core_txq =
		netdev_get_tx_queue(efx->net_dev,
				    tx_queue->channel->channel +
				    ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
				     efx->n_tx_channels : 0));
}
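
/* An example of the mapping above and its inverse (a sketch with assumed
 * numbers): with n_tx_channels = 8, a HIGHPRI TX queue on channel 3 is bound
 * to core TX queue 3 + 8 = 11. On transmit, efx_siena_hard_start_xmit() sees
 * queue mapping 11 >= 8, folds it back to index 3 and sets
 * EFX_TXQ_TYPE_HIGHPRI, recovering the same channel and queue type.
 */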

int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
		       void *type_data)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	unsigned tc, num_tc;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	/* Only Siena supported highpri queues */
	if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
		return -EOPNOTSUPP;

	num_tc = mqprio->num_tc;

	if (num_tc > EFX_MAX_TX_TC)
		return -EINVAL;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (num_tc == net_dev->num_tc)
		return 0;

	for (tc = 0; tc < num_tc; tc++) {
		net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
		net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
	}

	net_dev->num_tc = num_tc;

	return netif_set_real_num_tx_queues(net_dev,
					    max_t(int, num_tc, 1) *
					    efx->n_tx_channels);
}
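
/* An example of the tc_to_txq layout built above (assumed numbers): with
 * n_tx_channels = 4 and num_tc = 2, tc 0 gets offset 0 / count 4 (queues
 * 0..3) and tc 1 gets offset 4 / count 4 (queues 4..7), and the device
 * advertises 2 * 4 = 8 real TX queues. Queues 4..7 are the HIGHPRI half
 * selected by the index >= n_tx_channels test in efx_siena_hard_start_xmit().
 */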