// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "efx.h"
#include "nic_common.h"
#include "tx_common.h"
#include <net/gso.h>

static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
{
        return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
                            PAGE_SIZE >> EFX_TX_CB_ORDER);
}

int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int entries;
        int rc;

        /* Create the smallest power-of-two aligned ring */
        entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
        EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
        tx_queue->ptr_mask = entries - 1;

        netif_dbg(efx, probe, efx->net_dev,
                  "creating TX queue %d size %#x mask %#x\n",
                  tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);

        /* Allocate software ring */
        tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
                                   GFP_KERNEL);
        if (!tx_queue->buffer)
                return -ENOMEM;

        tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
                                    sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
        if (!tx_queue->cb_page) {
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate hardware ring, determine TXQ type */
        rc = efx_nic_probe_tx(tx_queue);
        if (rc)
                goto fail2;

        tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
        return 0;

fail2:
        kfree(tx_queue->cb_page);
        tx_queue->cb_page = NULL;
fail1:
        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        return rc;
}

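/* Initialise the software state of a TX queue and program the hardware
 * descriptor ring via efx_nic_init_tx(). All ring counters are reset and
 * the queue is marked for timestamping if it belongs to the PTP channel.
 */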
void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_nic *efx = tx_queue->efx;

        netif_dbg(efx, drv, efx->net_dev,
                  "initialising TX queue %d\n", tx_queue->queue);

        tx_queue->insert_count = 0;
        tx_queue->notify_count = 0;
        tx_queue->write_count = 0;
        tx_queue->packet_write_count = 0;
        tx_queue->old_write_count = 0;
        tx_queue->read_count = 0;
        tx_queue->old_read_count = 0;
        tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
        tx_queue->xmit_pending = false;
        tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) &&
                                  tx_queue->channel == efx_siena_ptp_channel(efx));
        tx_queue->completed_timestamp_major = 0;
        tx_queue->completed_timestamp_minor = 0;

        tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
        tx_queue->tso_version = 0;

        /* Set up TX descriptor ring */
        efx_nic_init_tx(tx_queue);

        tx_queue->initialised = true;
}

void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
        int i;

        if (!tx_queue->buffer)
                return;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "destroying TX queue %d\n", tx_queue->queue);
        efx_nic_remove_tx(tx_queue);

        if (tx_queue->cb_page) {
                for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
                        efx_siena_free_buffer(tx_queue->efx,
                                              &tx_queue->cb_page[i]);
                kfree(tx_queue->cb_page);
                tx_queue->cb_page = NULL;
        }

        kfree(tx_queue->buffer);
        tx_queue->buffer = NULL;
        tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}

static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
                               struct efx_tx_buffer *buffer,
                               unsigned int *pkts_compl,
                               unsigned int *bytes_compl)
{
        if (buffer->unmap_len) {
                struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
                dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;

                if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                        dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
                                       DMA_TO_DEVICE);
                buffer->unmap_len = 0;
        }
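
        /* For skb buffers, update the completion counters, deliver any
         * pending hardware TX timestamp and free the skb; XDP frames are
         * returned to their owner instead.
         */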
        if (buffer->flags & EFX_TX_BUF_SKB) {
                struct sk_buff *skb = (struct sk_buff *)buffer->skb;

                EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
                (*pkts_compl)++;
                (*bytes_compl) += skb->len;
                if (tx_queue->timestamping &&
                    (tx_queue->completed_timestamp_major ||
                     tx_queue->completed_timestamp_minor)) {
                        struct skb_shared_hwtstamps hwtstamp;

                        hwtstamp.hwtstamp =
                                efx_siena_ptp_nic_to_kernel_time(tx_queue);
                        skb_tstamp_tx(skb, &hwtstamp);

                        tx_queue->completed_timestamp_major = 0;
                        tx_queue->completed_timestamp_minor = 0;
                }
                dev_consume_skb_any((struct sk_buff *)buffer->skb);
                netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
                           "TX queue %d transmission id %x complete\n",
                           tx_queue->queue, tx_queue->read_count);
        } else if (buffer->flags & EFX_TX_BUF_XDP) {
                xdp_return_frame_rx_napi(buffer->xdpf);
        }

        buffer->len = 0;
        buffer->flags = 0;
}

void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
        struct efx_tx_buffer *buffer;

        netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
                  "shutting down TX queue %d\n", tx_queue->queue);

        if (!tx_queue->buffer)
                return;

        /* Free any buffers left in the ring */
        while (tx_queue->read_count != tx_queue->write_count) {
                unsigned int pkts_compl = 0, bytes_compl = 0;

                buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);

                ++tx_queue->read_count;
        }
        tx_queue->xmit_pending = false;
        netdev_tx_reset_queue(tx_queue->core_txq);
}

/* Remove packets from the TX queue
 *
 * This removes packets from the TX queue, up to and including the
 * specified index.
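 *
 * A completion for a descriptor slot that is not in use indicates a
 * driver or hardware inconsistency, so a RESET_TYPE_TX_SKIP reset is
 * scheduled instead of continuing.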
 */
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
                                unsigned int index,
                                unsigned int *pkts_compl,
                                unsigned int *bytes_compl)
{
        struct efx_nic *efx = tx_queue->efx;
        unsigned int stop_index, read_ptr;

        stop_index = (index + 1) & tx_queue->ptr_mask;
        read_ptr = tx_queue->read_count & tx_queue->ptr_mask;

        while (read_ptr != stop_index) {
                struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];

                if (!efx_tx_buffer_in_use(buffer)) {
                        netif_err(efx, tx_err, efx->net_dev,
                                  "TX queue %d spurious TX completion id %d\n",
                                  tx_queue->queue, read_ptr);
                        efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
                        return;
                }

                efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);

                ++tx_queue->read_count;
                read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
        }
}

void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
{
        if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
                tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
                if (tx_queue->read_count == tx_queue->old_write_count) {
                        /* Ensure that read_count is flushed. */
                        smp_mb();
                        tx_queue->empty_read_count =
                                tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
                }
        }
}

void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
        unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
        struct efx_nic *efx = tx_queue->efx;

        EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);

        efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
        tx_queue->pkts_compl += pkts_compl;
        tx_queue->bytes_compl += bytes_compl;

        if (pkts_compl > 1)
                ++tx_queue->merge_events;

        /* See if we need to restart the netif queue. This memory
         * barrier ensures that we write read_count (inside
         * efx_dequeue_buffers()) before reading the queue status.
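         * The queue is only woken once the fill level has dropped to the
         * wake threshold, so a stopped queue is not restarted prematurely.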
         */
        smp_mb();
        if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
            likely(efx->port_enabled) &&
            likely(netif_device_present(efx->net_dev))) {
                fill_level = efx_channel_tx_fill_level(tx_queue->channel);
                if (fill_level <= efx->txq_wake_thresh)
                        netif_tx_wake_queue(tx_queue->core_txq);
        }

        efx_siena_xmit_done_check_empty(tx_queue);
}

/* Remove buffers put into a tx_queue for the current packet.
 * None of the buffers must have an skb attached.
 */
void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
                              unsigned int insert_count)
{
        struct efx_tx_buffer *buffer;
        unsigned int bytes_compl = 0;
        unsigned int pkts_compl = 0;

        /* Work backwards until we hit the original insert pointer value */
        while (tx_queue->insert_count != insert_count) {
                --tx_queue->insert_count;
                buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
                efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
        }
}

struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
                                             dma_addr_t dma_addr, size_t len)
{
        const struct efx_nic_type *nic_type = tx_queue->efx->type;
        struct efx_tx_buffer *buffer;
        unsigned int dma_len;

        /* Map the fragment taking account of NIC-dependent DMA limits.
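         * Each iteration emits one descriptor of at most the NIC's
         * per-descriptor length limit (->tx_limit_len), so a single chunk
         * may be split across several descriptors.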
         */
        do {
                buffer = efx_tx_queue_get_insert_buffer(tx_queue);

                if (nic_type->tx_limit_len)
                        dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
                else
                        dma_len = len;

                buffer->len = dma_len;
                buffer->dma_addr = dma_addr;
                buffer->flags = EFX_TX_BUF_CONT;
                len -= dma_len;
                dma_addr += dma_len;
                ++tx_queue->insert_count;
        } while (len);

        return buffer;
}

static int efx_tx_tso_header_length(struct sk_buff *skb)
{
        size_t header_len;

        if (skb->encapsulation)
                header_len = skb_inner_transport_header(skb) -
                                skb->data +
                                (inner_tcp_hdr(skb)->doff << 2u);
        else
                header_len = skb_transport_header(skb) - skb->data +
                                (tcp_hdr(skb)->doff << 2u);
        return header_len;
}

/* Map all data from an SKB for DMA and create descriptors on the queue. */
int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
                          unsigned int segment_count)
{
        struct efx_nic *efx = tx_queue->efx;
        struct device *dma_dev = &efx->pci_dev->dev;
        unsigned int frag_index, nr_frags;
        dma_addr_t dma_addr, unmap_addr;
        unsigned short dma_flags;
        size_t len, unmap_len;

        nr_frags = skb_shinfo(skb)->nr_frags;
        frag_index = 0;

        /* Map header data. */
        len = skb_headlen(skb);
        dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
        dma_flags = EFX_TX_BUF_MAP_SINGLE;
        unmap_len = len;
        unmap_addr = dma_addr;

        if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                return -EIO;

        if (segment_count) {
                /* For TSO we need to put the header into a separate
                 * descriptor. Map this separately if necessary.
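                 * (len and dma_addr are advanced past the header below so
                 * that the fragment loop maps only the payload.)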
                 */
                size_t header_len = efx_tx_tso_header_length(skb);

                if (header_len != len) {
                        tx_queue->tso_long_headers++;
                        efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
                        len -= header_len;
                        dma_addr += header_len;
                }
        }

        /* Add descriptors for each fragment. */
        do {
                struct efx_tx_buffer *buffer;
                skb_frag_t *fragment;

                buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);

                /* The final descriptor for a fragment is responsible for
                 * unmapping the whole fragment.
                 */
                buffer->flags = EFX_TX_BUF_CONT | dma_flags;
                buffer->unmap_len = unmap_len;
                buffer->dma_offset = buffer->dma_addr - unmap_addr;

                if (frag_index >= nr_frags) {
                        /* Store SKB details with the final buffer for
                         * the completion.
                         */
                        buffer->skb = skb;
                        buffer->flags = EFX_TX_BUF_SKB | dma_flags;
                        return 0;
                }

                /* Move on to the next fragment. */
                fragment = &skb_shinfo(skb)->frags[frag_index++];
                len = skb_frag_size(fragment);
                dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
                                            DMA_TO_DEVICE);
                dma_flags = 0;
                unmap_len = len;
                unmap_addr = dma_addr;

                if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
                        return -EIO;
        } while (1);
}

unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
{
        /* Header and payload descriptor for each output segment, plus
         * one for every input fragment boundary within a segment
         */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;

        /* Possibly one more per segment for option descriptors */
        if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
                max_descs += EFX_TSO_MAX_SEGS;

        /* Possibly more for PCIe page boundaries within input fragments */
        if (PAGE_SIZE > EFX_PAGE_SIZE)
                max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
                                   DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));

        return max_descs;
}

/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
                              struct sk_buff *skb)
{
        struct sk_buff *segments, *next;

        segments = skb_gso_segment(skb, 0);
        if (IS_ERR(segments))
                return PTR_ERR(segments);

        dev_consume_skb_any(skb);

        skb_list_walk_safe(segments, skb, next) {
                skb_mark_not_on_list(skb);
                efx_enqueue_skb(tx_queue, skb);
        }

        return 0;
}