18c2ecf20Sopenharmony_ci/*
28c2ecf20Sopenharmony_ci * This file is part of the Chelsio T4 Ethernet driver for Linux.
38c2ecf20Sopenharmony_ci *
48c2ecf20Sopenharmony_ci * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
58c2ecf20Sopenharmony_ci *
68c2ecf20Sopenharmony_ci * This software is available to you under a choice of one of two
78c2ecf20Sopenharmony_ci * licenses.  You may choose to be licensed under the terms of the GNU
88c2ecf20Sopenharmony_ci * General Public License (GPL) Version 2, available from the file
98c2ecf20Sopenharmony_ci * COPYING in the main directory of this source tree, or the
108c2ecf20Sopenharmony_ci * OpenIB.org BSD license below:
118c2ecf20Sopenharmony_ci *
128c2ecf20Sopenharmony_ci *     Redistribution and use in source and binary forms, with or
138c2ecf20Sopenharmony_ci *     without modification, are permitted provided that the following
148c2ecf20Sopenharmony_ci *     conditions are met:
158c2ecf20Sopenharmony_ci *
168c2ecf20Sopenharmony_ci *      - Redistributions of source code must retain the above
178c2ecf20Sopenharmony_ci *        copyright notice, this list of conditions and the following
188c2ecf20Sopenharmony_ci *        disclaimer.
198c2ecf20Sopenharmony_ci *
208c2ecf20Sopenharmony_ci *      - Redistributions in binary form must reproduce the above
218c2ecf20Sopenharmony_ci *        copyright notice, this list of conditions and the following
228c2ecf20Sopenharmony_ci *        disclaimer in the documentation and/or other materials
238c2ecf20Sopenharmony_ci *        provided with the distribution.
248c2ecf20Sopenharmony_ci *
258c2ecf20Sopenharmony_ci * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
268c2ecf20Sopenharmony_ci * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
278c2ecf20Sopenharmony_ci * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
288c2ecf20Sopenharmony_ci * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
298c2ecf20Sopenharmony_ci * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
308c2ecf20Sopenharmony_ci * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
318c2ecf20Sopenharmony_ci * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
328c2ecf20Sopenharmony_ci * SOFTWARE.
338c2ecf20Sopenharmony_ci */
348c2ecf20Sopenharmony_ci
358c2ecf20Sopenharmony_ci#include <linux/skbuff.h>
368c2ecf20Sopenharmony_ci#include <linux/netdevice.h>
378c2ecf20Sopenharmony_ci#include <linux/etherdevice.h>
388c2ecf20Sopenharmony_ci#include <linux/if_vlan.h>
398c2ecf20Sopenharmony_ci#include <linux/ip.h>
408c2ecf20Sopenharmony_ci#include <linux/dma-mapping.h>
418c2ecf20Sopenharmony_ci#include <linux/jiffies.h>
428c2ecf20Sopenharmony_ci#include <linux/prefetch.h>
438c2ecf20Sopenharmony_ci#include <linux/export.h>
448c2ecf20Sopenharmony_ci#include <net/xfrm.h>
458c2ecf20Sopenharmony_ci#include <net/ipv6.h>
468c2ecf20Sopenharmony_ci#include <net/tcp.h>
478c2ecf20Sopenharmony_ci#include <net/busy_poll.h>
488c2ecf20Sopenharmony_ci#ifdef CONFIG_CHELSIO_T4_FCOE
498c2ecf20Sopenharmony_ci#include <scsi/fc/fc_fcoe.h>
508c2ecf20Sopenharmony_ci#endif /* CONFIG_CHELSIO_T4_FCOE */
518c2ecf20Sopenharmony_ci#include "cxgb4.h"
528c2ecf20Sopenharmony_ci#include "t4_regs.h"
538c2ecf20Sopenharmony_ci#include "t4_values.h"
548c2ecf20Sopenharmony_ci#include "t4_msg.h"
558c2ecf20Sopenharmony_ci#include "t4fw_api.h"
568c2ecf20Sopenharmony_ci#include "cxgb4_ptp.h"
578c2ecf20Sopenharmony_ci#include "cxgb4_uld.h"
588c2ecf20Sopenharmony_ci#include "cxgb4_tc_mqprio.h"
598c2ecf20Sopenharmony_ci#include "sched.h"
608c2ecf20Sopenharmony_ci
618c2ecf20Sopenharmony_ci/*
628c2ecf20Sopenharmony_ci * Rx buffer size.  We use largish buffers if possible but settle for single
638c2ecf20Sopenharmony_ci * pages under memory shortage.
648c2ecf20Sopenharmony_ci */
658c2ecf20Sopenharmony_ci#if PAGE_SHIFT >= 16
668c2ecf20Sopenharmony_ci# define FL_PG_ORDER 0
678c2ecf20Sopenharmony_ci#else
688c2ecf20Sopenharmony_ci# define FL_PG_ORDER (16 - PAGE_SHIFT)
698c2ecf20Sopenharmony_ci#endif
708c2ecf20Sopenharmony_ci
718c2ecf20Sopenharmony_ci/* RX_PULL_LEN should be <= RX_COPY_THRES */
728c2ecf20Sopenharmony_ci#define RX_COPY_THRES    256
738c2ecf20Sopenharmony_ci#define RX_PULL_LEN      128
748c2ecf20Sopenharmony_ci
758c2ecf20Sopenharmony_ci/*
768c2ecf20Sopenharmony_ci * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
778c2ecf20Sopenharmony_ci * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
788c2ecf20Sopenharmony_ci */
798c2ecf20Sopenharmony_ci#define RX_PKT_SKB_LEN   512
808c2ecf20Sopenharmony_ci
818c2ecf20Sopenharmony_ci/*
828c2ecf20Sopenharmony_ci * Max number of Tx descriptors we clean up at a time.  Should be modest as
838c2ecf20Sopenharmony_ci * freeing skbs isn't cheap and it happens while holding locks.  We just need
848c2ecf20Sopenharmony_ci * to free packets faster than they arrive, we eventually catch up and keep
858c2ecf20Sopenharmony_ci * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.  It should
868c2ecf20Sopenharmony_ci * also match the CIDX Flush Threshold.
878c2ecf20Sopenharmony_ci */
888c2ecf20Sopenharmony_ci#define MAX_TX_RECLAIM 32
898c2ecf20Sopenharmony_ci
908c2ecf20Sopenharmony_ci/*
918c2ecf20Sopenharmony_ci * Max number of Rx buffers we replenish at a time.  Again keep this modest,
928c2ecf20Sopenharmony_ci * allocating buffers isn't cheap either.
938c2ecf20Sopenharmony_ci */
948c2ecf20Sopenharmony_ci#define MAX_RX_REFILL 16U
958c2ecf20Sopenharmony_ci
968c2ecf20Sopenharmony_ci/*
978c2ecf20Sopenharmony_ci * Period of the Rx queue check timer.  This timer is infrequent as it has
988c2ecf20Sopenharmony_ci * something to do only when the system experiences severe memory shortage.
998c2ecf20Sopenharmony_ci */
1008c2ecf20Sopenharmony_ci#define RX_QCHECK_PERIOD (HZ / 2)
1018c2ecf20Sopenharmony_ci
1028c2ecf20Sopenharmony_ci/*
1038c2ecf20Sopenharmony_ci * Period of the Tx queue check timer.
1048c2ecf20Sopenharmony_ci */
1058c2ecf20Sopenharmony_ci#define TX_QCHECK_PERIOD (HZ / 2)
1068c2ecf20Sopenharmony_ci
1078c2ecf20Sopenharmony_ci/*
1088c2ecf20Sopenharmony_ci * Max number of Tx descriptors to be reclaimed by the Tx timer.
1098c2ecf20Sopenharmony_ci */
1108c2ecf20Sopenharmony_ci#define MAX_TIMER_TX_RECLAIM 100
1118c2ecf20Sopenharmony_ci
1128c2ecf20Sopenharmony_ci/*
1138c2ecf20Sopenharmony_ci * Timer index used when backing off due to memory shortage.
1148c2ecf20Sopenharmony_ci */
1158c2ecf20Sopenharmony_ci#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
1168c2ecf20Sopenharmony_ci
1178c2ecf20Sopenharmony_ci/*
1188c2ecf20Sopenharmony_ci * Suspension threshold for non-Ethernet Tx queues.  We require enough room
1198c2ecf20Sopenharmony_ci * for a full sized WR.
1208c2ecf20Sopenharmony_ci */
1218c2ecf20Sopenharmony_ci#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
1228c2ecf20Sopenharmony_ci
1238c2ecf20Sopenharmony_ci/*
1248c2ecf20Sopenharmony_ci * Max Tx descriptor space we allow for an Ethernet packet to be inlined
1258c2ecf20Sopenharmony_ci * into a WR.
1268c2ecf20Sopenharmony_ci */
1278c2ecf20Sopenharmony_ci#define MAX_IMM_TX_PKT_LEN 256
1288c2ecf20Sopenharmony_ci
1298c2ecf20Sopenharmony_ci/*
1308c2ecf20Sopenharmony_ci * Max size of a WR sent through a control Tx queue.
1318c2ecf20Sopenharmony_ci */
1328c2ecf20Sopenharmony_ci#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
1338c2ecf20Sopenharmony_ci
struct rx_sw_desc {                /* SW state per Rx descriptor */
	struct page *page;	/* page backing this Free List buffer */
	dma_addr_t dma_addr;	/* bus address; low bits carry RX_BUF_FLAGS state */
};
1388c2ecf20Sopenharmony_ci
1398c2ecf20Sopenharmony_ci/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
1418c2ecf20Sopenharmony_ci * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
1428c2ecf20Sopenharmony_ci * We could easily support more but there doesn't seem to be much need for
1438c2ecf20Sopenharmony_ci * that ...
1448c2ecf20Sopenharmony_ci */
1458c2ecf20Sopenharmony_ci#define FL_MTU_SMALL 1500
1468c2ecf20Sopenharmony_ci#define FL_MTU_LARGE 9000
1478c2ecf20Sopenharmony_ci
1488c2ecf20Sopenharmony_cistatic inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
1498c2ecf20Sopenharmony_ci					  unsigned int mtu)
1508c2ecf20Sopenharmony_ci{
1518c2ecf20Sopenharmony_ci	struct sge *s = &adapter->sge;
1528c2ecf20Sopenharmony_ci
1538c2ecf20Sopenharmony_ci	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
1548c2ecf20Sopenharmony_ci}
1558c2ecf20Sopenharmony_ci
1568c2ecf20Sopenharmony_ci#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
1578c2ecf20Sopenharmony_ci#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
1588c2ecf20Sopenharmony_ci
1598c2ecf20Sopenharmony_ci/*
1608c2ecf20Sopenharmony_ci * Bits 0..3 of rx_sw_desc.dma_addr have special meaning.  The hardware uses
1618c2ecf20Sopenharmony_ci * these to specify the buffer size as an index into the SGE Free List Buffer
1628c2ecf20Sopenharmony_ci * Size register array.  We also use bit 4, when the buffer has been unmapped
1638c2ecf20Sopenharmony_ci * for DMA, but this is of course never sent to the hardware and is only used
1648c2ecf20Sopenharmony_ci * to prevent double unmappings.  All of the above requires that the Free List
1658c2ecf20Sopenharmony_ci * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
 * 32-byte or a power of 2 greater in alignment.  Since the SGE's minimal
1678c2ecf20Sopenharmony_ci * Free List Buffer alignment is 32 bytes, this works out for us ...
1688c2ecf20Sopenharmony_ci */
enum {
	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */

	/*
	 * XXX We shouldn't depend on being able to use these indices.
	 * XXX Especially when some other Master PF has initialized the
	 * XXX adapter or we use the Firmware Configuration File.  We
	 * XXX should really search through the Host Buffer Size register
	 * XXX array for the appropriately sized buffer indices.
	 */
	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (PAGE_SIZE << FL_PG_ORDER) page buffer */

	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
};
1878c2ecf20Sopenharmony_ci
/* Per-timer packet quota; presumably indexed by SGE timer index — confirm. */
static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
/* Minimum amount of NAPI work counted per poll iteration. */
#define MIN_NAPI_WORK  1
1908c2ecf20Sopenharmony_ci
1918c2ecf20Sopenharmony_cistatic inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
1928c2ecf20Sopenharmony_ci{
1938c2ecf20Sopenharmony_ci	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
1948c2ecf20Sopenharmony_ci}
1958c2ecf20Sopenharmony_ci
1968c2ecf20Sopenharmony_cistatic inline bool is_buf_mapped(const struct rx_sw_desc *d)
1978c2ecf20Sopenharmony_ci{
1988c2ecf20Sopenharmony_ci	return !(d->dma_addr & RX_UNMAPPED_BUF);
1998c2ecf20Sopenharmony_ci}
2008c2ecf20Sopenharmony_ci
2018c2ecf20Sopenharmony_ci/**
2028c2ecf20Sopenharmony_ci *	txq_avail - return the number of available slots in a Tx queue
2038c2ecf20Sopenharmony_ci *	@q: the Tx queue
2048c2ecf20Sopenharmony_ci *
2058c2ecf20Sopenharmony_ci *	Returns the number of descriptors in a Tx queue available to write new
2068c2ecf20Sopenharmony_ci *	packets.
2078c2ecf20Sopenharmony_ci */
2088c2ecf20Sopenharmony_cistatic inline unsigned int txq_avail(const struct sge_txq *q)
2098c2ecf20Sopenharmony_ci{
2108c2ecf20Sopenharmony_ci	return q->size - 1 - q->in_use;
2118c2ecf20Sopenharmony_ci}
2128c2ecf20Sopenharmony_ci
2138c2ecf20Sopenharmony_ci/**
2148c2ecf20Sopenharmony_ci *	fl_cap - return the capacity of a free-buffer list
2158c2ecf20Sopenharmony_ci *	@fl: the FL
2168c2ecf20Sopenharmony_ci *
2178c2ecf20Sopenharmony_ci *	Returns the capacity of a free-buffer list.  The capacity is less than
2188c2ecf20Sopenharmony_ci *	the size because one descriptor needs to be left unpopulated, otherwise
2198c2ecf20Sopenharmony_ci *	HW will think the FL is empty.
2208c2ecf20Sopenharmony_ci */
2218c2ecf20Sopenharmony_cistatic inline unsigned int fl_cap(const struct sge_fl *fl)
2228c2ecf20Sopenharmony_ci{
2238c2ecf20Sopenharmony_ci	return fl->size - 8;   /* 1 descriptor = 8 buffers */
2248c2ecf20Sopenharmony_ci}
2258c2ecf20Sopenharmony_ci
2268c2ecf20Sopenharmony_ci/**
2278c2ecf20Sopenharmony_ci *	fl_starving - return whether a Free List is starving.
2288c2ecf20Sopenharmony_ci *	@adapter: pointer to the adapter
2298c2ecf20Sopenharmony_ci *	@fl: the Free List
2308c2ecf20Sopenharmony_ci *
2318c2ecf20Sopenharmony_ci *	Tests specified Free List to see whether the number of buffers
2328c2ecf20Sopenharmony_ci *	available to the hardware has falled below our "starvation"
2338c2ecf20Sopenharmony_ci *	threshold.
2348c2ecf20Sopenharmony_ci */
2358c2ecf20Sopenharmony_cistatic inline bool fl_starving(const struct adapter *adapter,
2368c2ecf20Sopenharmony_ci			       const struct sge_fl *fl)
2378c2ecf20Sopenharmony_ci{
2388c2ecf20Sopenharmony_ci	const struct sge *s = &adapter->sge;
2398c2ecf20Sopenharmony_ci
2408c2ecf20Sopenharmony_ci	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
2418c2ecf20Sopenharmony_ci}
2428c2ecf20Sopenharmony_ci
/**
 *	cxgb4_map_skb - DMA-map an skb for transmission
 *	@dev: the device to map against
 *	@skb: the packet to map
 *	@addr: output array; addr[0] receives the bus address of the linear
 *	       header, followed by one entry per page fragment
 *
 *	Maps the skb's linear data and every page fragment for DMA to the
 *	device.  On any mapping failure all mappings made so far are undone.
 *
 *	Returns 0 on success or -ENOMEM if a mapping failed.
 */
int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
		  dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	/* Map the linear portion first; its address lands in addr[0]. */
	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *addr))
		goto out_err;

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	/* Map each fragment into the next addr[] slot. */
	for (fp = si->frags; fp < end; fp++) {
		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
					   DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *addr))
			goto unwind;
	}
	return 0;

unwind:
	/* Undo the fragment mappings made so far, newest first ... */
	while (fp-- > si->frags)
		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);

	/* ... then the linear-header mapping at addr[0]. */
	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL(cxgb4_map_skb);
2738c2ecf20Sopenharmony_ci
2748c2ecf20Sopenharmony_cistatic void unmap_skb(struct device *dev, const struct sk_buff *skb,
2758c2ecf20Sopenharmony_ci		      const dma_addr_t *addr)
2768c2ecf20Sopenharmony_ci{
2778c2ecf20Sopenharmony_ci	const skb_frag_t *fp, *end;
2788c2ecf20Sopenharmony_ci	const struct skb_shared_info *si;
2798c2ecf20Sopenharmony_ci
2808c2ecf20Sopenharmony_ci	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
2818c2ecf20Sopenharmony_ci
2828c2ecf20Sopenharmony_ci	si = skb_shinfo(skb);
2838c2ecf20Sopenharmony_ci	end = &si->frags[si->nr_frags];
2848c2ecf20Sopenharmony_ci	for (fp = si->frags; fp < end; fp++)
2858c2ecf20Sopenharmony_ci		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
2868c2ecf20Sopenharmony_ci}
2878c2ecf20Sopenharmony_ci
#ifdef CONFIG_NEED_DMA_MAP_STATE
/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	/* NOTE(review): assumes the Tx path stored the dma_addr_t array at
	 * skb->head before setting this destructor — confirm against caller.
	 */
	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
}
#endif
3028c2ecf20Sopenharmony_ci
3038c2ecf20Sopenharmony_ci/**
3048c2ecf20Sopenharmony_ci *	free_tx_desc - reclaims Tx descriptors and their buffers
3058c2ecf20Sopenharmony_ci *	@adap: the adapter
3068c2ecf20Sopenharmony_ci *	@q: the Tx queue to reclaim descriptors from
3078c2ecf20Sopenharmony_ci *	@n: the number of descriptors to reclaim
3088c2ecf20Sopenharmony_ci *	@unmap: whether the buffers should be unmapped for DMA
3098c2ecf20Sopenharmony_ci *
3108c2ecf20Sopenharmony_ci *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
3118c2ecf20Sopenharmony_ci *	Tx buffers.  Called with the Tx queue lock held.
3128c2ecf20Sopenharmony_ci */
3138c2ecf20Sopenharmony_civoid free_tx_desc(struct adapter *adap, struct sge_txq *q,
3148c2ecf20Sopenharmony_ci		  unsigned int n, bool unmap)
3158c2ecf20Sopenharmony_ci{
3168c2ecf20Sopenharmony_ci	unsigned int cidx = q->cidx;
3178c2ecf20Sopenharmony_ci	struct tx_sw_desc *d;
3188c2ecf20Sopenharmony_ci
3198c2ecf20Sopenharmony_ci	d = &q->sdesc[cidx];
3208c2ecf20Sopenharmony_ci	while (n--) {
3218c2ecf20Sopenharmony_ci		if (d->skb) {                       /* an SGL is present */
3228c2ecf20Sopenharmony_ci			if (unmap && d->addr[0]) {
3238c2ecf20Sopenharmony_ci				unmap_skb(adap->pdev_dev, d->skb, d->addr);
3248c2ecf20Sopenharmony_ci				memset(d->addr, 0, sizeof(d->addr));
3258c2ecf20Sopenharmony_ci			}
3268c2ecf20Sopenharmony_ci			dev_consume_skb_any(d->skb);
3278c2ecf20Sopenharmony_ci			d->skb = NULL;
3288c2ecf20Sopenharmony_ci		}
3298c2ecf20Sopenharmony_ci		++d;
3308c2ecf20Sopenharmony_ci		if (++cidx == q->size) {
3318c2ecf20Sopenharmony_ci			cidx = 0;
3328c2ecf20Sopenharmony_ci			d = q->sdesc;
3338c2ecf20Sopenharmony_ci		}
3348c2ecf20Sopenharmony_ci	}
3358c2ecf20Sopenharmony_ci	q->cidx = cidx;
3368c2ecf20Sopenharmony_ci}
3378c2ecf20Sopenharmony_ci
3388c2ecf20Sopenharmony_ci/*
3398c2ecf20Sopenharmony_ci * Return the number of reclaimable descriptors in a Tx queue.
3408c2ecf20Sopenharmony_ci */
3418c2ecf20Sopenharmony_cistatic inline int reclaimable(const struct sge_txq *q)
3428c2ecf20Sopenharmony_ci{
3438c2ecf20Sopenharmony_ci	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
3448c2ecf20Sopenharmony_ci	hw_cidx -= q->cidx;
3458c2ecf20Sopenharmony_ci	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
3468c2ecf20Sopenharmony_ci}
3478c2ecf20Sopenharmony_ci
3488c2ecf20Sopenharmony_ci/**
3498c2ecf20Sopenharmony_ci *	reclaim_completed_tx - reclaims completed TX Descriptors
3508c2ecf20Sopenharmony_ci *	@adap: the adapter
3518c2ecf20Sopenharmony_ci *	@q: the Tx queue to reclaim completed descriptors from
3528c2ecf20Sopenharmony_ci *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
3538c2ecf20Sopenharmony_ci *	@unmap: whether the buffers should be unmapped for DMA
3548c2ecf20Sopenharmony_ci *
3558c2ecf20Sopenharmony_ci *	Reclaims Tx Descriptors that the SGE has indicated it has processed,
3568c2ecf20Sopenharmony_ci *	and frees the associated buffers if possible.  If @max == -1, then
3578c2ecf20Sopenharmony_ci *	we'll use a defaiult maximum.  Called with the TX Queue locked.
3588c2ecf20Sopenharmony_ci */
3598c2ecf20Sopenharmony_cistatic inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
3608c2ecf20Sopenharmony_ci				       int maxreclaim, bool unmap)
3618c2ecf20Sopenharmony_ci{
3628c2ecf20Sopenharmony_ci	int reclaim = reclaimable(q);
3638c2ecf20Sopenharmony_ci
3648c2ecf20Sopenharmony_ci	if (reclaim) {
3658c2ecf20Sopenharmony_ci		/*
3668c2ecf20Sopenharmony_ci		 * Limit the amount of clean up work we do at a time to keep
3678c2ecf20Sopenharmony_ci		 * the Tx lock hold time O(1).
3688c2ecf20Sopenharmony_ci		 */
3698c2ecf20Sopenharmony_ci		if (maxreclaim < 0)
3708c2ecf20Sopenharmony_ci			maxreclaim = MAX_TX_RECLAIM;
3718c2ecf20Sopenharmony_ci		if (reclaim > maxreclaim)
3728c2ecf20Sopenharmony_ci			reclaim = maxreclaim;
3738c2ecf20Sopenharmony_ci
3748c2ecf20Sopenharmony_ci		free_tx_desc(adap, q, reclaim, unmap);
3758c2ecf20Sopenharmony_ci		q->in_use -= reclaim;
3768c2ecf20Sopenharmony_ci	}
3778c2ecf20Sopenharmony_ci
3788c2ecf20Sopenharmony_ci	return reclaim;
3798c2ecf20Sopenharmony_ci}
3808c2ecf20Sopenharmony_ci
3818c2ecf20Sopenharmony_ci/**
3828c2ecf20Sopenharmony_ci *	cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
3838c2ecf20Sopenharmony_ci *	@adap: the adapter
3848c2ecf20Sopenharmony_ci *	@q: the Tx queue to reclaim completed descriptors from
3858c2ecf20Sopenharmony_ci *	@unmap: whether the buffers should be unmapped for DMA
3868c2ecf20Sopenharmony_ci *
3878c2ecf20Sopenharmony_ci *	Reclaims Tx descriptors that the SGE has indicated it has processed,
3888c2ecf20Sopenharmony_ci *	and frees the associated buffers if possible.  Called with the Tx
3898c2ecf20Sopenharmony_ci *	queue locked.
3908c2ecf20Sopenharmony_ci */
3918c2ecf20Sopenharmony_civoid cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
3928c2ecf20Sopenharmony_ci				bool unmap)
3938c2ecf20Sopenharmony_ci{
3948c2ecf20Sopenharmony_ci	(void)reclaim_completed_tx(adap, q, -1, unmap);
3958c2ecf20Sopenharmony_ci}
3968c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_reclaim_completed_tx);
3978c2ecf20Sopenharmony_ci
/*
 * Decode the buffer-size index stored in the low bits of an Rx descriptor's
 * DMA address (see the RX_*_BUF enum) into a size in bytes.  An unknown
 * index is a driver bug, hence the BUG().
 */
static inline int get_buf_size(struct adapter *adapter,
			       const struct rx_sw_desc *d)
{
	struct sge *s = &adapter->sge;
	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
	int buf_size;

	switch (rx_buf_size_idx) {
	case RX_SMALL_PG_BUF:
		buf_size = PAGE_SIZE;
		break;

	case RX_LARGE_PG_BUF:
		buf_size = PAGE_SIZE << s->fl_pg_order;
		break;

	case RX_SMALL_MTU_BUF:
		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
		break;

	case RX_LARGE_MTU_BUF:
		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
		break;

	default:
		BUG();
	}

	return buf_size;
}
4288c2ecf20Sopenharmony_ci
4298c2ecf20Sopenharmony_ci/**
4308c2ecf20Sopenharmony_ci *	free_rx_bufs - free the Rx buffers on an SGE free list
4318c2ecf20Sopenharmony_ci *	@adap: the adapter
4328c2ecf20Sopenharmony_ci *	@q: the SGE free list to free buffers from
4338c2ecf20Sopenharmony_ci *	@n: how many buffers to free
4348c2ecf20Sopenharmony_ci *
4358c2ecf20Sopenharmony_ci *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
4368c2ecf20Sopenharmony_ci *	buffers must be made inaccessible to HW before calling this function.
4378c2ecf20Sopenharmony_ci */
4388c2ecf20Sopenharmony_cistatic void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
4398c2ecf20Sopenharmony_ci{
4408c2ecf20Sopenharmony_ci	while (n--) {
4418c2ecf20Sopenharmony_ci		struct rx_sw_desc *d = &q->sdesc[q->cidx];
4428c2ecf20Sopenharmony_ci
4438c2ecf20Sopenharmony_ci		if (is_buf_mapped(d))
4448c2ecf20Sopenharmony_ci			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
4458c2ecf20Sopenharmony_ci				       get_buf_size(adap, d),
4468c2ecf20Sopenharmony_ci				       PCI_DMA_FROMDEVICE);
4478c2ecf20Sopenharmony_ci		put_page(d->page);
4488c2ecf20Sopenharmony_ci		d->page = NULL;
4498c2ecf20Sopenharmony_ci		if (++q->cidx == q->size)
4508c2ecf20Sopenharmony_ci			q->cidx = 0;
4518c2ecf20Sopenharmony_ci		q->avail--;
4528c2ecf20Sopenharmony_ci	}
4538c2ecf20Sopenharmony_ci}
4548c2ecf20Sopenharmony_ci
4558c2ecf20Sopenharmony_ci/**
4568c2ecf20Sopenharmony_ci *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
4578c2ecf20Sopenharmony_ci *	@adap: the adapter
4588c2ecf20Sopenharmony_ci *	@q: the SGE free list
4598c2ecf20Sopenharmony_ci *
4608c2ecf20Sopenharmony_ci *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
4618c2ecf20Sopenharmony_ci *	buffer must be made inaccessible to HW before calling this function.
4628c2ecf20Sopenharmony_ci *
4638c2ecf20Sopenharmony_ci *	This is similar to @free_rx_bufs above but does not free the buffer.
4648c2ecf20Sopenharmony_ci *	Do note that the FL still loses any further access to the buffer.
4658c2ecf20Sopenharmony_ci */
4668c2ecf20Sopenharmony_cistatic void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
4678c2ecf20Sopenharmony_ci{
4688c2ecf20Sopenharmony_ci	struct rx_sw_desc *d = &q->sdesc[q->cidx];
4698c2ecf20Sopenharmony_ci
4708c2ecf20Sopenharmony_ci	if (is_buf_mapped(d))
4718c2ecf20Sopenharmony_ci		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
4728c2ecf20Sopenharmony_ci			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
4738c2ecf20Sopenharmony_ci	d->page = NULL;
4748c2ecf20Sopenharmony_ci	if (++q->cidx == q->size)
4758c2ecf20Sopenharmony_ci		q->cidx = 0;
4768c2ecf20Sopenharmony_ci	q->avail--;
4778c2ecf20Sopenharmony_ci}
4788c2ecf20Sopenharmony_ci
/*
 * Post accumulated Free List credits to the hardware.  Credits are handed
 * to the SGE in units of 8 buffers (one FL descriptor); any remainder below
 * 8 stays in q->pend_cred for a later call.
 */
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= 8) {
		u32 val = adap->params.arch.sge_fl_db;

		/* T4 and later chips encode the PIDX increment differently */
		if (is_t4(adap->params.chip))
			val |= PIDX_V(q->pend_cred / 8);
		else
			val |= PIDX_T5_V(q->pend_cred / 8);

		/* Make sure all memory writes to the Free List queue are
		 * committed before we tell the hardware about them.
		 */
		wmb();

		/* If we don't have access to the new User Doorbell (T5+), use
		 * the old doorbell mechanism; otherwise use the new BAR2
		 * mechanism.
		 */
		if (unlikely(q->bar2_addr == NULL)) {
			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
		} else {
			writel(val | QID_V(q->bar2_qid),
			       q->bar2_addr + SGE_UDB_KDOORBELL);

			/* This Write memory Barrier will force the write to
			 * the User Doorbell area to be flushed.
			 */
			wmb();
		}
		q->pend_cred &= 7;	/* keep the sub-descriptor remainder */
	}
}
5138c2ecf20Sopenharmony_ci
5148c2ecf20Sopenharmony_cistatic inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
5158c2ecf20Sopenharmony_ci				  dma_addr_t mapping)
5168c2ecf20Sopenharmony_ci{
5178c2ecf20Sopenharmony_ci	sd->page = pg;
5188c2ecf20Sopenharmony_ci	sd->dma_addr = mapping;      /* includes size low bits */
5198c2ecf20Sopenharmony_ci}
5208c2ecf20Sopenharmony_ci
5218c2ecf20Sopenharmony_ci/**
5228c2ecf20Sopenharmony_ci *	refill_fl - refill an SGE Rx buffer ring
5238c2ecf20Sopenharmony_ci *	@adap: the adapter
5248c2ecf20Sopenharmony_ci *	@q: the ring to refill
5258c2ecf20Sopenharmony_ci *	@n: the number of new buffers to allocate
5268c2ecf20Sopenharmony_ci *	@gfp: the gfp flags for the allocations
5278c2ecf20Sopenharmony_ci *
5288c2ecf20Sopenharmony_ci *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
5298c2ecf20Sopenharmony_ci *	allocated with the supplied gfp flags.  The caller must assure that
5308c2ecf20Sopenharmony_ci *	@n does not exceed the queue's capacity.  If afterwards the queue is
5318c2ecf20Sopenharmony_ci *	found critically low mark it as starving in the bitmap of starving FLs.
5328c2ecf20Sopenharmony_ci *
5338c2ecf20Sopenharmony_ci *	Returns the number of buffers allocated.
5348c2ecf20Sopenharmony_ci */
5358c2ecf20Sopenharmony_cistatic unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
5368c2ecf20Sopenharmony_ci			      gfp_t gfp)
5378c2ecf20Sopenharmony_ci{
5388c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
5398c2ecf20Sopenharmony_ci	struct page *pg;
5408c2ecf20Sopenharmony_ci	dma_addr_t mapping;
5418c2ecf20Sopenharmony_ci	unsigned int cred = q->avail;
5428c2ecf20Sopenharmony_ci	__be64 *d = &q->desc[q->pidx];
5438c2ecf20Sopenharmony_ci	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
5448c2ecf20Sopenharmony_ci	int node;
5458c2ecf20Sopenharmony_ci
5468c2ecf20Sopenharmony_ci#ifdef CONFIG_DEBUG_FS
5478c2ecf20Sopenharmony_ci	if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
5488c2ecf20Sopenharmony_ci		goto out;
5498c2ecf20Sopenharmony_ci#endif
5508c2ecf20Sopenharmony_ci
5518c2ecf20Sopenharmony_ci	gfp |= __GFP_NOWARN;
5528c2ecf20Sopenharmony_ci	node = dev_to_node(adap->pdev_dev);
5538c2ecf20Sopenharmony_ci
5548c2ecf20Sopenharmony_ci	if (s->fl_pg_order == 0)
5558c2ecf20Sopenharmony_ci		goto alloc_small_pages;
5568c2ecf20Sopenharmony_ci
5578c2ecf20Sopenharmony_ci	/*
5588c2ecf20Sopenharmony_ci	 * Prefer large buffers
5598c2ecf20Sopenharmony_ci	 */
5608c2ecf20Sopenharmony_ci	while (n) {
5618c2ecf20Sopenharmony_ci		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
5628c2ecf20Sopenharmony_ci		if (unlikely(!pg)) {
5638c2ecf20Sopenharmony_ci			q->large_alloc_failed++;
5648c2ecf20Sopenharmony_ci			break;       /* fall back to single pages */
5658c2ecf20Sopenharmony_ci		}
5668c2ecf20Sopenharmony_ci
5678c2ecf20Sopenharmony_ci		mapping = dma_map_page(adap->pdev_dev, pg, 0,
5688c2ecf20Sopenharmony_ci				       PAGE_SIZE << s->fl_pg_order,
5698c2ecf20Sopenharmony_ci				       PCI_DMA_FROMDEVICE);
5708c2ecf20Sopenharmony_ci		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
5718c2ecf20Sopenharmony_ci			__free_pages(pg, s->fl_pg_order);
5728c2ecf20Sopenharmony_ci			q->mapping_err++;
5738c2ecf20Sopenharmony_ci			goto out;   /* do not try small pages for this error */
5748c2ecf20Sopenharmony_ci		}
5758c2ecf20Sopenharmony_ci		mapping |= RX_LARGE_PG_BUF;
5768c2ecf20Sopenharmony_ci		*d++ = cpu_to_be64(mapping);
5778c2ecf20Sopenharmony_ci
5788c2ecf20Sopenharmony_ci		set_rx_sw_desc(sd, pg, mapping);
5798c2ecf20Sopenharmony_ci		sd++;
5808c2ecf20Sopenharmony_ci
5818c2ecf20Sopenharmony_ci		q->avail++;
5828c2ecf20Sopenharmony_ci		if (++q->pidx == q->size) {
5838c2ecf20Sopenharmony_ci			q->pidx = 0;
5848c2ecf20Sopenharmony_ci			sd = q->sdesc;
5858c2ecf20Sopenharmony_ci			d = q->desc;
5868c2ecf20Sopenharmony_ci		}
5878c2ecf20Sopenharmony_ci		n--;
5888c2ecf20Sopenharmony_ci	}
5898c2ecf20Sopenharmony_ci
5908c2ecf20Sopenharmony_cialloc_small_pages:
5918c2ecf20Sopenharmony_ci	while (n--) {
5928c2ecf20Sopenharmony_ci		pg = alloc_pages_node(node, gfp, 0);
5938c2ecf20Sopenharmony_ci		if (unlikely(!pg)) {
5948c2ecf20Sopenharmony_ci			q->alloc_failed++;
5958c2ecf20Sopenharmony_ci			break;
5968c2ecf20Sopenharmony_ci		}
5978c2ecf20Sopenharmony_ci
5988c2ecf20Sopenharmony_ci		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
5998c2ecf20Sopenharmony_ci				       PCI_DMA_FROMDEVICE);
6008c2ecf20Sopenharmony_ci		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
6018c2ecf20Sopenharmony_ci			put_page(pg);
6028c2ecf20Sopenharmony_ci			q->mapping_err++;
6038c2ecf20Sopenharmony_ci			goto out;
6048c2ecf20Sopenharmony_ci		}
6058c2ecf20Sopenharmony_ci		*d++ = cpu_to_be64(mapping);
6068c2ecf20Sopenharmony_ci
6078c2ecf20Sopenharmony_ci		set_rx_sw_desc(sd, pg, mapping);
6088c2ecf20Sopenharmony_ci		sd++;
6098c2ecf20Sopenharmony_ci
6108c2ecf20Sopenharmony_ci		q->avail++;
6118c2ecf20Sopenharmony_ci		if (++q->pidx == q->size) {
6128c2ecf20Sopenharmony_ci			q->pidx = 0;
6138c2ecf20Sopenharmony_ci			sd = q->sdesc;
6148c2ecf20Sopenharmony_ci			d = q->desc;
6158c2ecf20Sopenharmony_ci		}
6168c2ecf20Sopenharmony_ci	}
6178c2ecf20Sopenharmony_ci
6188c2ecf20Sopenharmony_ciout:	cred = q->avail - cred;
6198c2ecf20Sopenharmony_ci	q->pend_cred += cred;
6208c2ecf20Sopenharmony_ci	ring_fl_db(adap, q);
6218c2ecf20Sopenharmony_ci
6228c2ecf20Sopenharmony_ci	if (unlikely(fl_starving(adap, q))) {
6238c2ecf20Sopenharmony_ci		smp_wmb();
6248c2ecf20Sopenharmony_ci		q->low++;
6258c2ecf20Sopenharmony_ci		set_bit(q->cntxt_id - adap->sge.egr_start,
6268c2ecf20Sopenharmony_ci			adap->sge.starving_fl);
6278c2ecf20Sopenharmony_ci	}
6288c2ecf20Sopenharmony_ci
6298c2ecf20Sopenharmony_ci	return cred;
6308c2ecf20Sopenharmony_ci}
6318c2ecf20Sopenharmony_ci
6328c2ecf20Sopenharmony_cistatic inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
6338c2ecf20Sopenharmony_ci{
6348c2ecf20Sopenharmony_ci	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
6358c2ecf20Sopenharmony_ci		  GFP_ATOMIC);
6368c2ecf20Sopenharmony_ci}
6378c2ecf20Sopenharmony_ci
6388c2ecf20Sopenharmony_ci/**
6398c2ecf20Sopenharmony_ci *	alloc_ring - allocate resources for an SGE descriptor ring
6408c2ecf20Sopenharmony_ci *	@dev: the PCI device's core device
6418c2ecf20Sopenharmony_ci *	@nelem: the number of descriptors
6428c2ecf20Sopenharmony_ci *	@elem_size: the size of each descriptor
6438c2ecf20Sopenharmony_ci *	@sw_size: the size of the SW state associated with each ring element
6448c2ecf20Sopenharmony_ci *	@phys: the physical address of the allocated ring
6458c2ecf20Sopenharmony_ci *	@metadata: address of the array holding the SW state for the ring
6468c2ecf20Sopenharmony_ci *	@stat_size: extra space in HW ring for status information
6478c2ecf20Sopenharmony_ci *	@node: preferred node for memory allocations
6488c2ecf20Sopenharmony_ci *
6498c2ecf20Sopenharmony_ci *	Allocates resources for an SGE descriptor ring, such as Tx queues,
6508c2ecf20Sopenharmony_ci *	free buffer lists, or response queues.  Each SGE ring requires
6518c2ecf20Sopenharmony_ci *	space for its HW descriptors plus, optionally, space for the SW state
6528c2ecf20Sopenharmony_ci *	associated with each HW entry (the metadata).  The function returns
6538c2ecf20Sopenharmony_ci *	three values: the virtual address for the HW ring (the return value
6548c2ecf20Sopenharmony_ci *	of the function), the bus address of the HW ring, and the address
6558c2ecf20Sopenharmony_ci *	of the SW ring.
6568c2ecf20Sopenharmony_ci */
6578c2ecf20Sopenharmony_cistatic void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
6588c2ecf20Sopenharmony_ci			size_t sw_size, dma_addr_t *phys, void *metadata,
6598c2ecf20Sopenharmony_ci			size_t stat_size, int node)
6608c2ecf20Sopenharmony_ci{
6618c2ecf20Sopenharmony_ci	size_t len = nelem * elem_size + stat_size;
6628c2ecf20Sopenharmony_ci	void *s = NULL;
6638c2ecf20Sopenharmony_ci	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
6648c2ecf20Sopenharmony_ci
6658c2ecf20Sopenharmony_ci	if (!p)
6668c2ecf20Sopenharmony_ci		return NULL;
6678c2ecf20Sopenharmony_ci	if (sw_size) {
6688c2ecf20Sopenharmony_ci		s = kcalloc_node(sw_size, nelem, GFP_KERNEL, node);
6698c2ecf20Sopenharmony_ci
6708c2ecf20Sopenharmony_ci		if (!s) {
6718c2ecf20Sopenharmony_ci			dma_free_coherent(dev, len, p, *phys);
6728c2ecf20Sopenharmony_ci			return NULL;
6738c2ecf20Sopenharmony_ci		}
6748c2ecf20Sopenharmony_ci	}
6758c2ecf20Sopenharmony_ci	if (metadata)
6768c2ecf20Sopenharmony_ci		*(void **)metadata = s;
6778c2ecf20Sopenharmony_ci	return p;
6788c2ecf20Sopenharmony_ci}
6798c2ecf20Sopenharmony_ci
6808c2ecf20Sopenharmony_ci/**
6818c2ecf20Sopenharmony_ci *	sgl_len - calculates the size of an SGL of the given capacity
6828c2ecf20Sopenharmony_ci *	@n: the number of SGL entries
6838c2ecf20Sopenharmony_ci *
6848c2ecf20Sopenharmony_ci *	Calculates the number of flits needed for a scatter/gather list that
6858c2ecf20Sopenharmony_ci *	can hold the given number of entries.
6868c2ecf20Sopenharmony_ci */
6878c2ecf20Sopenharmony_cistatic inline unsigned int sgl_len(unsigned int n)
6888c2ecf20Sopenharmony_ci{
6898c2ecf20Sopenharmony_ci	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
6908c2ecf20Sopenharmony_ci	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
6918c2ecf20Sopenharmony_ci	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
6928c2ecf20Sopenharmony_ci	 * repeated sequences of { Length[i], Length[i+1], Address[i],
6938c2ecf20Sopenharmony_ci	 * Address[i+1] } (this ensures that all addresses are on 64-bit
6948c2ecf20Sopenharmony_ci	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
6958c2ecf20Sopenharmony_ci	 * Address[N+1] is omitted.
6968c2ecf20Sopenharmony_ci	 *
6978c2ecf20Sopenharmony_ci	 * The following calculation incorporates all of the above.  It's
6988c2ecf20Sopenharmony_ci	 * somewhat hard to follow but, briefly: the "+2" accounts for the
6998c2ecf20Sopenharmony_ci	 * first two flits which include the DSGL header, Length0 and
7008c2ecf20Sopenharmony_ci	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
7018c2ecf20Sopenharmony_ci	 * flits for every pair of the remaining N) +1 if (n-1) is odd; and
7028c2ecf20Sopenharmony_ci	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
7038c2ecf20Sopenharmony_ci	 * (n-1) is odd ...
7048c2ecf20Sopenharmony_ci	 */
7058c2ecf20Sopenharmony_ci	n--;
7068c2ecf20Sopenharmony_ci	return (3 * n) / 2 + (n & 1) + 2;
7078c2ecf20Sopenharmony_ci}
7088c2ecf20Sopenharmony_ci
7098c2ecf20Sopenharmony_ci/**
7108c2ecf20Sopenharmony_ci *	flits_to_desc - returns the num of Tx descriptors for the given flits
7118c2ecf20Sopenharmony_ci *	@n: the number of flits
7128c2ecf20Sopenharmony_ci *
7138c2ecf20Sopenharmony_ci *	Returns the number of Tx descriptors needed for the supplied number
7148c2ecf20Sopenharmony_ci *	of flits.
7158c2ecf20Sopenharmony_ci */
7168c2ecf20Sopenharmony_cistatic inline unsigned int flits_to_desc(unsigned int n)
7178c2ecf20Sopenharmony_ci{
7188c2ecf20Sopenharmony_ci	BUG_ON(n > SGE_MAX_WR_LEN / 8);
7198c2ecf20Sopenharmony_ci	return DIV_ROUND_UP(n, 8);
7208c2ecf20Sopenharmony_ci}
7218c2ecf20Sopenharmony_ci
7228c2ecf20Sopenharmony_ci/**
7238c2ecf20Sopenharmony_ci *	is_eth_imm - can an Ethernet packet be sent as immediate data?
7248c2ecf20Sopenharmony_ci *	@skb: the packet
7258c2ecf20Sopenharmony_ci *	@chip_ver: chip version
7268c2ecf20Sopenharmony_ci *
7278c2ecf20Sopenharmony_ci *	Returns whether an Ethernet packet is small enough to fit as
7288c2ecf20Sopenharmony_ci *	immediate data. Return value corresponds to headroom required.
7298c2ecf20Sopenharmony_ci */
7308c2ecf20Sopenharmony_cistatic inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
7318c2ecf20Sopenharmony_ci{
7328c2ecf20Sopenharmony_ci	int hdrlen = 0;
7338c2ecf20Sopenharmony_ci
7348c2ecf20Sopenharmony_ci	if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
7358c2ecf20Sopenharmony_ci	    chip_ver > CHELSIO_T5) {
7368c2ecf20Sopenharmony_ci		hdrlen = sizeof(struct cpl_tx_tnl_lso);
7378c2ecf20Sopenharmony_ci		hdrlen += sizeof(struct cpl_tx_pkt_core);
7388c2ecf20Sopenharmony_ci	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
7398c2ecf20Sopenharmony_ci		return 0;
7408c2ecf20Sopenharmony_ci	} else {
7418c2ecf20Sopenharmony_ci		hdrlen = skb_shinfo(skb)->gso_size ?
7428c2ecf20Sopenharmony_ci			 sizeof(struct cpl_tx_pkt_lso_core) : 0;
7438c2ecf20Sopenharmony_ci		hdrlen += sizeof(struct cpl_tx_pkt);
7448c2ecf20Sopenharmony_ci	}
7458c2ecf20Sopenharmony_ci	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
7468c2ecf20Sopenharmony_ci		return hdrlen;
7478c2ecf20Sopenharmony_ci	return 0;
7488c2ecf20Sopenharmony_ci}
7498c2ecf20Sopenharmony_ci
7508c2ecf20Sopenharmony_ci/**
7518c2ecf20Sopenharmony_ci *	calc_tx_flits - calculate the number of flits for a packet Tx WR
7528c2ecf20Sopenharmony_ci *	@skb: the packet
7538c2ecf20Sopenharmony_ci *	@chip_ver: chip version
7548c2ecf20Sopenharmony_ci *
7558c2ecf20Sopenharmony_ci *	Returns the number of flits needed for a Tx WR for the given Ethernet
7568c2ecf20Sopenharmony_ci *	packet, including the needed WR and CPL headers.
7578c2ecf20Sopenharmony_ci */
7588c2ecf20Sopenharmony_cistatic inline unsigned int calc_tx_flits(const struct sk_buff *skb,
7598c2ecf20Sopenharmony_ci					 unsigned int chip_ver)
7608c2ecf20Sopenharmony_ci{
7618c2ecf20Sopenharmony_ci	unsigned int flits;
7628c2ecf20Sopenharmony_ci	int hdrlen = is_eth_imm(skb, chip_ver);
7638c2ecf20Sopenharmony_ci
7648c2ecf20Sopenharmony_ci	/* If the skb is small enough, we can pump it out as a work request
7658c2ecf20Sopenharmony_ci	 * with only immediate data.  In that case we just have to have the
7668c2ecf20Sopenharmony_ci	 * TX Packet header plus the skb data in the Work Request.
7678c2ecf20Sopenharmony_ci	 */
7688c2ecf20Sopenharmony_ci
7698c2ecf20Sopenharmony_ci	if (hdrlen)
7708c2ecf20Sopenharmony_ci		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
7718c2ecf20Sopenharmony_ci
7728c2ecf20Sopenharmony_ci	/* Otherwise, we're going to have to construct a Scatter gather list
7738c2ecf20Sopenharmony_ci	 * of the skb body and fragments.  We also include the flits necessary
7748c2ecf20Sopenharmony_ci	 * for the TX Packet Work Request and CPL.  We always have a firmware
7758c2ecf20Sopenharmony_ci	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
7768c2ecf20Sopenharmony_ci	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
7778c2ecf20Sopenharmony_ci	 * message or, if we're doing a Large Send Offload, an LSO CPL message
7788c2ecf20Sopenharmony_ci	 * with an embedded TX Packet Write CPL message.
7798c2ecf20Sopenharmony_ci	 */
7808c2ecf20Sopenharmony_ci	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
7818c2ecf20Sopenharmony_ci	if (skb_shinfo(skb)->gso_size) {
7828c2ecf20Sopenharmony_ci		if (skb->encapsulation && chip_ver > CHELSIO_T5) {
7838c2ecf20Sopenharmony_ci			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
7848c2ecf20Sopenharmony_ci				 sizeof(struct cpl_tx_tnl_lso);
7858c2ecf20Sopenharmony_ci		} else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
7868c2ecf20Sopenharmony_ci			u32 pkt_hdrlen;
7878c2ecf20Sopenharmony_ci
7888c2ecf20Sopenharmony_ci			pkt_hdrlen = eth_get_headlen(skb->dev, skb->data,
7898c2ecf20Sopenharmony_ci						     skb_headlen(skb));
7908c2ecf20Sopenharmony_ci			hdrlen = sizeof(struct fw_eth_tx_eo_wr) +
7918c2ecf20Sopenharmony_ci				 round_up(pkt_hdrlen, 16);
7928c2ecf20Sopenharmony_ci		} else {
7938c2ecf20Sopenharmony_ci			hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
7948c2ecf20Sopenharmony_ci				 sizeof(struct cpl_tx_pkt_lso_core);
7958c2ecf20Sopenharmony_ci		}
7968c2ecf20Sopenharmony_ci
7978c2ecf20Sopenharmony_ci		hdrlen += sizeof(struct cpl_tx_pkt_core);
7988c2ecf20Sopenharmony_ci		flits += (hdrlen / sizeof(__be64));
7998c2ecf20Sopenharmony_ci	} else {
8008c2ecf20Sopenharmony_ci		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
8018c2ecf20Sopenharmony_ci			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
8028c2ecf20Sopenharmony_ci	}
8038c2ecf20Sopenharmony_ci	return flits;
8048c2ecf20Sopenharmony_ci}
8058c2ecf20Sopenharmony_ci
8068c2ecf20Sopenharmony_ci/**
8078c2ecf20Sopenharmony_ci *	calc_tx_descs - calculate the number of Tx descriptors for a packet
8088c2ecf20Sopenharmony_ci *	@skb: the packet
8098c2ecf20Sopenharmony_ci *	@chip_ver: chip version
8108c2ecf20Sopenharmony_ci *
8118c2ecf20Sopenharmony_ci *	Returns the number of Tx descriptors needed for the given Ethernet
8128c2ecf20Sopenharmony_ci *	packet, including the needed WR and CPL headers.
8138c2ecf20Sopenharmony_ci */
8148c2ecf20Sopenharmony_cistatic inline unsigned int calc_tx_descs(const struct sk_buff *skb,
8158c2ecf20Sopenharmony_ci					 unsigned int chip_ver)
8168c2ecf20Sopenharmony_ci{
8178c2ecf20Sopenharmony_ci	return flits_to_desc(calc_tx_flits(skb, chip_ver));
8188c2ecf20Sopenharmony_ci}
8198c2ecf20Sopenharmony_ci
8208c2ecf20Sopenharmony_ci/**
8218c2ecf20Sopenharmony_ci *	cxgb4_write_sgl - populate a scatter/gather list for a packet
8228c2ecf20Sopenharmony_ci *	@skb: the packet
8238c2ecf20Sopenharmony_ci *	@q: the Tx queue we are writing into
8248c2ecf20Sopenharmony_ci *	@sgl: starting location for writing the SGL
8258c2ecf20Sopenharmony_ci *	@end: points right after the end of the SGL
8268c2ecf20Sopenharmony_ci *	@start: start offset into skb main-body data to include in the SGL
8278c2ecf20Sopenharmony_ci *	@addr: the list of bus addresses for the SGL elements
8288c2ecf20Sopenharmony_ci *
8298c2ecf20Sopenharmony_ci *	Generates a gather list for the buffers that make up a packet.
8308c2ecf20Sopenharmony_ci *	The caller must provide adequate space for the SGL that will be written.
8318c2ecf20Sopenharmony_ci *	The SGL includes all of the packet's page fragments and the data in its
8328c2ecf20Sopenharmony_ci *	main body except for the first @start bytes.  @sgl must be 16-byte
8338c2ecf20Sopenharmony_ci *	aligned and within a Tx descriptor with available space.  @end points
8348c2ecf20Sopenharmony_ci *	right after the end of the SGL but does not account for any potential
8358c2ecf20Sopenharmony_ci *	wrap around, i.e., @end > @sgl.
8368c2ecf20Sopenharmony_ci */
8378c2ecf20Sopenharmony_civoid cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
8388c2ecf20Sopenharmony_ci		     struct ulptx_sgl *sgl, u64 *end, unsigned int start,
8398c2ecf20Sopenharmony_ci		     const dma_addr_t *addr)
8408c2ecf20Sopenharmony_ci{
8418c2ecf20Sopenharmony_ci	unsigned int i, len;
8428c2ecf20Sopenharmony_ci	struct ulptx_sge_pair *to;
8438c2ecf20Sopenharmony_ci	const struct skb_shared_info *si = skb_shinfo(skb);
8448c2ecf20Sopenharmony_ci	unsigned int nfrags = si->nr_frags;
8458c2ecf20Sopenharmony_ci	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
8468c2ecf20Sopenharmony_ci
8478c2ecf20Sopenharmony_ci	len = skb_headlen(skb) - start;
8488c2ecf20Sopenharmony_ci	if (likely(len)) {
8498c2ecf20Sopenharmony_ci		sgl->len0 = htonl(len);
8508c2ecf20Sopenharmony_ci		sgl->addr0 = cpu_to_be64(addr[0] + start);
8518c2ecf20Sopenharmony_ci		nfrags++;
8528c2ecf20Sopenharmony_ci	} else {
8538c2ecf20Sopenharmony_ci		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
8548c2ecf20Sopenharmony_ci		sgl->addr0 = cpu_to_be64(addr[1]);
8558c2ecf20Sopenharmony_ci	}
8568c2ecf20Sopenharmony_ci
8578c2ecf20Sopenharmony_ci	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
8588c2ecf20Sopenharmony_ci			      ULPTX_NSGE_V(nfrags));
8598c2ecf20Sopenharmony_ci	if (likely(--nfrags == 0))
8608c2ecf20Sopenharmony_ci		return;
8618c2ecf20Sopenharmony_ci	/*
8628c2ecf20Sopenharmony_ci	 * Most of the complexity below deals with the possibility we hit the
8638c2ecf20Sopenharmony_ci	 * end of the queue in the middle of writing the SGL.  For this case
8648c2ecf20Sopenharmony_ci	 * only we create the SGL in a temporary buffer and then copy it.
8658c2ecf20Sopenharmony_ci	 */
8668c2ecf20Sopenharmony_ci	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
8678c2ecf20Sopenharmony_ci
8688c2ecf20Sopenharmony_ci	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
8698c2ecf20Sopenharmony_ci		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
8708c2ecf20Sopenharmony_ci		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
8718c2ecf20Sopenharmony_ci		to->addr[0] = cpu_to_be64(addr[i]);
8728c2ecf20Sopenharmony_ci		to->addr[1] = cpu_to_be64(addr[++i]);
8738c2ecf20Sopenharmony_ci	}
8748c2ecf20Sopenharmony_ci	if (nfrags) {
8758c2ecf20Sopenharmony_ci		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
8768c2ecf20Sopenharmony_ci		to->len[1] = cpu_to_be32(0);
8778c2ecf20Sopenharmony_ci		to->addr[0] = cpu_to_be64(addr[i + 1]);
8788c2ecf20Sopenharmony_ci	}
8798c2ecf20Sopenharmony_ci	if (unlikely((u8 *)end > (u8 *)q->stat)) {
8808c2ecf20Sopenharmony_ci		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
8818c2ecf20Sopenharmony_ci
8828c2ecf20Sopenharmony_ci		if (likely(part0))
8838c2ecf20Sopenharmony_ci			memcpy(sgl->sge, buf, part0);
8848c2ecf20Sopenharmony_ci		part1 = (u8 *)end - (u8 *)q->stat;
8858c2ecf20Sopenharmony_ci		memcpy(q->desc, (u8 *)buf + part0, part1);
8868c2ecf20Sopenharmony_ci		end = (void *)q->desc + part1;
8878c2ecf20Sopenharmony_ci	}
8888c2ecf20Sopenharmony_ci	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
8898c2ecf20Sopenharmony_ci		*end = 0;
8908c2ecf20Sopenharmony_ci}
8918c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_write_sgl);
8928c2ecf20Sopenharmony_ci
8938c2ecf20Sopenharmony_ci/*	cxgb4_write_partial_sgl - populate SGL for partial packet
8948c2ecf20Sopenharmony_ci *	@skb: the packet
8958c2ecf20Sopenharmony_ci *	@q: the Tx queue we are writing into
8968c2ecf20Sopenharmony_ci *	@sgl: starting location for writing the SGL
8978c2ecf20Sopenharmony_ci *	@end: points right after the end of the SGL
8988c2ecf20Sopenharmony_ci *	@addr: the list of bus addresses for the SGL elements
8998c2ecf20Sopenharmony_ci *	@start: start offset in the SKB where partial data starts
9008c2ecf20Sopenharmony_ci *	@len: length of data from @start to send out
9018c2ecf20Sopenharmony_ci *
9028c2ecf20Sopenharmony_ci *	This API will handle sending out partial data of a skb if required.
9038c2ecf20Sopenharmony_ci *	Unlike cxgb4_write_sgl, @start can be any offset into the skb data,
9048c2ecf20Sopenharmony_ci *	and @len will decide how much data after @start offset to send out.
9058c2ecf20Sopenharmony_ci */
9068c2ecf20Sopenharmony_civoid cxgb4_write_partial_sgl(const struct sk_buff *skb, struct sge_txq *q,
9078c2ecf20Sopenharmony_ci			     struct ulptx_sgl *sgl, u64 *end,
9088c2ecf20Sopenharmony_ci			     const dma_addr_t *addr, u32 start, u32 len)
9098c2ecf20Sopenharmony_ci{
9108c2ecf20Sopenharmony_ci	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1] = {0}, *to;
9118c2ecf20Sopenharmony_ci	u32 frag_size, skb_linear_data_len = skb_headlen(skb);
9128c2ecf20Sopenharmony_ci	struct skb_shared_info *si = skb_shinfo(skb);
9138c2ecf20Sopenharmony_ci	u8 i = 0, frag_idx = 0, nfrags = 0;
9148c2ecf20Sopenharmony_ci	skb_frag_t *frag;
9158c2ecf20Sopenharmony_ci
9168c2ecf20Sopenharmony_ci	/* Fill the first SGL either from linear data or from partial
9178c2ecf20Sopenharmony_ci	 * frag based on @start.
9188c2ecf20Sopenharmony_ci	 */
9198c2ecf20Sopenharmony_ci	if (unlikely(start < skb_linear_data_len)) {
9208c2ecf20Sopenharmony_ci		frag_size = min(len, skb_linear_data_len - start);
9218c2ecf20Sopenharmony_ci		sgl->len0 = htonl(frag_size);
9228c2ecf20Sopenharmony_ci		sgl->addr0 = cpu_to_be64(addr[0] + start);
9238c2ecf20Sopenharmony_ci		len -= frag_size;
9248c2ecf20Sopenharmony_ci		nfrags++;
9258c2ecf20Sopenharmony_ci	} else {
9268c2ecf20Sopenharmony_ci		start -= skb_linear_data_len;
9278c2ecf20Sopenharmony_ci		frag = &si->frags[frag_idx];
9288c2ecf20Sopenharmony_ci		frag_size = skb_frag_size(frag);
9298c2ecf20Sopenharmony_ci		/* find the first frag */
9308c2ecf20Sopenharmony_ci		while (start >= frag_size) {
9318c2ecf20Sopenharmony_ci			start -= frag_size;
9328c2ecf20Sopenharmony_ci			frag_idx++;
9338c2ecf20Sopenharmony_ci			frag = &si->frags[frag_idx];
9348c2ecf20Sopenharmony_ci			frag_size = skb_frag_size(frag);
9358c2ecf20Sopenharmony_ci		}
9368c2ecf20Sopenharmony_ci
9378c2ecf20Sopenharmony_ci		frag_size = min(len, skb_frag_size(frag) - start);
9388c2ecf20Sopenharmony_ci		sgl->len0 = cpu_to_be32(frag_size);
9398c2ecf20Sopenharmony_ci		sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start);
9408c2ecf20Sopenharmony_ci		len -= frag_size;
9418c2ecf20Sopenharmony_ci		nfrags++;
9428c2ecf20Sopenharmony_ci		frag_idx++;
9438c2ecf20Sopenharmony_ci	}
9448c2ecf20Sopenharmony_ci
9458c2ecf20Sopenharmony_ci	/* If the entire partial data fit in one SGL, then send it out
9468c2ecf20Sopenharmony_ci	 * now.
9478c2ecf20Sopenharmony_ci	 */
9488c2ecf20Sopenharmony_ci	if (!len)
9498c2ecf20Sopenharmony_ci		goto done;
9508c2ecf20Sopenharmony_ci
9518c2ecf20Sopenharmony_ci	/* Most of the complexity below deals with the possibility we hit the
9528c2ecf20Sopenharmony_ci	 * end of the queue in the middle of writing the SGL.  For this case
9538c2ecf20Sopenharmony_ci	 * only we create the SGL in a temporary buffer and then copy it.
9548c2ecf20Sopenharmony_ci	 */
9558c2ecf20Sopenharmony_ci	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
9568c2ecf20Sopenharmony_ci
9578c2ecf20Sopenharmony_ci	/* If the skb couldn't fit in first SGL completely, fill the
9588c2ecf20Sopenharmony_ci	 * rest of the frags in subsequent SGLs. Note that each SGL
9598c2ecf20Sopenharmony_ci	 * pair can store 2 frags.
9608c2ecf20Sopenharmony_ci	 */
9618c2ecf20Sopenharmony_ci	while (len) {
9628c2ecf20Sopenharmony_ci		frag_size = min(len, skb_frag_size(&si->frags[frag_idx]));
9638c2ecf20Sopenharmony_ci		to->len[i & 1] = cpu_to_be32(frag_size);
9648c2ecf20Sopenharmony_ci		to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]);
9658c2ecf20Sopenharmony_ci		if (i && (i & 1))
9668c2ecf20Sopenharmony_ci			to++;
9678c2ecf20Sopenharmony_ci		nfrags++;
9688c2ecf20Sopenharmony_ci		frag_idx++;
9698c2ecf20Sopenharmony_ci		i++;
9708c2ecf20Sopenharmony_ci		len -= frag_size;
9718c2ecf20Sopenharmony_ci	}
9728c2ecf20Sopenharmony_ci
9738c2ecf20Sopenharmony_ci	/* If we ended in an odd boundary, then set the second SGL's
9748c2ecf20Sopenharmony_ci	 * length in the pair to 0.
9758c2ecf20Sopenharmony_ci	 */
9768c2ecf20Sopenharmony_ci	if (i & 1)
9778c2ecf20Sopenharmony_ci		to->len[1] = cpu_to_be32(0);
9788c2ecf20Sopenharmony_ci
9798c2ecf20Sopenharmony_ci	/* Copy from temporary buffer to Tx ring, in case we hit the
9808c2ecf20Sopenharmony_ci	 * end of the queue in the middle of writing the SGL.
9818c2ecf20Sopenharmony_ci	 */
9828c2ecf20Sopenharmony_ci	if (unlikely((u8 *)end > (u8 *)q->stat)) {
9838c2ecf20Sopenharmony_ci		u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
9848c2ecf20Sopenharmony_ci
9858c2ecf20Sopenharmony_ci		if (likely(part0))
9868c2ecf20Sopenharmony_ci			memcpy(sgl->sge, buf, part0);
9878c2ecf20Sopenharmony_ci		part1 = (u8 *)end - (u8 *)q->stat;
9888c2ecf20Sopenharmony_ci		memcpy(q->desc, (u8 *)buf + part0, part1);
9898c2ecf20Sopenharmony_ci		end = (void *)q->desc + part1;
9908c2ecf20Sopenharmony_ci	}
9918c2ecf20Sopenharmony_ci
9928c2ecf20Sopenharmony_ci	/* 0-pad to multiple of 16 */
9938c2ecf20Sopenharmony_ci	if ((uintptr_t)end & 8)
9948c2ecf20Sopenharmony_ci		*end = 0;
9958c2ecf20Sopenharmony_cidone:
9968c2ecf20Sopenharmony_ci	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
9978c2ecf20Sopenharmony_ci			ULPTX_NSGE_V(nfrags));
9988c2ecf20Sopenharmony_ci}
9998c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_write_partial_sgl);
10008c2ecf20Sopenharmony_ci
10018c2ecf20Sopenharmony_ci/* This function copies 64 byte coalesced work request to
10028c2ecf20Sopenharmony_ci * memory mapped BAR2 space. For coalesced WR SGE fetches
10038c2ecf20Sopenharmony_ci * data from the FIFO instead of from Host.
10048c2ecf20Sopenharmony_ci */
10058c2ecf20Sopenharmony_cistatic void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
10068c2ecf20Sopenharmony_ci{
10078c2ecf20Sopenharmony_ci	int count = 8;
10088c2ecf20Sopenharmony_ci
10098c2ecf20Sopenharmony_ci	while (count) {
10108c2ecf20Sopenharmony_ci		writeq(*src, dst);
10118c2ecf20Sopenharmony_ci		src++;
10128c2ecf20Sopenharmony_ci		dst++;
10138c2ecf20Sopenharmony_ci		count--;
10148c2ecf20Sopenharmony_ci	}
10158c2ecf20Sopenharmony_ci}
10168c2ecf20Sopenharmony_ci
10178c2ecf20Sopenharmony_ci/**
10188c2ecf20Sopenharmony_ci *	cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
10198c2ecf20Sopenharmony_ci *	@adap: the adapter
10208c2ecf20Sopenharmony_ci *	@q: the Tx queue
10218c2ecf20Sopenharmony_ci *	@n: number of new descriptors to give to HW
10228c2ecf20Sopenharmony_ci *
10238c2ecf20Sopenharmony_ci *	Ring the doorbel for a Tx queue.
10248c2ecf20Sopenharmony_ci */
10258c2ecf20Sopenharmony_ciinline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
10268c2ecf20Sopenharmony_ci{
10278c2ecf20Sopenharmony_ci	/* Make sure that all writes to the TX Descriptors are committed
10288c2ecf20Sopenharmony_ci	 * before we tell the hardware about them.
10298c2ecf20Sopenharmony_ci	 */
10308c2ecf20Sopenharmony_ci	wmb();
10318c2ecf20Sopenharmony_ci
10328c2ecf20Sopenharmony_ci	/* If we don't have access to the new User Doorbell (T5+), use the old
10338c2ecf20Sopenharmony_ci	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
10348c2ecf20Sopenharmony_ci	 */
10358c2ecf20Sopenharmony_ci	if (unlikely(q->bar2_addr == NULL)) {
10368c2ecf20Sopenharmony_ci		u32 val = PIDX_V(n);
10378c2ecf20Sopenharmony_ci		unsigned long flags;
10388c2ecf20Sopenharmony_ci
10398c2ecf20Sopenharmony_ci		/* For T4 we need to participate in the Doorbell Recovery
10408c2ecf20Sopenharmony_ci		 * mechanism.
10418c2ecf20Sopenharmony_ci		 */
10428c2ecf20Sopenharmony_ci		spin_lock_irqsave(&q->db_lock, flags);
10438c2ecf20Sopenharmony_ci		if (!q->db_disabled)
10448c2ecf20Sopenharmony_ci			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
10458c2ecf20Sopenharmony_ci				     QID_V(q->cntxt_id) | val);
10468c2ecf20Sopenharmony_ci		else
10478c2ecf20Sopenharmony_ci			q->db_pidx_inc += n;
10488c2ecf20Sopenharmony_ci		q->db_pidx = q->pidx;
10498c2ecf20Sopenharmony_ci		spin_unlock_irqrestore(&q->db_lock, flags);
10508c2ecf20Sopenharmony_ci	} else {
10518c2ecf20Sopenharmony_ci		u32 val = PIDX_T5_V(n);
10528c2ecf20Sopenharmony_ci
10538c2ecf20Sopenharmony_ci		/* T4 and later chips share the same PIDX field offset within
10548c2ecf20Sopenharmony_ci		 * the doorbell, but T5 and later shrank the field in order to
10558c2ecf20Sopenharmony_ci		 * gain a bit for Doorbell Priority.  The field was absurdly
10568c2ecf20Sopenharmony_ci		 * large in the first place (14 bits) so we just use the T5
10578c2ecf20Sopenharmony_ci		 * and later limits and warn if a Queue ID is too large.
10588c2ecf20Sopenharmony_ci		 */
10598c2ecf20Sopenharmony_ci		WARN_ON(val & DBPRIO_F);
10608c2ecf20Sopenharmony_ci
10618c2ecf20Sopenharmony_ci		/* If we're only writing a single TX Descriptor and we can use
10628c2ecf20Sopenharmony_ci		 * Inferred QID registers, we can use the Write Combining
10638c2ecf20Sopenharmony_ci		 * Gather Buffer; otherwise we use the simple doorbell.
10648c2ecf20Sopenharmony_ci		 */
10658c2ecf20Sopenharmony_ci		if (n == 1 && q->bar2_qid == 0) {
10668c2ecf20Sopenharmony_ci			int index = (q->pidx
10678c2ecf20Sopenharmony_ci				     ? (q->pidx - 1)
10688c2ecf20Sopenharmony_ci				     : (q->size - 1));
10698c2ecf20Sopenharmony_ci			u64 *wr = (u64 *)&q->desc[index];
10708c2ecf20Sopenharmony_ci
10718c2ecf20Sopenharmony_ci			cxgb_pio_copy((u64 __iomem *)
10728c2ecf20Sopenharmony_ci				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
10738c2ecf20Sopenharmony_ci				      wr);
10748c2ecf20Sopenharmony_ci		} else {
10758c2ecf20Sopenharmony_ci			writel(val | QID_V(q->bar2_qid),
10768c2ecf20Sopenharmony_ci			       q->bar2_addr + SGE_UDB_KDOORBELL);
10778c2ecf20Sopenharmony_ci		}
10788c2ecf20Sopenharmony_ci
10798c2ecf20Sopenharmony_ci		/* This Write Memory Barrier will force the write to the User
10808c2ecf20Sopenharmony_ci		 * Doorbell area to be flushed.  This is needed to prevent
10818c2ecf20Sopenharmony_ci		 * writes on different CPUs for the same queue from hitting
10828c2ecf20Sopenharmony_ci		 * the adapter out of order.  This is required when some Work
10838c2ecf20Sopenharmony_ci		 * Requests take the Write Combine Gather Buffer path (user
10848c2ecf20Sopenharmony_ci		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
10858c2ecf20Sopenharmony_ci		 * take the traditional path where we simply increment the
10868c2ecf20Sopenharmony_ci		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
10878c2ecf20Sopenharmony_ci		 * hardware DMA read the actual Work Request.
10888c2ecf20Sopenharmony_ci		 */
10898c2ecf20Sopenharmony_ci		wmb();
10908c2ecf20Sopenharmony_ci	}
10918c2ecf20Sopenharmony_ci}
10928c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_ring_tx_db);
10938c2ecf20Sopenharmony_ci
10948c2ecf20Sopenharmony_ci/**
10958c2ecf20Sopenharmony_ci *	cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
10968c2ecf20Sopenharmony_ci *	@skb: the packet
10978c2ecf20Sopenharmony_ci *	@q: the Tx queue where the packet will be inlined
10988c2ecf20Sopenharmony_ci *	@pos: starting position in the Tx queue where to inline the packet
10998c2ecf20Sopenharmony_ci *
11008c2ecf20Sopenharmony_ci *	Inline a packet's contents directly into Tx descriptors, starting at
11018c2ecf20Sopenharmony_ci *	the given position within the Tx DMA ring.
11028c2ecf20Sopenharmony_ci *	Most of the complexity of this operation is dealing with wrap arounds
11038c2ecf20Sopenharmony_ci *	in the middle of the packet we want to inline.
11048c2ecf20Sopenharmony_ci */
11058c2ecf20Sopenharmony_civoid cxgb4_inline_tx_skb(const struct sk_buff *skb,
11068c2ecf20Sopenharmony_ci			 const struct sge_txq *q, void *pos)
11078c2ecf20Sopenharmony_ci{
11088c2ecf20Sopenharmony_ci	int left = (void *)q->stat - pos;
11098c2ecf20Sopenharmony_ci	u64 *p;
11108c2ecf20Sopenharmony_ci
11118c2ecf20Sopenharmony_ci	if (likely(skb->len <= left)) {
11128c2ecf20Sopenharmony_ci		if (likely(!skb->data_len))
11138c2ecf20Sopenharmony_ci			skb_copy_from_linear_data(skb, pos, skb->len);
11148c2ecf20Sopenharmony_ci		else
11158c2ecf20Sopenharmony_ci			skb_copy_bits(skb, 0, pos, skb->len);
11168c2ecf20Sopenharmony_ci		pos += skb->len;
11178c2ecf20Sopenharmony_ci	} else {
11188c2ecf20Sopenharmony_ci		skb_copy_bits(skb, 0, pos, left);
11198c2ecf20Sopenharmony_ci		skb_copy_bits(skb, left, q->desc, skb->len - left);
11208c2ecf20Sopenharmony_ci		pos = (void *)q->desc + (skb->len - left);
11218c2ecf20Sopenharmony_ci	}
11228c2ecf20Sopenharmony_ci
11238c2ecf20Sopenharmony_ci	/* 0-pad to multiple of 16 */
11248c2ecf20Sopenharmony_ci	p = PTR_ALIGN(pos, 8);
11258c2ecf20Sopenharmony_ci	if ((uintptr_t)p & 8)
11268c2ecf20Sopenharmony_ci		*p = 0;
11278c2ecf20Sopenharmony_ci}
11288c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_inline_tx_skb);
11298c2ecf20Sopenharmony_ci
11308c2ecf20Sopenharmony_cistatic void *inline_tx_skb_header(const struct sk_buff *skb,
11318c2ecf20Sopenharmony_ci				  const struct sge_txq *q,  void *pos,
11328c2ecf20Sopenharmony_ci				  int length)
11338c2ecf20Sopenharmony_ci{
11348c2ecf20Sopenharmony_ci	u64 *p;
11358c2ecf20Sopenharmony_ci	int left = (void *)q->stat - pos;
11368c2ecf20Sopenharmony_ci
11378c2ecf20Sopenharmony_ci	if (likely(length <= left)) {
11388c2ecf20Sopenharmony_ci		memcpy(pos, skb->data, length);
11398c2ecf20Sopenharmony_ci		pos += length;
11408c2ecf20Sopenharmony_ci	} else {
11418c2ecf20Sopenharmony_ci		memcpy(pos, skb->data, left);
11428c2ecf20Sopenharmony_ci		memcpy(q->desc, skb->data + left, length - left);
11438c2ecf20Sopenharmony_ci		pos = (void *)q->desc + (length - left);
11448c2ecf20Sopenharmony_ci	}
11458c2ecf20Sopenharmony_ci	/* 0-pad to multiple of 16 */
11468c2ecf20Sopenharmony_ci	p = PTR_ALIGN(pos, 8);
11478c2ecf20Sopenharmony_ci	if ((uintptr_t)p & 8) {
11488c2ecf20Sopenharmony_ci		*p = 0;
11498c2ecf20Sopenharmony_ci		return p + 1;
11508c2ecf20Sopenharmony_ci	}
11518c2ecf20Sopenharmony_ci	return p;
11528c2ecf20Sopenharmony_ci}
11538c2ecf20Sopenharmony_ci
/*
 * Figure out what HW csum a packet wants and return the appropriate control
 * bits.  Returns a value suitable for ORing into cpl_tx_pkt_core.ctrl1:
 * either TXPKT_L4CSUM_DIS_F (no offload possible) or a TXPKT_CSUM_TYPE
 * plus the header-length/offset fields the hardware needs.
 */
static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
	int csum_type;
	bool inner_hdr_csum = false;
	u16 proto, ver;

	/* T6+ can checksum the inner headers of encapsulated packets */
	if (skb->encapsulation &&
	    (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5))
		inner_hdr_csum = true;

	/* Pick IP version and L4 protocol from inner or outer headers */
	if (inner_hdr_csum) {
		ver = inner_ip_hdr(skb)->version;
		proto = (ver == 4) ? inner_ip_hdr(skb)->protocol :
			inner_ipv6_hdr(skb)->nexthdr;
	} else {
		ver = ip_hdr(skb)->version;
		proto = (ver == 4) ? ip_hdr(skb)->protocol :
			ipv6_hdr(skb)->nexthdr;
	}

	if (ver == 4) {
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP;
		else {
nocsum:			/*
			 * unknown protocol, disable HW csum
			 * and hope a bad packet is detected
			 */
			return TXPKT_L4CSUM_DIS_F;
		}
	} else {
		/*
		 * this doesn't work with extension headers
		 */
		if (proto == IPPROTO_TCP)
			csum_type = TX_CSUM_TCPIP6;
		else if (proto == IPPROTO_UDP)
			csum_type = TX_CSUM_UDPIP6;
		else
			goto nocsum;
	}

	if (likely(csum_type >= TX_CSUM_TCPIP)) {
		/* TCP/UDP over IP: HW parses headers itself; feed it the
		 * Ethernet-header extra length and the IP header length.
		 */
		int eth_hdr_len, l4_len;
		u64 hdr_len;

		if (inner_hdr_csum) {
			/* This allows checksum offload for all encapsulated
			 * packets like GRE etc..
			 */
			l4_len = skb_inner_network_header_len(skb);
			eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN;
		} else {
			l4_len = skb_network_header_len(skb);
			eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
		}
		hdr_len = TXPKT_IPHDR_LEN_V(l4_len);

		/* T6 moved the ETHHDR_LEN field within ctrl1 */
		if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
			hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		else
			hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
		return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
	} else {
		/* generic checksum: give HW explicit start/insert offsets */
		int start = skb_transport_offset(skb);

		return TXPKT_CSUM_TYPE_V(csum_type) |
			TXPKT_CSUM_START_V(start) |
			TXPKT_CSUM_LOC_V(start + skb->csum_offset);
	}
}
12318c2ecf20Sopenharmony_ci
12328c2ecf20Sopenharmony_cistatic void eth_txq_stop(struct sge_eth_txq *q)
12338c2ecf20Sopenharmony_ci{
12348c2ecf20Sopenharmony_ci	netif_tx_stop_queue(q->txq);
12358c2ecf20Sopenharmony_ci	q->q.stops++;
12368c2ecf20Sopenharmony_ci}
12378c2ecf20Sopenharmony_ci
12388c2ecf20Sopenharmony_cistatic inline void txq_advance(struct sge_txq *q, unsigned int n)
12398c2ecf20Sopenharmony_ci{
12408c2ecf20Sopenharmony_ci	q->in_use += n;
12418c2ecf20Sopenharmony_ci	q->pidx += n;
12428c2ecf20Sopenharmony_ci	if (q->pidx >= q->size)
12438c2ecf20Sopenharmony_ci		q->pidx -= q->size;
12448c2ecf20Sopenharmony_ci}
12458c2ecf20Sopenharmony_ci
#ifdef CONFIG_CHELSIO_T4_FCOE
/* cxgb_fcoe_offload - prepare an FCoE frame for FC CRC offload
 * @skb: the packet
 * @adap: the adapter
 * @pi: the egress port
 * @cntrl: filled with the TX checksum-control bits on success
 *
 * Returns 0 when the frame is not FCoE (caller transmits normally), 0
 * with *cntrl set up for FC CRC offload when it is a supported FCoE
 * frame, or -EOPNOTSUPP when the SOF/EOF combination cannot be
 * offloaded and the caller must drop the frame.
 */
static inline int
cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
		  const struct port_info *pi, u64 *cntrl)
{
	const struct cxgb_fcoe *fcoe = &pi->fcoe;

	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
		return 0;

	if (skb->protocol != htons(ETH_P_FCOE))
		return 0;

	/* Re-derive header offsets for the FCoE encapsulation */
	skb_reset_mac_header(skb);
	skb->mac_len = sizeof(struct ethhdr);

	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));

	/* Return -EOPNOTSUPP rather than the kernel-internal -ENOTSUPP:
	 * the caller (cxgb4_eth_xmit) tests "ret == -EOPNOTSUPP" to decide
	 * whether to drop the frame, so returning -ENOTSUPP here meant
	 * unsupported SOF/EOF frames were never dropped.
	 */
	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
		return -EOPNOTSUPP;

	/* FC CRC offload */
	*cntrl = TXPKT_CSUM_TYPE_V(TX_CSUM_FCOE) |
		     TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F |
		     TXPKT_CSUM_START_V(CXGB_FCOE_TXPKT_CSUM_START) |
		     TXPKT_CSUM_END_V(CXGB_FCOE_TXPKT_CSUM_END) |
		     TXPKT_CSUM_LOC_V(CXGB_FCOE_TXPKT_CSUM_END);
	return 0;
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
12778c2ecf20Sopenharmony_ci
12788c2ecf20Sopenharmony_ci/* Returns tunnel type if hardware supports offloading of the same.
12798c2ecf20Sopenharmony_ci * It is called only for T5 and onwards.
12808c2ecf20Sopenharmony_ci */
12818c2ecf20Sopenharmony_cienum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
12828c2ecf20Sopenharmony_ci{
12838c2ecf20Sopenharmony_ci	u8 l4_hdr = 0;
12848c2ecf20Sopenharmony_ci	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
12858c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(skb->dev);
12868c2ecf20Sopenharmony_ci	struct adapter *adapter = pi->adapter;
12878c2ecf20Sopenharmony_ci
12888c2ecf20Sopenharmony_ci	if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
12898c2ecf20Sopenharmony_ci	    skb->inner_protocol != htons(ETH_P_TEB))
12908c2ecf20Sopenharmony_ci		return tnl_type;
12918c2ecf20Sopenharmony_ci
12928c2ecf20Sopenharmony_ci	switch (vlan_get_protocol(skb)) {
12938c2ecf20Sopenharmony_ci	case htons(ETH_P_IP):
12948c2ecf20Sopenharmony_ci		l4_hdr = ip_hdr(skb)->protocol;
12958c2ecf20Sopenharmony_ci		break;
12968c2ecf20Sopenharmony_ci	case htons(ETH_P_IPV6):
12978c2ecf20Sopenharmony_ci		l4_hdr = ipv6_hdr(skb)->nexthdr;
12988c2ecf20Sopenharmony_ci		break;
12998c2ecf20Sopenharmony_ci	default:
13008c2ecf20Sopenharmony_ci		return tnl_type;
13018c2ecf20Sopenharmony_ci	}
13028c2ecf20Sopenharmony_ci
13038c2ecf20Sopenharmony_ci	switch (l4_hdr) {
13048c2ecf20Sopenharmony_ci	case IPPROTO_UDP:
13058c2ecf20Sopenharmony_ci		if (adapter->vxlan_port == udp_hdr(skb)->dest)
13068c2ecf20Sopenharmony_ci			tnl_type = TX_TNL_TYPE_VXLAN;
13078c2ecf20Sopenharmony_ci		else if (adapter->geneve_port == udp_hdr(skb)->dest)
13088c2ecf20Sopenharmony_ci			tnl_type = TX_TNL_TYPE_GENEVE;
13098c2ecf20Sopenharmony_ci		break;
13108c2ecf20Sopenharmony_ci	default:
13118c2ecf20Sopenharmony_ci		return tnl_type;
13128c2ecf20Sopenharmony_ci	}
13138c2ecf20Sopenharmony_ci
13148c2ecf20Sopenharmony_ci	return tnl_type;
13158c2ecf20Sopenharmony_ci}
13168c2ecf20Sopenharmony_ci
/* Fill in a T6 CPL_TX_TNL_LSO descriptor for a tunnel-encapsulated GSO
 * packet.  Outer-header fields describe the encapsulating Ethernet/IP
 * headers; inner-header fields describe the encapsulated TCP segment the
 * hardware will segment.  Field order follows the CPL layout; values are
 * converted to big-endian as required by the hardware.
 */
static inline void t6_fill_tnl_lso(struct sk_buff *skb,
				   struct cpl_tx_tnl_lso *tnl_lso,
				   enum cpl_tx_tnl_lso_type tnl_type)
{
	u32 val;
	int in_eth_xtra_len;
	int l3hdr_len = skb_network_header_len(skb);
	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	bool v6 = (ip_hdr(skb)->version == 6);	/* outer IP version */

	/* Outer-header directives: lengths in 4-byte units; for IPv4 ask
	 * HW to fix the outer IP checksum, length, and IP ID per segment.
	 */
	val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
	      CPL_TX_TNL_LSO_FIRST_F |
	      CPL_TX_TNL_LSO_LAST_F |
	      (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
	      CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
	      CPL_TX_TNL_LSO_IPLENSETOUT_F |
	      (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
	tnl_lso->op_to_IpIdSplitOut = htonl(val);

	tnl_lso->IpIdOffsetOut = 0;

	/* Get the tunnel header length */
	val = skb_inner_mac_header(skb) - skb_mac_header(skb);
	in_eth_xtra_len = skb_inner_network_header(skb) -
			  skb_inner_mac_header(skb) - ETH_HLEN;

	switch (tnl_type) {
	case TX_TNL_TYPE_VXLAN:
	case TX_TNL_TYPE_GENEVE:
		/* UDP tunnels: clear outer UDP csum, set UDP length per seg */
		tnl_lso->UdpLenSetOut_to_TnlHdrLen =
			htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
			CPL_TX_TNL_LSO_UDPLENSETOUT_F);
		break;
	default:
		tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
		break;
	}

	tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
		 htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
		       CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));

	tnl_lso->r1 = 0;

	/* Inner-header description of the TCP segment being LSO'd */
	val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
	      CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
	      CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
	      CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
	tnl_lso->Flow_to_TcpHdrLen = htonl(val);

	tnl_lso->IpIdOffset = htons(0);

	tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
	tnl_lso->TCPSeqOffset = htonl(0);
	tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
}
13768c2ecf20Sopenharmony_ci
13778c2ecf20Sopenharmony_cistatic inline void *write_tso_wr(struct adapter *adap, struct sk_buff *skb,
13788c2ecf20Sopenharmony_ci				 struct cpl_tx_pkt_lso_core *lso)
13798c2ecf20Sopenharmony_ci{
13808c2ecf20Sopenharmony_ci	int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
13818c2ecf20Sopenharmony_ci	int l3hdr_len = skb_network_header_len(skb);
13828c2ecf20Sopenharmony_ci	const struct skb_shared_info *ssi;
13838c2ecf20Sopenharmony_ci	bool ipv6 = false;
13848c2ecf20Sopenharmony_ci
13858c2ecf20Sopenharmony_ci	ssi = skb_shinfo(skb);
13868c2ecf20Sopenharmony_ci	if (ssi->gso_type & SKB_GSO_TCPV6)
13878c2ecf20Sopenharmony_ci		ipv6 = true;
13888c2ecf20Sopenharmony_ci
13898c2ecf20Sopenharmony_ci	lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
13908c2ecf20Sopenharmony_ci			      LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
13918c2ecf20Sopenharmony_ci			      LSO_IPV6_V(ipv6) |
13928c2ecf20Sopenharmony_ci			      LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
13938c2ecf20Sopenharmony_ci			      LSO_IPHDR_LEN_V(l3hdr_len / 4) |
13948c2ecf20Sopenharmony_ci			      LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
13958c2ecf20Sopenharmony_ci	lso->ipid_ofst = htons(0);
13968c2ecf20Sopenharmony_ci	lso->mss = htons(ssi->gso_size);
13978c2ecf20Sopenharmony_ci	lso->seqno_offset = htonl(0);
13988c2ecf20Sopenharmony_ci	if (is_t4(adap->params.chip))
13998c2ecf20Sopenharmony_ci		lso->len = htonl(skb->len);
14008c2ecf20Sopenharmony_ci	else
14018c2ecf20Sopenharmony_ci		lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
14028c2ecf20Sopenharmony_ci
14038c2ecf20Sopenharmony_ci	return (void *)(lso + 1);
14048c2ecf20Sopenharmony_ci}
14058c2ecf20Sopenharmony_ci
14068c2ecf20Sopenharmony_ci/**
14078c2ecf20Sopenharmony_ci *	t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
14088c2ecf20Sopenharmony_ci *	@adap: the adapter
14098c2ecf20Sopenharmony_ci *	@eq: the Ethernet TX Queue
14108c2ecf20Sopenharmony_ci *	@maxreclaim: the maximum number of TX Descriptors to reclaim or -1
14118c2ecf20Sopenharmony_ci *
14128c2ecf20Sopenharmony_ci *	We're typically called here to update the state of an Ethernet TX
14138c2ecf20Sopenharmony_ci *	Queue with respect to the hardware's progress in consuming the TX
14148c2ecf20Sopenharmony_ci *	Work Requests that we've put on that Egress Queue.  This happens
14158c2ecf20Sopenharmony_ci *	when we get Egress Queue Update messages and also prophylactically
14168c2ecf20Sopenharmony_ci *	in regular timer-based Ethernet TX Queue maintenance.
14178c2ecf20Sopenharmony_ci */
14188c2ecf20Sopenharmony_ciint t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
14198c2ecf20Sopenharmony_ci				 int maxreclaim)
14208c2ecf20Sopenharmony_ci{
14218c2ecf20Sopenharmony_ci	unsigned int reclaimed, hw_cidx;
14228c2ecf20Sopenharmony_ci	struct sge_txq *q = &eq->q;
14238c2ecf20Sopenharmony_ci	int hw_in_use;
14248c2ecf20Sopenharmony_ci
14258c2ecf20Sopenharmony_ci	if (!q->in_use || !__netif_tx_trylock(eq->txq))
14268c2ecf20Sopenharmony_ci		return 0;
14278c2ecf20Sopenharmony_ci
14288c2ecf20Sopenharmony_ci	/* Reclaim pending completed TX Descriptors. */
14298c2ecf20Sopenharmony_ci	reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
14308c2ecf20Sopenharmony_ci
14318c2ecf20Sopenharmony_ci	hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
14328c2ecf20Sopenharmony_ci	hw_in_use = q->pidx - hw_cidx;
14338c2ecf20Sopenharmony_ci	if (hw_in_use < 0)
14348c2ecf20Sopenharmony_ci		hw_in_use += q->size;
14358c2ecf20Sopenharmony_ci
14368c2ecf20Sopenharmony_ci	/* If the TX Queue is currently stopped and there's now more than half
14378c2ecf20Sopenharmony_ci	 * the queue available, restart it.  Otherwise bail out since the rest
14388c2ecf20Sopenharmony_ci	 * of what we want do here is with the possibility of shipping any
14398c2ecf20Sopenharmony_ci	 * currently buffered Coalesced TX Work Request.
14408c2ecf20Sopenharmony_ci	 */
14418c2ecf20Sopenharmony_ci	if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
14428c2ecf20Sopenharmony_ci		netif_tx_wake_queue(eq->txq);
14438c2ecf20Sopenharmony_ci		eq->q.restarts++;
14448c2ecf20Sopenharmony_ci	}
14458c2ecf20Sopenharmony_ci
14468c2ecf20Sopenharmony_ci	__netif_tx_unlock(eq->txq);
14478c2ecf20Sopenharmony_ci	return reclaimed;
14488c2ecf20Sopenharmony_ci}
14498c2ecf20Sopenharmony_ci
14508c2ecf20Sopenharmony_cistatic inline int cxgb4_validate_skb(struct sk_buff *skb,
14518c2ecf20Sopenharmony_ci				     struct net_device *dev,
14528c2ecf20Sopenharmony_ci				     u32 min_pkt_len)
14538c2ecf20Sopenharmony_ci{
14548c2ecf20Sopenharmony_ci	u32 max_pkt_len;
14558c2ecf20Sopenharmony_ci
14568c2ecf20Sopenharmony_ci	/* The chip min packet length is 10 octets but some firmware
14578c2ecf20Sopenharmony_ci	 * commands have a minimum packet length requirement. So, play
14588c2ecf20Sopenharmony_ci	 * safe and reject anything shorter than @min_pkt_len.
14598c2ecf20Sopenharmony_ci	 */
14608c2ecf20Sopenharmony_ci	if (unlikely(skb->len < min_pkt_len))
14618c2ecf20Sopenharmony_ci		return -EINVAL;
14628c2ecf20Sopenharmony_ci
14638c2ecf20Sopenharmony_ci	/* Discard the packet if the length is greater than mtu */
14648c2ecf20Sopenharmony_ci	max_pkt_len = ETH_HLEN + dev->mtu;
14658c2ecf20Sopenharmony_ci
14668c2ecf20Sopenharmony_ci	if (skb_vlan_tagged(skb))
14678c2ecf20Sopenharmony_ci		max_pkt_len += VLAN_HLEN;
14688c2ecf20Sopenharmony_ci
14698c2ecf20Sopenharmony_ci	if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
14708c2ecf20Sopenharmony_ci		return -EINVAL;
14718c2ecf20Sopenharmony_ci
14728c2ecf20Sopenharmony_ci	return 0;
14738c2ecf20Sopenharmony_ci}
14748c2ecf20Sopenharmony_ci
14758c2ecf20Sopenharmony_cistatic void *write_eo_udp_wr(struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
14768c2ecf20Sopenharmony_ci			     u32 hdr_len)
14778c2ecf20Sopenharmony_ci{
14788c2ecf20Sopenharmony_ci	wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
14798c2ecf20Sopenharmony_ci	wr->u.udpseg.ethlen = skb_network_offset(skb);
14808c2ecf20Sopenharmony_ci	wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
14818c2ecf20Sopenharmony_ci	wr->u.udpseg.udplen = sizeof(struct udphdr);
14828c2ecf20Sopenharmony_ci	wr->u.udpseg.rtplen = 0;
14838c2ecf20Sopenharmony_ci	wr->u.udpseg.r4 = 0;
14848c2ecf20Sopenharmony_ci	if (skb_shinfo(skb)->gso_size)
14858c2ecf20Sopenharmony_ci		wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
14868c2ecf20Sopenharmony_ci	else
14878c2ecf20Sopenharmony_ci		wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len);
14888c2ecf20Sopenharmony_ci	wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
14898c2ecf20Sopenharmony_ci	wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len);
14908c2ecf20Sopenharmony_ci
14918c2ecf20Sopenharmony_ci	return (void *)(wr + 1);
14928c2ecf20Sopenharmony_ci}
14938c2ecf20Sopenharmony_ci
/**
 *	cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
 *	Builds the firmware Work Request (plain, TSO, tunnel-LSO, or UDP
 *	segmentation-offload variant) plus its CPL directly in the TX
 *	descriptor ring, attaches the payload either inline or via an SGL,
 *	then rings the doorbell.
 */
static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
	bool ptp_enabled = is_ptp_enabled(skb, dev);
	unsigned int last_desc, flits, ndesc;
	u32 wr_mid, ctrl0, op, sgl_off = 0;
	const struct skb_shared_info *ssi;
	int len, qidx, credits, ret, left;
	struct tx_sw_desc *sgl_sdesc;
	struct fw_eth_tx_eo_wr *eowr;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	bool immediate = false;
	u64 cntrl, *end, *sgl;
	struct sge_eth_txq *q;
	unsigned int chip_ver;
	struct adapter *adap;

	/* Drop runts and over-MTU (non-GSO) packets up front */
	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
	if (ret)
		goto out_free;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	ssi = skb_shinfo(skb);
	/* Divert IPsec- and kTLS-offloaded traffic to their ULD handlers */
#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
	if (xfrm_offload(skb) && !ssi->gso_size)
		return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
	if (cxgb4_is_ktls_skb(skb) &&
	    (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb))))
		return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */

	qidx = skb_get_queue_mapping(skb);
	if (ptp_enabled) {
		/* Only one PTP packet may be in flight at a time; drop if
		 * a timestamp request is already outstanding.
		 */
		if (!(adap->ptp_tx_skb)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			adap->ptp_tx_skb = skb_get(skb);
		} else {
			goto out_free;
		}
		q = &adap->sge.ptptxq;
	} else {
		q = &adap->sge.ethtxq[qidx + pi->first_qset];
	}
	skb_tx_timestamp(skb);

	/* Opportunistically free descriptors the HW has finished with */
	reclaim_completed_tx(adap, &q->q, -1, true);
	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;

#ifdef CONFIG_CHELSIO_T4_FCOE
	ret = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
	if (unlikely(ret == -EOPNOTSUPP))
		goto out_free;
#endif /* CONFIG_CHELSIO_T4_FCOE */

	/* Work out how much descriptor space this packet needs */
	chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
	flits = calc_tx_flits(skb, chip_ver);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Should not happen: the stack should have been stopped
		 * before the ring filled.
		 */
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	/* Small packets are copied straight into the descriptor ring */
	if (is_eth_imm(skb, chip_ver))
		immediate = true;

	if (skb->encapsulation && chip_ver > CHELSIO_T5)
		tnl_type = cxgb_encap_offload_supported(skb);

	/* The SW descriptor (skb + DMA addresses) lives at the WR's last
	 * hardware descriptor, wrapped into the ring if needed.
	 */
	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message. The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(q);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Both WR views alias the same descriptor slot; which one is used
	 * depends on whether this is a UDP segmentation-offload packet.
	 */
	wr = (void *)&q->q.desc[q->q.pidx];
	eowr = (void *)&q->q.desc[q->q.pidx];
	wr->equiq_to_len16 = htonl(wr_mid);
	wr->r3 = cpu_to_be64(0);
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		end = (u64 *)eowr + flits;
	else
		end = (u64 *)wr + flits;

	/* Immediate-data length: optional inlined payload plus the CPL */
	len = immediate ? skb->len : 0;
	len += sizeof(*cpl);
	if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) {
		/* TCP segmentation offload: plain LSO or tunnel LSO */
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);

		if (tnl_type)
			len += sizeof(*tnl_lso);
		else
			len += sizeof(*lso);

		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
				       FW_WR_IMMDLEN_V(len));
		if (tnl_type) {
			struct iphdr *iph = ip_hdr(skb);

			t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
			cpl = (void *)(tnl_lso + 1);
			/* Driver is expected to compute partial checksum that
			 * does not include the IP Total Length.
			 */
			if (iph->version == 4) {
				iph->check = 0;
				iph->tot_len = 0;
				iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl);
			}
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				cntrl = hwcsum(adap->params.chip, skb);
		} else {
			cpl = write_tso_wr(adap, skb, lso);
			cntrl = hwcsum(adap->params.chip, skb);
		}
		sgl = (u64 *)(cpl + 1); /* sgl start here */
		q->tso++;
		q->tx_cso += ssi->gso_segs;
	} else if (ssi->gso_size) {
		/* UDP segmentation offload: headers are inlined after the
		 * EO WR; the SGL then covers only the payload (sgl_off).
		 */
		u64 *start;
		u32 hdrlen;

		hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		len += hdrlen;
		wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
					     FW_ETH_TX_EO_WR_IMMDLEN_V(len));
		cpl = write_eo_udp_wr(skb, eowr, hdrlen);
		cntrl = hwcsum(adap->params.chip, skb);

		start = (u64 *)(cpl + 1);
		sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start,
						  hdrlen);
		if (unlikely(start > sgl)) {
			/* header copy wrapped past the end of the ring:
			 * rebase the end pointer relative to ring start
			 */
			left = (u8 *)end - (u8 *)q->q.stat;
			end = (void *)q->q.desc + left;
		}
		sgl_off = hdrlen;
		q->uso++;
		q->tx_cso += ssi->gso_segs;
	} else {
		/* Plain (or PTP) packet WR, no segmentation offload */
		if (ptp_enabled)
			op = FW_PTP_TX_PKT_WR;
		else
			op = FW_ETH_TX_PKT_WR;
		wr->op_immdlen = htonl(FW_WR_OP_V(op) |
				       FW_WR_IMMDLEN_V(len));
		cpl = (void *)(wr + 1);
		sgl = (u64 *)(cpl + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adap->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			q->tx_cso++;
		}
	}

	if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
		/* If current position is already at the end of the
		 * txq, reset the current to point to start of the queue
		 * and update the end ptr as well.
		 */
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		sgl = (void *)q->q.desc;
	}

	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
#ifdef CONFIG_CHELSIO_T4_FCOE
		/* FCoE carries its priority in the VLAN PCP bits */
		if (skb->protocol == htons(ETH_P_FCOE))
			cntrl |= TXPKT_VLAN_V(
				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
#endif /* CONFIG_CHELSIO_T4_FCOE */
	}

	/* Final CPL control word: opcode, egress channel, PF, timestamping */
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (ptp_enabled)
		ctrl0 |= TXPKT_TSTAMP_F;
#ifdef CONFIG_CHELSIO_T4_DCB
	if (is_t4(adap->params.chip))
		ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
	else
		ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
#endif
	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	if (immediate) {
		/* Payload copied into the ring; skb is done with now */
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		/* Payload referenced by SGL; skb freed when HW completes */
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off,
				sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
17418c2ecf20Sopenharmony_ci
/* Constants ... */
enum {
	/* Egress Queue sizes, producer and consumer indices are all in units
	 * of Egress Context Units bytes.  Note that as far as the hardware is
	 * concerned, the free list is an Egress Queue (the host produces free
	 * buffers which the hardware consumes) and free list entries are
	 * 64-bit PCI DMA addresses.
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	/* 64-bit free-list DMA addresses per Egress Queue unit */
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	/* TX descriptor flits (64-bit words) per Egress Queue unit */
	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),

	/* Maximum size, in flits, of a VF Ethernet TX Work Request header:
	 * the firmware WR plus an LSO CPL plus a TX Packet CPL.
	 */
	T4VF_ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
			       sizeof(struct cpl_tx_pkt_lso_core) +
			       sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
};
17588c2ecf20Sopenharmony_ci
17598c2ecf20Sopenharmony_ci/**
17608c2ecf20Sopenharmony_ci *	t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
17618c2ecf20Sopenharmony_ci *	@skb: the packet
17628c2ecf20Sopenharmony_ci *
17638c2ecf20Sopenharmony_ci *	Returns whether an Ethernet packet is small enough to fit completely as
17648c2ecf20Sopenharmony_ci *	immediate data.
17658c2ecf20Sopenharmony_ci */
17668c2ecf20Sopenharmony_cistatic inline int t4vf_is_eth_imm(const struct sk_buff *skb)
17678c2ecf20Sopenharmony_ci{
17688c2ecf20Sopenharmony_ci	/* The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
17698c2ecf20Sopenharmony_ci	 * which does not accommodate immediate data.  We could dike out all
17708c2ecf20Sopenharmony_ci	 * of the support code for immediate data but that would tie our hands
17718c2ecf20Sopenharmony_ci	 * too much if we ever want to enhace the firmware.  It would also
17728c2ecf20Sopenharmony_ci	 * create more differences between the PF and VF Drivers.
17738c2ecf20Sopenharmony_ci	 */
17748c2ecf20Sopenharmony_ci	return false;
17758c2ecf20Sopenharmony_ci}
17768c2ecf20Sopenharmony_ci
17778c2ecf20Sopenharmony_ci/**
17788c2ecf20Sopenharmony_ci *	t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
17798c2ecf20Sopenharmony_ci *	@skb: the packet
17808c2ecf20Sopenharmony_ci *
17818c2ecf20Sopenharmony_ci *	Returns the number of flits needed for a TX Work Request for the
17828c2ecf20Sopenharmony_ci *	given Ethernet packet, including the needed WR and CPL headers.
17838c2ecf20Sopenharmony_ci */
17848c2ecf20Sopenharmony_cistatic inline unsigned int t4vf_calc_tx_flits(const struct sk_buff *skb)
17858c2ecf20Sopenharmony_ci{
17868c2ecf20Sopenharmony_ci	unsigned int flits;
17878c2ecf20Sopenharmony_ci
17888c2ecf20Sopenharmony_ci	/* If the skb is small enough, we can pump it out as a work request
17898c2ecf20Sopenharmony_ci	 * with only immediate data.  In that case we just have to have the
17908c2ecf20Sopenharmony_ci	 * TX Packet header plus the skb data in the Work Request.
17918c2ecf20Sopenharmony_ci	 */
17928c2ecf20Sopenharmony_ci	if (t4vf_is_eth_imm(skb))
17938c2ecf20Sopenharmony_ci		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
17948c2ecf20Sopenharmony_ci				    sizeof(__be64));
17958c2ecf20Sopenharmony_ci
17968c2ecf20Sopenharmony_ci	/* Otherwise, we're going to have to construct a Scatter gather list
17978c2ecf20Sopenharmony_ci	 * of the skb body and fragments.  We also include the flits necessary
17988c2ecf20Sopenharmony_ci	 * for the TX Packet Work Request and CPL.  We always have a firmware
17998c2ecf20Sopenharmony_ci	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
18008c2ecf20Sopenharmony_ci	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
18018c2ecf20Sopenharmony_ci	 * message or, if we're doing a Large Send Offload, an LSO CPL message
18028c2ecf20Sopenharmony_ci	 * with an embedded TX Packet Write CPL message.
18038c2ecf20Sopenharmony_ci	 */
18048c2ecf20Sopenharmony_ci	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
18058c2ecf20Sopenharmony_ci	if (skb_shinfo(skb)->gso_size)
18068c2ecf20Sopenharmony_ci		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
18078c2ecf20Sopenharmony_ci			  sizeof(struct cpl_tx_pkt_lso_core) +
18088c2ecf20Sopenharmony_ci			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
18098c2ecf20Sopenharmony_ci	else
18108c2ecf20Sopenharmony_ci		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
18118c2ecf20Sopenharmony_ci			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
18128c2ecf20Sopenharmony_ci	return flits;
18138c2ecf20Sopenharmony_ci}
18148c2ecf20Sopenharmony_ci
/**
 *	cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
 *
 *	Returns NETDEV_TX_OK when the packet has been queued (or dropped on
 *	error), or NETDEV_TX_BUSY when the TX ring is full; in the latter
 *	case the queue is stopped until the firmware opens up space.
 */
static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	unsigned int last_desc, flits, ndesc;
	const struct skb_shared_info *ssi;
	struct fw_eth_tx_pkt_vm_wr *wr;
	struct tx_sw_desc *sgl_sdesc;
	struct cpl_tx_pkt_core *cpl;
	const struct port_info *pi;
	struct sge_eth_txq *txq;
	struct adapter *adapter;
	int qidx, credits, ret;
	size_t fw_hdr_copy_len;
	u64 cntrl, *end;
	u32 wr_mid;

	/* The chip minimum packet length is 10 octets but the firmware
	 * command that we are using requires that we copy the Ethernet header
	 * (including the VLAN tag) into the header so we reject anything
	 * smaller than that ...
	 */
	fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) +
			  sizeof(wr->ethtype) + sizeof(wr->vlantci);
	ret = cxgb4_validate_skb(skb, dev, fw_hdr_copy_len);
	if (ret)
		goto out_free;

	/* Figure out which TX Queue we're going to use. */
	pi = netdev_priv(dev);
	adapter = pi->adapter;
	qidx = skb_get_queue_mapping(skb);
	WARN_ON(qidx >= pi->nqsets);
	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];

	/* Take this opportunity to reclaim any TX Descriptors whose DMA
	 * transfers have completed.
	 */
	reclaim_completed_tx(adapter, &txq->q, -1, true);

	/* Calculate the number of flits and TX Descriptors we're going to
	 * need along with how many TX Descriptors will be left over after
	 * we inject our Work Request.
	 */
	flits = t4vf_calc_tx_flits(skb);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&txq->q) - ndesc;

	if (unlikely(credits < 0)) {
		/* Not enough room for this packet's Work Request.  Stop the
		 * TX Queue and return a "busy" condition.  The queue will get
		 * started later on when the firmware informs us that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		dev_err(adapter->pdev_dev,
			"%s: TX ring %u full while queue awake!\n",
			dev->name, qidx);
		return NETDEV_TX_BUSY;
	}

	/* The Work Request's skb pointer and DMA addresses live in the
	 * software descriptor of its *last* hardware descriptor; compute
	 * that slot (with wrap-around) up front.
	 */
	last_desc = txq->q.pidx + ndesc - 1;
	if (last_desc >= txq->q.size)
		last_desc -= txq->q.size;
	sgl_sdesc = &txq->q.sdesc[last_desc];

	if (!t4vf_is_eth_imm(skb) &&
	    unlikely(cxgb4_map_skb(adapter->pdev_dev, skb,
				   sgl_sdesc->addr) < 0)) {
		/* We need to map the skb into PCI DMA space (because it can't
		 * be in-lined directly into the Work Request) and the mapping
		 * operation failed.  Record the error and drop the packet.
		 */
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		txq->mapping_err++;
		goto out_free;
	}

	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		/* After we're done injecting the Work Request for this
		 * packet, we'll be below our "stop threshold" so stop the TX
		 * Queue now and schedule a request for an SGE Egress Queue
		 * Update message.  The queue will get started later on when
		 * the firmware processes this Work Request and sends us an
		 * Egress Queue Status Update message indicating that space
		 * has opened up.
		 */
		eth_txq_stop(txq);
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}

	/* Start filling in our Work Request.  Note that we do _not_ handle
	 * the WR Header wrapping around the TX Descriptor Ring.  If our
	 * maximum header size ever exceeds one TX Descriptor, we'll need to
	 * do something else here.
	 */
	WARN_ON(DIV_ROUND_UP(T4VF_ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
	wr = (void *)&txq->q.desc[txq->q.pidx];
	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
	wr->r3[0] = cpu_to_be32(0);
	wr->r3[1] = cpu_to_be32(0);
	/* Copy the Ethernet header (DST/SRC MAC, ethertype, VLAN TCI) into
	 * the Work Request itself, as the firmware command requires.
	 */
	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
	end = (u64 *)wr + flits;

	/* If this is a Large Send Offload packet we'll put in an LSO CPL
	 * message with an encapsulated TX Packet CPL message.  Otherwise we
	 * just use a TX Packet CPL message.
	 */
	ssi = skb_shinfo(skb);
	if (ssi->gso_size) {
		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
		int l3hdr_len = skb_network_header_len(skb);
		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;

		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(sizeof(*lso) +
						    sizeof(*cpl)));
		 /* Fill in the LSO CPL message. */
		lso->lso_ctrl =
			cpu_to_be32(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
				    LSO_FIRST_SLICE_F |
				    LSO_LAST_SLICE_F |
				    LSO_IPV6_V(v6) |
				    LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
				    LSO_IPHDR_LEN_V(l3hdr_len / 4) |
				    LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
		lso->ipid_ofst = cpu_to_be16(0);
		lso->mss = cpu_to_be16(ssi->gso_size);
		lso->seqno_offset = cpu_to_be32(0);
		/* T5+ chips encode the transfer size differently. */
		if (is_t4(adapter->params.chip))
			lso->len = cpu_to_be32(skb->len);
		else
			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(lso + 1);

		if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
			cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
		else
			cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);

		cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
					   TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
			 TXPKT_IPHDR_LEN_V(l3hdr_len);
		txq->tso++;
		txq->tx_cso += ssi->gso_segs;
	} else {
		int len;

		len = (t4vf_is_eth_imm(skb)
		       ? skb->len + sizeof(*cpl)
		       : sizeof(*cpl));
		wr->op_immdlen =
			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
				    FW_WR_IMMDLEN_V(len));

		/* Set up TX Packet CPL pointer, control word and perform
		 * accounting.
		 */
		cpl = (void *)(wr + 1);
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			cntrl = hwcsum(adapter->params.chip, skb) |
				TXPKT_IPCSUM_DIS_F;
			txq->tx_cso++;
		} else {
			cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
		}
	}

	/* If there's a VLAN tag present, add that to the list of things to
	 * do in this Work Request.
	 */
	if (skb_vlan_tag_present(skb)) {
		txq->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	 /* Fill in the TX Packet CPL message header. */
	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->port_id) |
				 TXPKT_PF_V(0));
	cpl->pack = cpu_to_be16(0);
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	/* Fill in the body of the TX Packet CPL message with either in-lined
	 * data or a Scatter/Gather List.
	 */
	if (t4vf_is_eth_imm(skb)) {
		/* In-line the packet's data and free the skb since we don't
		 * need it any longer.
		 */
		cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
		dev_consume_skb_any(skb);
	} else {
		/* Write the skb's Scatter/Gather list into the TX Packet CPL
		 * message and retain a pointer to the skb so we can free it
		 * later when its DMA completes.  (We store the skb pointer
		 * in the Software Descriptor corresponding to the last TX
		 * Descriptor used by the Work Request.)
		 *
		 * The retained skb will be freed when the corresponding TX
		 * Descriptors are reclaimed after their DMAs complete.
		 * However, this could take quite a while since, in general,
		 * the hardware is set up to be lazy about sending DMA
		 * completion notifications to us and we mostly perform TX
		 * reclaims in the transmit routine.
		 *
		 * This is good for performance but means that we rely on new
		 * TX packets arriving to run the destructors of completed
		 * packets, which open up space in their sockets' send queues.
		 * Sometimes we do not get such new packets causing TX to
		 * stall.  A single UDP transmitter is a good example of this
		 * situation.  We have a clean up timer that periodically
		 * reclaims completed packets but it doesn't run often enough
		 * (nor do we want it to) to prevent lengthy stalls.  A
		 * solution to this problem is to run the destructor early,
		 * after the packet is queued but before it's DMAd.  A con is
		 * that we lie to socket memory accounting, but the amount of
		 * extra memory is reasonable (limited by the number of TX
		 * descriptors), the packets do actually get freed quickly by
		 * new packets almost always, and for protocols like TCP that
		 * wait for acks to really free up the data the extra memory
		 * is even less.  On the positive side we run the destructors
		 * on the sending CPU rather than on a potentially different
		 * completing CPU, usually a good thing.
		 *
		 * Run the destructor before telling the DMA engine about the
		 * packet to make sure it doesn't complete and get freed
		 * prematurely.
		 */
		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
		struct sge_txq *tq = &txq->q;

		/* If the Work Request header was an exact multiple of our TX
		 * Descriptor length, then it's possible that the starting SGL
		 * pointer lines up exactly with the end of our TX Descriptor
		 * ring.  If that's the case, wrap around to the beginning
		 * here ...
		 */
		if (unlikely((void *)sgl == (void *)tq->stat)) {
			sgl = (void *)tq->desc;
			end = (void *)((void *)tq->desc +
				       ((void *)end - (void *)tq->stat));
		}

		cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}

	/* Advance our internal TX Queue state, tell the hardware about
	 * the new TX descriptors and return success.
	 */
	txq_advance(&txq->q, ndesc);

	cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
	return NETDEV_TX_OK;

out_free:
	/* An error of some sort happened.  Free the TX skb and tell the
	 * OS that we've "dealt" with the packet ...
	 */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
20938c2ecf20Sopenharmony_ci
20948c2ecf20Sopenharmony_ci/**
20958c2ecf20Sopenharmony_ci * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
20968c2ecf20Sopenharmony_ci * @q: the SGE control Tx queue
20978c2ecf20Sopenharmony_ci *
20988c2ecf20Sopenharmony_ci * This is a variant of cxgb4_reclaim_completed_tx() that is used
20998c2ecf20Sopenharmony_ci * for Tx queues that send only immediate data (presently just
21008c2ecf20Sopenharmony_ci * the control queues) and	thus do not have any sk_buffs to release.
21018c2ecf20Sopenharmony_ci */
21028c2ecf20Sopenharmony_cistatic inline void reclaim_completed_tx_imm(struct sge_txq *q)
21038c2ecf20Sopenharmony_ci{
21048c2ecf20Sopenharmony_ci	int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
21058c2ecf20Sopenharmony_ci	int reclaim = hw_cidx - q->cidx;
21068c2ecf20Sopenharmony_ci
21078c2ecf20Sopenharmony_ci	if (reclaim < 0)
21088c2ecf20Sopenharmony_ci		reclaim += q->size;
21098c2ecf20Sopenharmony_ci
21108c2ecf20Sopenharmony_ci	q->in_use -= reclaim;
21118c2ecf20Sopenharmony_ci	q->cidx = hw_cidx;
21128c2ecf20Sopenharmony_ci}
21138c2ecf20Sopenharmony_ci
21148c2ecf20Sopenharmony_cistatic inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
21158c2ecf20Sopenharmony_ci{
21168c2ecf20Sopenharmony_ci	u32 val = *idx + n;
21178c2ecf20Sopenharmony_ci
21188c2ecf20Sopenharmony_ci	if (val >= max)
21198c2ecf20Sopenharmony_ci		val -= max;
21208c2ecf20Sopenharmony_ci
21218c2ecf20Sopenharmony_ci	*idx = val;
21228c2ecf20Sopenharmony_ci}
21238c2ecf20Sopenharmony_ci
21248c2ecf20Sopenharmony_civoid cxgb4_eosw_txq_free_desc(struct adapter *adap,
21258c2ecf20Sopenharmony_ci			      struct sge_eosw_txq *eosw_txq, u32 ndesc)
21268c2ecf20Sopenharmony_ci{
21278c2ecf20Sopenharmony_ci	struct tx_sw_desc *d;
21288c2ecf20Sopenharmony_ci
21298c2ecf20Sopenharmony_ci	d = &eosw_txq->desc[eosw_txq->last_cidx];
21308c2ecf20Sopenharmony_ci	while (ndesc--) {
21318c2ecf20Sopenharmony_ci		if (d->skb) {
21328c2ecf20Sopenharmony_ci			if (d->addr[0]) {
21338c2ecf20Sopenharmony_ci				unmap_skb(adap->pdev_dev, d->skb, d->addr);
21348c2ecf20Sopenharmony_ci				memset(d->addr, 0, sizeof(d->addr));
21358c2ecf20Sopenharmony_ci			}
21368c2ecf20Sopenharmony_ci			dev_consume_skb_any(d->skb);
21378c2ecf20Sopenharmony_ci			d->skb = NULL;
21388c2ecf20Sopenharmony_ci		}
21398c2ecf20Sopenharmony_ci		eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
21408c2ecf20Sopenharmony_ci				       eosw_txq->ndesc);
21418c2ecf20Sopenharmony_ci		d = &eosw_txq->desc[eosw_txq->last_cidx];
21428c2ecf20Sopenharmony_ci	}
21438c2ecf20Sopenharmony_ci}
21448c2ecf20Sopenharmony_ci
21458c2ecf20Sopenharmony_cistatic inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n)
21468c2ecf20Sopenharmony_ci{
21478c2ecf20Sopenharmony_ci	eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc);
21488c2ecf20Sopenharmony_ci	eosw_txq->inuse += n;
21498c2ecf20Sopenharmony_ci}
21508c2ecf20Sopenharmony_ci
21518c2ecf20Sopenharmony_cistatic inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
21528c2ecf20Sopenharmony_ci				   struct sk_buff *skb)
21538c2ecf20Sopenharmony_ci{
21548c2ecf20Sopenharmony_ci	if (eosw_txq->inuse == eosw_txq->ndesc)
21558c2ecf20Sopenharmony_ci		return -ENOMEM;
21568c2ecf20Sopenharmony_ci
21578c2ecf20Sopenharmony_ci	eosw_txq->desc[eosw_txq->pidx].skb = skb;
21588c2ecf20Sopenharmony_ci	return 0;
21598c2ecf20Sopenharmony_ci}
21608c2ecf20Sopenharmony_ci
21618c2ecf20Sopenharmony_cistatic inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq)
21628c2ecf20Sopenharmony_ci{
21638c2ecf20Sopenharmony_ci	return eosw_txq->desc[eosw_txq->last_pidx].skb;
21648c2ecf20Sopenharmony_ci}
21658c2ecf20Sopenharmony_ci
21668c2ecf20Sopenharmony_cistatic inline u8 ethofld_calc_tx_flits(struct adapter *adap,
21678c2ecf20Sopenharmony_ci				       struct sk_buff *skb, u32 hdr_len)
21688c2ecf20Sopenharmony_ci{
21698c2ecf20Sopenharmony_ci	u8 flits, nsgl = 0;
21708c2ecf20Sopenharmony_ci	u32 wrlen;
21718c2ecf20Sopenharmony_ci
21728c2ecf20Sopenharmony_ci	wrlen = sizeof(struct fw_eth_tx_eo_wr) + sizeof(struct cpl_tx_pkt_core);
21738c2ecf20Sopenharmony_ci	if (skb_shinfo(skb)->gso_size &&
21748c2ecf20Sopenharmony_ci	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
21758c2ecf20Sopenharmony_ci		wrlen += sizeof(struct cpl_tx_pkt_lso_core);
21768c2ecf20Sopenharmony_ci
21778c2ecf20Sopenharmony_ci	wrlen += roundup(hdr_len, 16);
21788c2ecf20Sopenharmony_ci
21798c2ecf20Sopenharmony_ci	/* Packet headers + WR + CPLs */
21808c2ecf20Sopenharmony_ci	flits = DIV_ROUND_UP(wrlen, 8);
21818c2ecf20Sopenharmony_ci
21828c2ecf20Sopenharmony_ci	if (skb_shinfo(skb)->nr_frags > 0) {
21838c2ecf20Sopenharmony_ci		if (skb_headlen(skb) - hdr_len)
21848c2ecf20Sopenharmony_ci			nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1);
21858c2ecf20Sopenharmony_ci		else
21868c2ecf20Sopenharmony_ci			nsgl = sgl_len(skb_shinfo(skb)->nr_frags);
21878c2ecf20Sopenharmony_ci	} else if (skb->len - hdr_len) {
21888c2ecf20Sopenharmony_ci		nsgl = sgl_len(1);
21898c2ecf20Sopenharmony_ci	}
21908c2ecf20Sopenharmony_ci
21918c2ecf20Sopenharmony_ci	return flits + nsgl;
21928c2ecf20Sopenharmony_ci}
21938c2ecf20Sopenharmony_ci
/* write_eo_wr - fill in a FW_ETH_TX_EO_WR (Ethernet Offload) Work Request
 * @adap: the adapter
 * @eosw_txq: the software TX queue this Work Request belongs to
 * @skb: the packet being transmitted
 * @wr: the Work Request header to fill in
 * @hdr_len: length of the packet headers that will be sent as immediate data
 * @wrlen: total Work Request length in bytes
 *
 * Builds the WR header and (for TCP) the TCPSEG sub-descriptor, requests a
 * firmware completion when needed, charges @eosw_txq's WR credits, and
 * returns a pointer to the TX Packet CPL that follows.
 */
static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq,
			 struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr,
			 u32 hdr_len, u32 wrlen)
{
	const struct skb_shared_info *ssi = skb_shinfo(skb);
	struct cpl_tx_pkt_core *cpl;
	u32 immd_len, wrlen16;
	bool compl = false;
	u8 ver, proto;

	/* Pick the L4 protocol from the IPv4 or IPv6 header as appropriate. */
	ver = ip_hdr(skb)->version;
	proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol;

	/* Immediate data: the TX Packet CPL, an LSO CPL for TSO (but not
	 * UDP segmentation) packets, and the in-lined packet headers.
	 */
	wrlen16 = DIV_ROUND_UP(wrlen, 16);
	immd_len = sizeof(struct cpl_tx_pkt_core);
	if (skb_shinfo(skb)->gso_size &&
	    !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4))
		immd_len += sizeof(struct cpl_tx_pkt_lso_core);
	immd_len += hdr_len;

	/* Ask the firmware for a completion on the first WR and thereafter
	 * whenever roughly half of the queue's WR credits have been consumed
	 * since the last completion, so credits are returned promptly.
	 */
	if (!eosw_txq->ncompl ||
	    (eosw_txq->last_compl + wrlen16) >=
	    (adap->params.ofldq_wr_cred / 2)) {
		compl = true;
		eosw_txq->ncompl++;
		eosw_txq->last_compl = 0;
	}

	wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) |
				     FW_ETH_TX_EO_WR_IMMDLEN_V(immd_len) |
				     FW_WR_COMPL_V(compl));
	wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) |
					 FW_WR_FLOWID_V(eosw_txq->hwtid));
	wr->r3 = 0;
	if (proto == IPPROTO_UDP) {
		cpl = write_eo_udp_wr(skb, wr, hdr_len);
	} else {
		/* TCP: fill in the TCPSEG sub-descriptor with the header
		 * lengths and payload size.
		 */
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = skb_network_offset(skb);
		wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb));
		wr->u.tcpseg.tcplen = tcp_hdrlen(skb);
		wr->u.tcpseg.tsclk_tsoff = 0;
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len);

		if (ssi->gso_size) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size);
			cpl = write_tso_wr(adap, skb, lso);
		} else {
			/* No segmentation: use the maximum MSS sentinel. */
			wr->u.tcpseg.mss = cpu_to_be16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

	/* Charge this WR against the queue's credits and track how many
	 * credits have gone by since the last completion request.
	 */
	eosw_txq->cred -= wrlen16;
	eosw_txq->last_compl += wrlen16;
	return cpl;
}
22558c2ecf20Sopenharmony_ci
/**
 *	ethofld_hard_xmit - push one queued ETHOFLD skb into the hardware queue
 *	@dev: the net device
 *	@eosw_txq: the software Tx queue holding the pending skb
 *
 *	Takes the skb at @eosw_txq->last_pidx and writes it into the backing
 *	hardware ETHOFLD Tx queue as a work request.  Must be called with
 *	@eosw_txq->lock held (see ethofld_xmit()).  Returns 0 on success, or
 *	a negative errno when there is no room; the caller retries once
 *	credits are restored via the completion interrupt.
 */
static int ethofld_hard_xmit(struct net_device *dev,
			     struct sge_eosw_txq *eosw_txq)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	u32 wrlen, wrlen16, hdr_len, data_len;
	enum sge_eosw_state next_state;
	u64 cntrl, *start, *end, *sgl;
	struct sge_eohw_txq *eohw_txq;
	struct cpl_tx_pkt_core *cpl;
	struct fw_eth_tx_eo_wr *wr;
	bool skip_eotx_wr = false;
	struct tx_sw_desc *d;
	struct sk_buff *skb;
	int left, ret = 0;
	u8 flits, ndesc;

	eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid];
	spin_lock(&eohw_txq->lock);
	reclaim_completed_tx_imm(&eohw_txq->q);

	d = &eosw_txq->desc[eosw_txq->last_pidx];
	skb = d->skb;
	skb_tx_timestamp(skb);

	wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx];
	if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
		     eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
		/* This entry is the FLOWC request itself: send the raw skb
		 * contents without the usual EO Tx work request + CPL wrapper.
		 */
		hdr_len = skb->len;
		data_len = 0;
		flits = DIV_ROUND_UP(hdr_len, 8);
		if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
			next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
		else
			next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
		skip_eotx_wr = true;
	} else {
		/* Normal packet: headers go inline, payload goes via SGL. */
		hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb));
		data_len = skb->len - hdr_len;
		flits = ethofld_calc_tx_flits(adap, skb, hdr_len);
	}
	ndesc = flits_to_desc(flits);
	wrlen = flits * 8;
	wrlen16 = DIV_ROUND_UP(wrlen, 16);

	left = txq_avail(&eohw_txq->q) - ndesc;

	/* If there are no descriptors left in hardware queues or no
	 * CPL credits left in software queues, then wait for them
	 * to come back and retry again. Note that we always request
	 * for credits update via interrupt for every half credits
	 * consumed. So, the interrupt will eventually restore the
	 * credits and invoke the Tx path again.
	 */
	if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	if (unlikely(skip_eotx_wr)) {
		/* FLOWC path: account for the credits here, since
		 * write_eo_wr() (which normally does this bookkeeping)
		 * is bypassed.
		 */
		start = (u64 *)wr;
		eosw_txq->state = next_state;
		eosw_txq->cred -= wrlen16;
		eosw_txq->ncompl++;
		eosw_txq->last_compl = 0;
		goto write_wr_headers;
	}

	cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen);
	cntrl = hwcsum(adap->params.chip, skb);
	if (skb_vlan_tag_present(skb))
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));

	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
				 TXPKT_INTF_V(pi->tx_chan) |
				 TXPKT_PF_V(adap->pf));
	cpl->pack = 0;
	cpl->len = cpu_to_be16(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	start = (u64 *)(cpl + 1);

write_wr_headers:
	sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start,
					  hdr_len);
	if (data_len) {
		/* DMA-map the payload and append the scatter-gather list. */
		ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr);
		if (unlikely(ret)) {
			memset(d->addr, 0, sizeof(d->addr));
			eohw_txq->mapping_err++;
			goto out_unlock;
		}

		end = (u64 *)wr + flits;
		if (unlikely(start > sgl)) {
			/* The WR wrapped past the end of the descriptor
			 * ring; recompute @end relative to the ring base.
			 */
			left = (u8 *)end - (u8 *)eohw_txq->q.stat;
			end = (void *)eohw_txq->q.desc + left;
		}

		if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) {
			/* If current position is already at the end of the
			 * txq, reset the current to point to start of the queue
			 * and update the end ptr as well.
			 */
			left = (u8 *)end - (u8 *)eohw_txq->q.stat;

			end = (void *)eohw_txq->q.desc + left;
			sgl = (void *)eohw_txq->q.desc;
		}

		cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len,
				d->addr);
	}

	/* Update TSO/USO/checksum-offload statistics. */
	if (skb_shinfo(skb)->gso_size) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			eohw_txq->uso++;
		else
			eohw_txq->tso++;
		eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		eohw_txq->tx_cso++;
	}

	if (skb_vlan_tag_present(skb))
		eohw_txq->vlan_ins++;

	/* Ring the doorbell and consume the software-queue slot. */
	txq_advance(&eohw_txq->q, ndesc);
	cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc);
	eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc);

out_unlock:
	spin_unlock(&eohw_txq->lock);
	return ret;
}
23918c2ecf20Sopenharmony_ci
23928c2ecf20Sopenharmony_cistatic void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq)
23938c2ecf20Sopenharmony_ci{
23948c2ecf20Sopenharmony_ci	struct sk_buff *skb;
23958c2ecf20Sopenharmony_ci	int pktcount, ret;
23968c2ecf20Sopenharmony_ci
23978c2ecf20Sopenharmony_ci	switch (eosw_txq->state) {
23988c2ecf20Sopenharmony_ci	case CXGB4_EO_STATE_ACTIVE:
23998c2ecf20Sopenharmony_ci	case CXGB4_EO_STATE_FLOWC_OPEN_SEND:
24008c2ecf20Sopenharmony_ci	case CXGB4_EO_STATE_FLOWC_CLOSE_SEND:
24018c2ecf20Sopenharmony_ci		pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
24028c2ecf20Sopenharmony_ci		if (pktcount < 0)
24038c2ecf20Sopenharmony_ci			pktcount += eosw_txq->ndesc;
24048c2ecf20Sopenharmony_ci		break;
24058c2ecf20Sopenharmony_ci	case CXGB4_EO_STATE_FLOWC_OPEN_REPLY:
24068c2ecf20Sopenharmony_ci	case CXGB4_EO_STATE_FLOWC_CLOSE_REPLY:
24078c2ecf20Sopenharmony_ci	case CXGB4_EO_STATE_CLOSED:
24088c2ecf20Sopenharmony_ci	default:
24098c2ecf20Sopenharmony_ci		return;
24108c2ecf20Sopenharmony_ci	}
24118c2ecf20Sopenharmony_ci
24128c2ecf20Sopenharmony_ci	while (pktcount--) {
24138c2ecf20Sopenharmony_ci		skb = eosw_txq_peek(eosw_txq);
24148c2ecf20Sopenharmony_ci		if (!skb) {
24158c2ecf20Sopenharmony_ci			eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
24168c2ecf20Sopenharmony_ci					       eosw_txq->ndesc);
24178c2ecf20Sopenharmony_ci			continue;
24188c2ecf20Sopenharmony_ci		}
24198c2ecf20Sopenharmony_ci
24208c2ecf20Sopenharmony_ci		ret = ethofld_hard_xmit(dev, eosw_txq);
24218c2ecf20Sopenharmony_ci		if (ret)
24228c2ecf20Sopenharmony_ci			break;
24238c2ecf20Sopenharmony_ci	}
24248c2ecf20Sopenharmony_ci}
24258c2ecf20Sopenharmony_ci
24268c2ecf20Sopenharmony_cistatic netdev_tx_t cxgb4_ethofld_xmit(struct sk_buff *skb,
24278c2ecf20Sopenharmony_ci				      struct net_device *dev)
24288c2ecf20Sopenharmony_ci{
24298c2ecf20Sopenharmony_ci	struct cxgb4_tc_port_mqprio *tc_port_mqprio;
24308c2ecf20Sopenharmony_ci	struct port_info *pi = netdev2pinfo(dev);
24318c2ecf20Sopenharmony_ci	struct adapter *adap = netdev2adap(dev);
24328c2ecf20Sopenharmony_ci	struct sge_eosw_txq *eosw_txq;
24338c2ecf20Sopenharmony_ci	u32 qid;
24348c2ecf20Sopenharmony_ci	int ret;
24358c2ecf20Sopenharmony_ci
24368c2ecf20Sopenharmony_ci	ret = cxgb4_validate_skb(skb, dev, ETH_HLEN);
24378c2ecf20Sopenharmony_ci	if (ret)
24388c2ecf20Sopenharmony_ci		goto out_free;
24398c2ecf20Sopenharmony_ci
24408c2ecf20Sopenharmony_ci	tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id];
24418c2ecf20Sopenharmony_ci	qid = skb_get_queue_mapping(skb) - pi->nqsets;
24428c2ecf20Sopenharmony_ci	eosw_txq = &tc_port_mqprio->eosw_txq[qid];
24438c2ecf20Sopenharmony_ci	spin_lock_bh(&eosw_txq->lock);
24448c2ecf20Sopenharmony_ci	if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
24458c2ecf20Sopenharmony_ci		goto out_unlock;
24468c2ecf20Sopenharmony_ci
24478c2ecf20Sopenharmony_ci	ret = eosw_txq_enqueue(eosw_txq, skb);
24488c2ecf20Sopenharmony_ci	if (ret)
24498c2ecf20Sopenharmony_ci		goto out_unlock;
24508c2ecf20Sopenharmony_ci
24518c2ecf20Sopenharmony_ci	/* SKB is queued for processing until credits are available.
24528c2ecf20Sopenharmony_ci	 * So, call the destructor now and we'll free the skb later
24538c2ecf20Sopenharmony_ci	 * after it has been successfully transmitted.
24548c2ecf20Sopenharmony_ci	 */
24558c2ecf20Sopenharmony_ci	skb_orphan(skb);
24568c2ecf20Sopenharmony_ci
24578c2ecf20Sopenharmony_ci	eosw_txq_advance(eosw_txq, 1);
24588c2ecf20Sopenharmony_ci	ethofld_xmit(dev, eosw_txq);
24598c2ecf20Sopenharmony_ci	spin_unlock_bh(&eosw_txq->lock);
24608c2ecf20Sopenharmony_ci	return NETDEV_TX_OK;
24618c2ecf20Sopenharmony_ci
24628c2ecf20Sopenharmony_ciout_unlock:
24638c2ecf20Sopenharmony_ci	spin_unlock_bh(&eosw_txq->lock);
24648c2ecf20Sopenharmony_ciout_free:
24658c2ecf20Sopenharmony_ci	dev_kfree_skb_any(skb);
24668c2ecf20Sopenharmony_ci	return NETDEV_TX_OK;
24678c2ecf20Sopenharmony_ci}
24688c2ecf20Sopenharmony_ci
24698c2ecf20Sopenharmony_cinetdev_tx_t t4_start_xmit(struct sk_buff *skb, struct net_device *dev)
24708c2ecf20Sopenharmony_ci{
24718c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(dev);
24728c2ecf20Sopenharmony_ci	u16 qid = skb_get_queue_mapping(skb);
24738c2ecf20Sopenharmony_ci
24748c2ecf20Sopenharmony_ci	if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM))
24758c2ecf20Sopenharmony_ci		return cxgb4_vf_eth_xmit(skb, dev);
24768c2ecf20Sopenharmony_ci
24778c2ecf20Sopenharmony_ci	if (unlikely(qid >= pi->nqsets))
24788c2ecf20Sopenharmony_ci		return cxgb4_ethofld_xmit(skb, dev);
24798c2ecf20Sopenharmony_ci
24808c2ecf20Sopenharmony_ci	if (is_ptp_enabled(skb, dev)) {
24818c2ecf20Sopenharmony_ci		struct adapter *adap = netdev2adap(dev);
24828c2ecf20Sopenharmony_ci		netdev_tx_t ret;
24838c2ecf20Sopenharmony_ci
24848c2ecf20Sopenharmony_ci		spin_lock(&adap->ptp_lock);
24858c2ecf20Sopenharmony_ci		ret = cxgb4_eth_xmit(skb, dev);
24868c2ecf20Sopenharmony_ci		spin_unlock(&adap->ptp_lock);
24878c2ecf20Sopenharmony_ci		return ret;
24888c2ecf20Sopenharmony_ci	}
24898c2ecf20Sopenharmony_ci
24908c2ecf20Sopenharmony_ci	return cxgb4_eth_xmit(skb, dev);
24918c2ecf20Sopenharmony_ci}
24928c2ecf20Sopenharmony_ci
24938c2ecf20Sopenharmony_cistatic void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq)
24948c2ecf20Sopenharmony_ci{
24958c2ecf20Sopenharmony_ci	int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
24968c2ecf20Sopenharmony_ci	int pidx = eosw_txq->pidx;
24978c2ecf20Sopenharmony_ci	struct sk_buff *skb;
24988c2ecf20Sopenharmony_ci
24998c2ecf20Sopenharmony_ci	if (!pktcount)
25008c2ecf20Sopenharmony_ci		return;
25018c2ecf20Sopenharmony_ci
25028c2ecf20Sopenharmony_ci	if (pktcount < 0)
25038c2ecf20Sopenharmony_ci		pktcount += eosw_txq->ndesc;
25048c2ecf20Sopenharmony_ci
25058c2ecf20Sopenharmony_ci	while (pktcount--) {
25068c2ecf20Sopenharmony_ci		pidx--;
25078c2ecf20Sopenharmony_ci		if (pidx < 0)
25088c2ecf20Sopenharmony_ci			pidx += eosw_txq->ndesc;
25098c2ecf20Sopenharmony_ci
25108c2ecf20Sopenharmony_ci		skb = eosw_txq->desc[pidx].skb;
25118c2ecf20Sopenharmony_ci		if (skb) {
25128c2ecf20Sopenharmony_ci			dev_consume_skb_any(skb);
25138c2ecf20Sopenharmony_ci			eosw_txq->desc[pidx].skb = NULL;
25148c2ecf20Sopenharmony_ci			eosw_txq->inuse--;
25158c2ecf20Sopenharmony_ci		}
25168c2ecf20Sopenharmony_ci	}
25178c2ecf20Sopenharmony_ci
25188c2ecf20Sopenharmony_ci	eosw_txq->pidx = eosw_txq->last_pidx + 1;
25198c2ecf20Sopenharmony_ci}
25208c2ecf20Sopenharmony_ci
/**
 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
 * @dev: netdevice
 * @eotid: ETHOFLD tid to bind/unbind
 * @tc: traffic class. If set to FW_SCHED_CLS_NONE, then unbinds the @eotid
 *
 * Send a FLOWC work request to bind an ETHOFLD TID to a traffic class.
 * If @tc is set to FW_SCHED_CLS_NONE, then the @eotid is unbound from
 * a traffic class.
 *
 * Returns 0 on success (or when the queue is not in a state that accepts
 * the request — see note below), negative errno otherwise.
 */
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
{
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);
	enum sge_eosw_state next_state;
	struct sge_eosw_txq *eosw_txq;
	u32 len, len16, nparams = 6;
	struct fw_flowc_wr *flowc;
	struct eotid_entry *entry;
	struct sge_ofld_rxq *rxq;
	struct sk_buff *skb;
	int ret = 0;

	len = struct_size(flowc, mnemval, nparams);
	len16 = DIV_ROUND_UP(len, 16);

	entry = cxgb4_lookup_eotid(&adap->tids, eotid);
	if (!entry)
		return -ENOMEM;

	eosw_txq = (struct sge_eosw_txq *)entry->data;
	if (!eosw_txq)
		return -ENOMEM;

	if (!(adap->flags & CXGB4_FW_OK)) {
		/* Don't stall caller when access to FW is lost */
		complete(&eosw_txq->completion);
		return -EIO;
	}

	/* Allocate before taking the lock; GFP_KERNEL may sleep. */
	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	spin_lock_bh(&eosw_txq->lock);
	if (tc != FW_SCHED_CLS_NONE) {
		/* Bind request: only valid from the CLOSED state.
		 * NOTE(review): a state mismatch bails out with ret still 0,
		 * so callers see success — confirm this is intentional.
		 */
		if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
			goto out_free_skb;

		next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
	} else {
		/* Unbind request: only valid from the ACTIVE state. */
		if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
			goto out_free_skb;

		next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
	}

	flowc = __skb_put(skb, len);
	memset(flowc, 0, len);

	/* Build the FLOWC WR: identify the flow (PF/channel/port/ingress
	 * queue), the scheduling class, and the new EO state.
	 */
	rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid];
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) |
					  FW_WR_FLOWID_V(eosw_txq->hwtid));
	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams) |
					   FW_WR_COMPL_V(1));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
	flowc->mnemval[4].val = cpu_to_be32(tc);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE;
	flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ?
					    FW_FLOWC_MNEM_EOSTATE_CLOSING :
					    FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);

	/* Free up any pending skbs to ensure there's room for
	 * termination FLOWC.
	 */
	if (tc == FW_SCHED_CLS_NONE)
		eosw_txq_flush_pending_skbs(eosw_txq);

	ret = eosw_txq_enqueue(eosw_txq, skb);
	if (ret)
		goto out_free_skb;

	/* Remember where the FLOWC sits so ethofld_hard_xmit() can treat
	 * it specially, then kick off transmission.
	 */
	eosw_txq->state = next_state;
	eosw_txq->flowc_idx = eosw_txq->pidx;
	eosw_txq_advance(eosw_txq, 1);
	ethofld_xmit(dev, eosw_txq);

	spin_unlock_bh(&eosw_txq->lock);
	return 0;

out_free_skb:
	dev_consume_skb_any(skb);
	spin_unlock_bh(&eosw_txq->lock);
	return ret;
}
26258c2ecf20Sopenharmony_ci
26268c2ecf20Sopenharmony_ci/**
26278c2ecf20Sopenharmony_ci *	is_imm - check whether a packet can be sent as immediate data
26288c2ecf20Sopenharmony_ci *	@skb: the packet
26298c2ecf20Sopenharmony_ci *
26308c2ecf20Sopenharmony_ci *	Returns true if a packet can be sent as a WR with immediate data.
26318c2ecf20Sopenharmony_ci */
26328c2ecf20Sopenharmony_cistatic inline int is_imm(const struct sk_buff *skb)
26338c2ecf20Sopenharmony_ci{
26348c2ecf20Sopenharmony_ci	return skb->len <= MAX_CTRL_WR_LEN;
26358c2ecf20Sopenharmony_ci}
26368c2ecf20Sopenharmony_ci
26378c2ecf20Sopenharmony_ci/**
26388c2ecf20Sopenharmony_ci *	ctrlq_check_stop - check if a control queue is full and should stop
26398c2ecf20Sopenharmony_ci *	@q: the queue
26408c2ecf20Sopenharmony_ci *	@wr: most recent WR written to the queue
26418c2ecf20Sopenharmony_ci *
26428c2ecf20Sopenharmony_ci *	Check if a control queue has become full and should be stopped.
26438c2ecf20Sopenharmony_ci *	We clean up control queue descriptors very lazily, only when we are out.
26448c2ecf20Sopenharmony_ci *	If the queue is still full after reclaiming any completed descriptors
26458c2ecf20Sopenharmony_ci *	we suspend it and have the last WR wake it up.
26468c2ecf20Sopenharmony_ci */
26478c2ecf20Sopenharmony_cistatic void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
26488c2ecf20Sopenharmony_ci{
26498c2ecf20Sopenharmony_ci	reclaim_completed_tx_imm(&q->q);
26508c2ecf20Sopenharmony_ci	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
26518c2ecf20Sopenharmony_ci		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
26528c2ecf20Sopenharmony_ci		q->q.stops++;
26538c2ecf20Sopenharmony_ci		q->full = 1;
26548c2ecf20Sopenharmony_ci	}
26558c2ecf20Sopenharmony_ci}
26568c2ecf20Sopenharmony_ci
#define CXGB4_SELFTEST_LB_STR "CHELSIO_SELFTEST"

/**
 * cxgb4_selftest_lb_pkt - run an ethtool loopback self-test
 * @netdev: the net device under test
 *
 * Hand-builds a small broadcast frame carrying CXGB4_SELFTEST_LB_STR,
 * injects it on the port's first Tx queue with checksum offload disabled,
 * and waits (up to 10s) for the Rx path to complete &pi->ethtool_lb.
 * Returns 0 on success, -ETIMEDOUT if the packet never came back,
 * -ENOMEM if no Tx descriptors were available, or the Rx-side result.
 */
int cxgb4_selftest_lb_pkt(struct net_device *netdev)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	struct cxgb4_ethtool_lb_test *lb;
	int ret, i = 0, pkt_len, credits;
	struct fw_eth_tx_pkt_wr *wr;
	struct cpl_tx_pkt_core *cpl;
	u32 ctrl0, ndesc, flits;
	struct sge_eth_txq *q;
	u8 *sgl;

	/* Payload is an Ethernet header plus the marker string. */
	pkt_len = ETH_HLEN + sizeof(CXGB4_SELFTEST_LB_STR);

	flits = DIV_ROUND_UP(pkt_len + sizeof(*cpl) + sizeof(*wr),
			     sizeof(__be64));
	ndesc = flits_to_desc(flits);

	lb = &pi->ethtool_lb;
	lb->loopback = 1;

	q = &adap->sge.ethtxq[pi->first_qset];
	__netif_tx_lock(q->txq, smp_processor_id());

	reclaim_completed_tx(adap, &q->q, -1, true);
	credits = txq_avail(&q->q) - ndesc;
	if (unlikely(credits < 0)) {
		__netif_tx_unlock(q->txq);
		return -ENOMEM;
	}

	/* Build the WR + CPL with the frame inlined as immediate data. */
	wr = (void *)&q->q.desc[q->q.pidx];
	memset(wr, 0, sizeof(struct tx_desc));

	wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
			       FW_WR_IMMDLEN_V(pkt_len +
			       sizeof(*cpl)));
	wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2)));
	wr->r3 = cpu_to_be64(0);

	cpl = (void *)(wr + 1);
	sgl = (u8 *)(cpl + 1);

	/* tx_chan + 4 selects the loopback interface for this channel. */
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) |
		TXPKT_INTF_V(pi->tx_chan + 4);

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(pkt_len);
	/* Disable checksum offload so the frame goes out untouched. */
	cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F);

	/* Frame layout: broadcast dst, our src MAC, then the marker. */
	eth_broadcast_addr(sgl);
	i += ETH_ALEN;
	ether_addr_copy(&sgl[i], netdev->dev_addr);
	i += ETH_ALEN;

	snprintf(&sgl[i], sizeof(CXGB4_SELFTEST_LB_STR), "%s",
		 CXGB4_SELFTEST_LB_STR);

	init_completion(&lb->completion);
	txq_advance(&q->q, ndesc);
	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	__netif_tx_unlock(q->txq);

	/* wait for the pkt to return */
	ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
	if (!ret)
		ret = -ETIMEDOUT;
	else
		ret = lb->result;

	lb->loopback = 0;

	return ret;
}
27348c2ecf20Sopenharmony_ci
27358c2ecf20Sopenharmony_ci/**
27368c2ecf20Sopenharmony_ci *	ctrl_xmit - send a packet through an SGE control Tx queue
27378c2ecf20Sopenharmony_ci *	@q: the control queue
27388c2ecf20Sopenharmony_ci *	@skb: the packet
27398c2ecf20Sopenharmony_ci *
27408c2ecf20Sopenharmony_ci *	Send a packet through an SGE control Tx queue.  Packets sent through
27418c2ecf20Sopenharmony_ci *	a control queue must fit entirely as immediate data.
27428c2ecf20Sopenharmony_ci */
27438c2ecf20Sopenharmony_cistatic int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
27448c2ecf20Sopenharmony_ci{
27458c2ecf20Sopenharmony_ci	unsigned int ndesc;
27468c2ecf20Sopenharmony_ci	struct fw_wr_hdr *wr;
27478c2ecf20Sopenharmony_ci
27488c2ecf20Sopenharmony_ci	if (unlikely(!is_imm(skb))) {
27498c2ecf20Sopenharmony_ci		WARN_ON(1);
27508c2ecf20Sopenharmony_ci		dev_kfree_skb(skb);
27518c2ecf20Sopenharmony_ci		return NET_XMIT_DROP;
27528c2ecf20Sopenharmony_ci	}
27538c2ecf20Sopenharmony_ci
27548c2ecf20Sopenharmony_ci	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
27558c2ecf20Sopenharmony_ci	spin_lock(&q->sendq.lock);
27568c2ecf20Sopenharmony_ci
27578c2ecf20Sopenharmony_ci	if (unlikely(q->full)) {
27588c2ecf20Sopenharmony_ci		skb->priority = ndesc;                  /* save for restart */
27598c2ecf20Sopenharmony_ci		__skb_queue_tail(&q->sendq, skb);
27608c2ecf20Sopenharmony_ci		spin_unlock(&q->sendq.lock);
27618c2ecf20Sopenharmony_ci		return NET_XMIT_CN;
27628c2ecf20Sopenharmony_ci	}
27638c2ecf20Sopenharmony_ci
27648c2ecf20Sopenharmony_ci	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
27658c2ecf20Sopenharmony_ci	cxgb4_inline_tx_skb(skb, &q->q, wr);
27668c2ecf20Sopenharmony_ci
27678c2ecf20Sopenharmony_ci	txq_advance(&q->q, ndesc);
27688c2ecf20Sopenharmony_ci	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
27698c2ecf20Sopenharmony_ci		ctrlq_check_stop(q, wr);
27708c2ecf20Sopenharmony_ci
27718c2ecf20Sopenharmony_ci	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
27728c2ecf20Sopenharmony_ci	spin_unlock(&q->sendq.lock);
27738c2ecf20Sopenharmony_ci
27748c2ecf20Sopenharmony_ci	kfree_skb(skb);
27758c2ecf20Sopenharmony_ci	return NET_XMIT_SUCCESS;
27768c2ecf20Sopenharmony_ci}
27778c2ecf20Sopenharmony_ci
/**
 *	restart_ctrlq - restart a suspended control queue
 *	@t: pointer to the tasklet associated with this handler
 *
 *	Resumes transmission on a suspended Tx control queue: drains the
 *	backlog queued by ctrl_xmit() while the queue was full, dropping
 *	and re-taking sendq.lock around each descriptor write to keep
 *	lock hold times short.
 */
static void restart_ctrlq(struct tasklet_struct *t)
{
	struct sk_buff *skb;
	unsigned int written = 0;
	struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);

	spin_lock(&q->sendq.lock);
	reclaim_completed_tx_imm(&q->q);
	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */

	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
		struct fw_wr_hdr *wr;
		unsigned int ndesc = skb->priority;     /* previously saved */

		written += ndesc;
		/* Write descriptors and free skbs outside the lock to limit
		 * wait times.  q->full is still set so new skbs will be queued.
		 */
		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
		txq_advance(&q->q, ndesc);
		spin_unlock(&q->sendq.lock);

		cxgb4_inline_tx_skb(skb, &q->q, wr);
		kfree_skb(skb);

		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
			unsigned long old = q->q.stops;

			ctrlq_check_stop(q, wr);
			if (q->q.stops != old) {          /* suspended anew */
				/* Queue filled up again: leave q->full set,
				 * flush what was written, and bail out.
				 */
				spin_lock(&q->sendq.lock);
				goto ringdb;
			}
		}
		/* Periodically ring the doorbell to bound DB latency. */
		if (written > 16) {
			cxgb4_ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}
		spin_lock(&q->sendq.lock);
	}
	q->full = 0;
ringdb:
	if (written)
		cxgb4_ring_tx_db(q->adap, &q->q, written);
	spin_unlock(&q->sendq.lock);
}
28308c2ecf20Sopenharmony_ci
28318c2ecf20Sopenharmony_ci/**
28328c2ecf20Sopenharmony_ci *	t4_mgmt_tx - send a management message
28338c2ecf20Sopenharmony_ci *	@adap: the adapter
28348c2ecf20Sopenharmony_ci *	@skb: the packet containing the management message
28358c2ecf20Sopenharmony_ci *
28368c2ecf20Sopenharmony_ci *	Send a management message through control queue 0.
28378c2ecf20Sopenharmony_ci */
28388c2ecf20Sopenharmony_ciint t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
28398c2ecf20Sopenharmony_ci{
28408c2ecf20Sopenharmony_ci	int ret;
28418c2ecf20Sopenharmony_ci
28428c2ecf20Sopenharmony_ci	local_bh_disable();
28438c2ecf20Sopenharmony_ci	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
28448c2ecf20Sopenharmony_ci	local_bh_enable();
28458c2ecf20Sopenharmony_ci	return ret;
28468c2ecf20Sopenharmony_ci}
28478c2ecf20Sopenharmony_ci
28488c2ecf20Sopenharmony_ci/**
28498c2ecf20Sopenharmony_ci *	is_ofld_imm - check whether a packet can be sent as immediate data
28508c2ecf20Sopenharmony_ci *	@skb: the packet
28518c2ecf20Sopenharmony_ci *
28528c2ecf20Sopenharmony_ci *	Returns true if a packet can be sent as an offload WR with immediate
28538c2ecf20Sopenharmony_ci *	data.
28548c2ecf20Sopenharmony_ci *	FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
28558c2ecf20Sopenharmony_ci *      However, FW_ULPTX_WR commands have a 256 byte immediate only
28568c2ecf20Sopenharmony_ci *      payload limit.
28578c2ecf20Sopenharmony_ci */
28588c2ecf20Sopenharmony_cistatic inline int is_ofld_imm(const struct sk_buff *skb)
28598c2ecf20Sopenharmony_ci{
28608c2ecf20Sopenharmony_ci	struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
28618c2ecf20Sopenharmony_ci	unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
28628c2ecf20Sopenharmony_ci
28638c2ecf20Sopenharmony_ci	if (unlikely(opcode == FW_ULPTX_WR))
28648c2ecf20Sopenharmony_ci		return skb->len <= MAX_IMM_ULPTX_WR_LEN;
28658c2ecf20Sopenharmony_ci	else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
28668c2ecf20Sopenharmony_ci		return skb->len <= SGE_MAX_WR_LEN;
28678c2ecf20Sopenharmony_ci	else
28688c2ecf20Sopenharmony_ci		return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
28698c2ecf20Sopenharmony_ci}
28708c2ecf20Sopenharmony_ci
28718c2ecf20Sopenharmony_ci/**
28728c2ecf20Sopenharmony_ci *	calc_tx_flits_ofld - calculate # of flits for an offload packet
28738c2ecf20Sopenharmony_ci *	@skb: the packet
28748c2ecf20Sopenharmony_ci *
28758c2ecf20Sopenharmony_ci *	Returns the number of flits needed for the given offload packet.
28768c2ecf20Sopenharmony_ci *	These packets are already fully constructed and no additional headers
28778c2ecf20Sopenharmony_ci *	will be added.
28788c2ecf20Sopenharmony_ci */
28798c2ecf20Sopenharmony_cistatic inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
28808c2ecf20Sopenharmony_ci{
28818c2ecf20Sopenharmony_ci	unsigned int flits, cnt;
28828c2ecf20Sopenharmony_ci
28838c2ecf20Sopenharmony_ci	if (is_ofld_imm(skb))
28848c2ecf20Sopenharmony_ci		return DIV_ROUND_UP(skb->len, 8);
28858c2ecf20Sopenharmony_ci
28868c2ecf20Sopenharmony_ci	flits = skb_transport_offset(skb) / 8U;   /* headers */
28878c2ecf20Sopenharmony_ci	cnt = skb_shinfo(skb)->nr_frags;
28888c2ecf20Sopenharmony_ci	if (skb_tail_pointer(skb) != skb_transport_header(skb))
28898c2ecf20Sopenharmony_ci		cnt++;
28908c2ecf20Sopenharmony_ci	return flits + sgl_len(cnt);
28918c2ecf20Sopenharmony_ci}
28928c2ecf20Sopenharmony_ci
28938c2ecf20Sopenharmony_ci/**
28948c2ecf20Sopenharmony_ci *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
28958c2ecf20Sopenharmony_ci *	@q: the queue to stop
28968c2ecf20Sopenharmony_ci *
28978c2ecf20Sopenharmony_ci *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
28988c2ecf20Sopenharmony_ci *	inability to map packets.  A periodic timer attempts to restart
28998c2ecf20Sopenharmony_ci *	queues so marked.
29008c2ecf20Sopenharmony_ci */
29018c2ecf20Sopenharmony_cistatic void txq_stop_maperr(struct sge_uld_txq *q)
29028c2ecf20Sopenharmony_ci{
29038c2ecf20Sopenharmony_ci	q->mapping_err++;
29048c2ecf20Sopenharmony_ci	q->q.stops++;
29058c2ecf20Sopenharmony_ci	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
29068c2ecf20Sopenharmony_ci		q->adap->sge.txq_maperr);
29078c2ecf20Sopenharmony_ci}
29088c2ecf20Sopenharmony_ci
29098c2ecf20Sopenharmony_ci/**
29108c2ecf20Sopenharmony_ci *	ofldtxq_stop - stop an offload Tx queue that has become full
29118c2ecf20Sopenharmony_ci *	@q: the queue to stop
29128c2ecf20Sopenharmony_ci *	@wr: the Work Request causing the queue to become full
29138c2ecf20Sopenharmony_ci *
29148c2ecf20Sopenharmony_ci *	Stops an offload Tx queue that has become full and modifies the packet
29158c2ecf20Sopenharmony_ci *	being written to request a wakeup.
29168c2ecf20Sopenharmony_ci */
29178c2ecf20Sopenharmony_cistatic void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
29188c2ecf20Sopenharmony_ci{
29198c2ecf20Sopenharmony_ci	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
29208c2ecf20Sopenharmony_ci	q->q.stops++;
29218c2ecf20Sopenharmony_ci	q->full = 1;
29228c2ecf20Sopenharmony_ci}
29238c2ecf20Sopenharmony_ci
/**
 *	service_ofldq - service/restart a suspended offload queue
 *	@q: the offload queue
 *
 *	Services an offload Tx queue by moving packets from its Pending Send
 *	Queue to the Hardware TX ring.  The function starts and ends with the
 *	Send Queue locked, but drops the lock while putting the skb at the
 *	head of the Send Queue onto the Hardware TX Ring.  Dropping the lock
 *	allows more skbs to be added to the Send Queue by other threads.
 *	The packet being processed at the head of the Pending Send Queue is
 *	left on the queue in case we experience DMA Mapping errors, etc.
 *	and need to give up and restart later.
 *
 *	service_ofldq() can be thought of as a task which opportunistically
 *	uses other threads execution contexts.  We use the Offload Queue
 *	boolean "service_ofldq_running" to make sure that only one instance
 *	is ever running at a time ...
 */
static void service_ofldq(struct sge_uld_txq *q)
	__must_hold(&q->sendq.lock)
{
	u64 *pos, *before, *end;
	int credits;
	struct sk_buff *skb;
	struct sge_txq *txq;
	unsigned int left;
	unsigned int written = 0;
	unsigned int flits, ndesc;

	/* If another thread is currently in service_ofldq() processing the
	 * Pending Send Queue then there's nothing to do. Otherwise, flag
	 * that we're doing the work and continue.  Examining/modifying
	 * the Offload Queue boolean "service_ofldq_running" must be done
	 * while holding the Pending Send Queue Lock.
	 */
	if (q->service_ofldq_running)
		return;
	q->service_ofldq_running = true;

	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
		/* We drop the lock while we're working with the skb at the
		 * head of the Pending Send Queue.  This allows more skbs to
		 * be added to the Pending Send Queue while we're working on
		 * this one.  We don't need to lock to guard the TX Ring
		 * updates because only one thread of execution is ever
		 * allowed into service_ofldq() at a time.
		 */
		spin_unlock(&q->sendq.lock);

		cxgb4_reclaim_completed_tx(q->adap, &q->q, false);

		/* ofld_xmit() cached the flit count in skb->priority */
		flits = skb->priority;                /* previously saved */
		ndesc = flits_to_desc(flits);
		credits = txq_avail(&q->q) - ndesc;
		BUG_ON(credits < 0);
		/* running low on ring space: ask HW for a wakeup and mark
		 * the queue full so the outer loop terminates after this WR
		 */
		if (unlikely(credits < TXQ_STOP_THRES))
			ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);

		pos = (u64 *)&q->q.desc[q->q.pidx];
		if (is_ofld_imm(skb))
			/* small WR: copy the whole skb into the descriptors */
			cxgb4_inline_tx_skb(skb, &q->q, pos);
		else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
				       (dma_addr_t *)skb->head)) {
			/* DMA mapping failed; leave the skb at the head of
			 * the sendq and bail out -- a periodic timer will
			 * restart this queue later
			 */
			txq_stop_maperr(q);
			spin_lock(&q->sendq.lock);
			break;
		} else {
			int last_desc, hdr_len = skb_transport_offset(skb);

			/* The WR headers  may not fit within one descriptor.
			 * So we need to deal with wrap-around here.
			 */
			before = (u64 *)pos;
			end = (u64 *)pos + flits;
			txq = &q->q;
			pos = (void *)inline_tx_skb_header(skb, &q->q,
							   (void *)pos,
							   hdr_len);
			/* header copy wrapped past the end of the ring:
			 * recompute the end pointer relative to the ring base
			 */
			if (before > (u64 *)pos) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
			}

			/* If current position is already at the end of the
			 * ofld queue, reset the current to point to
			 * start of the queue and update the end ptr as well.
			 */
			if (pos == (u64 *)txq->stat) {
				left = (u8 *)end - (u8 *)txq->stat;
				end = (void *)txq->desc + left;
				pos = (void *)txq->desc;
			}

			/* DMA addresses were stashed in skb->head by
			 * cxgb4_map_skb() above
			 */
			cxgb4_write_sgl(skb, &q->q, (void *)pos,
					end, hdr_len,
					(dma_addr_t *)skb->head);
#ifdef CONFIG_NEED_DMA_MAP_STATE
			skb->dev = q->adap->port[0];
			skb->destructor = deferred_unmap_destructor;
#endif
			/* remember the skb on its last descriptor so it can
			 * be freed when that descriptor is reclaimed
			 */
			last_desc = q->q.pidx + ndesc - 1;
			if (last_desc >= q->q.size)
				last_desc -= q->q.size;
			q->q.sdesc[last_desc].skb = skb;
		}

		txq_advance(&q->q, ndesc);
		written += ndesc;
		/* ring the doorbell in batches to amortize its cost */
		if (unlikely(written > 32)) {
			cxgb4_ring_tx_db(q->adap, &q->q, written);
			written = 0;
		}

		/* Reacquire the Pending Send Queue Lock so we can unlink the
		 * skb we've just successfully transferred to the TX Ring and
		 * loop for the next skb which may be at the head of the
		 * Pending Send Queue.
		 */
		spin_lock(&q->sendq.lock);
		__skb_unlink(skb, &q->sendq);
		/* immediate-data skbs were copied into the ring and can be
		 * freed now; mapped skbs are freed later via the sdesc entry
		 */
		if (is_ofld_imm(skb))
			kfree_skb(skb);
	}
	if (likely(written))
		cxgb4_ring_tx_db(q->adap, &q->q, written);

	/*Indicate that no thread is processing the Pending Send Queue
	 * currently.
	 */
	q->service_ofldq_running = false;
}
30558c2ecf20Sopenharmony_ci
30568c2ecf20Sopenharmony_ci/**
30578c2ecf20Sopenharmony_ci *	ofld_xmit - send a packet through an offload queue
30588c2ecf20Sopenharmony_ci *	@q: the Tx offload queue
30598c2ecf20Sopenharmony_ci *	@skb: the packet
30608c2ecf20Sopenharmony_ci *
30618c2ecf20Sopenharmony_ci *	Send an offload packet through an SGE offload queue.
30628c2ecf20Sopenharmony_ci */
30638c2ecf20Sopenharmony_cistatic int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
30648c2ecf20Sopenharmony_ci{
30658c2ecf20Sopenharmony_ci	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
30668c2ecf20Sopenharmony_ci	spin_lock(&q->sendq.lock);
30678c2ecf20Sopenharmony_ci
30688c2ecf20Sopenharmony_ci	/* Queue the new skb onto the Offload Queue's Pending Send Queue.  If
30698c2ecf20Sopenharmony_ci	 * that results in this new skb being the only one on the queue, start
30708c2ecf20Sopenharmony_ci	 * servicing it.  If there are other skbs already on the list, then
30718c2ecf20Sopenharmony_ci	 * either the queue is currently being processed or it's been stopped
30728c2ecf20Sopenharmony_ci	 * for some reason and it'll be restarted at a later time.  Restart
30738c2ecf20Sopenharmony_ci	 * paths are triggered by events like experiencing a DMA Mapping Error
30748c2ecf20Sopenharmony_ci	 * or filling the Hardware TX Ring.
30758c2ecf20Sopenharmony_ci	 */
30768c2ecf20Sopenharmony_ci	__skb_queue_tail(&q->sendq, skb);
30778c2ecf20Sopenharmony_ci	if (q->sendq.qlen == 1)
30788c2ecf20Sopenharmony_ci		service_ofldq(q);
30798c2ecf20Sopenharmony_ci
30808c2ecf20Sopenharmony_ci	spin_unlock(&q->sendq.lock);
30818c2ecf20Sopenharmony_ci	return NET_XMIT_SUCCESS;
30828c2ecf20Sopenharmony_ci}
30838c2ecf20Sopenharmony_ci
30848c2ecf20Sopenharmony_ci/**
30858c2ecf20Sopenharmony_ci *	restart_ofldq - restart a suspended offload queue
30868c2ecf20Sopenharmony_ci *	@t: pointer to the tasklet associated with this handler
30878c2ecf20Sopenharmony_ci *
30888c2ecf20Sopenharmony_ci *	Resumes transmission on a suspended Tx offload queue.
30898c2ecf20Sopenharmony_ci */
30908c2ecf20Sopenharmony_cistatic void restart_ofldq(struct tasklet_struct *t)
30918c2ecf20Sopenharmony_ci{
30928c2ecf20Sopenharmony_ci	struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
30938c2ecf20Sopenharmony_ci
30948c2ecf20Sopenharmony_ci	spin_lock(&q->sendq.lock);
30958c2ecf20Sopenharmony_ci	q->full = 0;            /* the queue actually is completely empty now */
30968c2ecf20Sopenharmony_ci	service_ofldq(q);
30978c2ecf20Sopenharmony_ci	spin_unlock(&q->sendq.lock);
30988c2ecf20Sopenharmony_ci}
30998c2ecf20Sopenharmony_ci
31008c2ecf20Sopenharmony_ci/**
31018c2ecf20Sopenharmony_ci *	skb_txq - return the Tx queue an offload packet should use
31028c2ecf20Sopenharmony_ci *	@skb: the packet
31038c2ecf20Sopenharmony_ci *
31048c2ecf20Sopenharmony_ci *	Returns the Tx queue an offload packet should use as indicated by bits
31058c2ecf20Sopenharmony_ci *	1-15 in the packet's queue_mapping.
31068c2ecf20Sopenharmony_ci */
31078c2ecf20Sopenharmony_cistatic inline unsigned int skb_txq(const struct sk_buff *skb)
31088c2ecf20Sopenharmony_ci{
31098c2ecf20Sopenharmony_ci	return skb->queue_mapping >> 1;
31108c2ecf20Sopenharmony_ci}
31118c2ecf20Sopenharmony_ci
31128c2ecf20Sopenharmony_ci/**
31138c2ecf20Sopenharmony_ci *	is_ctrl_pkt - return whether an offload packet is a control packet
31148c2ecf20Sopenharmony_ci *	@skb: the packet
31158c2ecf20Sopenharmony_ci *
31168c2ecf20Sopenharmony_ci *	Returns whether an offload packet should use an OFLD or a CTRL
31178c2ecf20Sopenharmony_ci *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
31188c2ecf20Sopenharmony_ci */
31198c2ecf20Sopenharmony_cistatic inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
31208c2ecf20Sopenharmony_ci{
31218c2ecf20Sopenharmony_ci	return skb->queue_mapping & 1;
31228c2ecf20Sopenharmony_ci}
31238c2ecf20Sopenharmony_ci
31248c2ecf20Sopenharmony_cistatic inline int uld_send(struct adapter *adap, struct sk_buff *skb,
31258c2ecf20Sopenharmony_ci			   unsigned int tx_uld_type)
31268c2ecf20Sopenharmony_ci{
31278c2ecf20Sopenharmony_ci	struct sge_uld_txq_info *txq_info;
31288c2ecf20Sopenharmony_ci	struct sge_uld_txq *txq;
31298c2ecf20Sopenharmony_ci	unsigned int idx = skb_txq(skb);
31308c2ecf20Sopenharmony_ci
31318c2ecf20Sopenharmony_ci	if (unlikely(is_ctrl_pkt(skb))) {
31328c2ecf20Sopenharmony_ci		/* Single ctrl queue is a requirement for LE workaround path */
31338c2ecf20Sopenharmony_ci		if (adap->tids.nsftids)
31348c2ecf20Sopenharmony_ci			idx = 0;
31358c2ecf20Sopenharmony_ci		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
31368c2ecf20Sopenharmony_ci	}
31378c2ecf20Sopenharmony_ci
31388c2ecf20Sopenharmony_ci	txq_info = adap->sge.uld_txq_info[tx_uld_type];
31398c2ecf20Sopenharmony_ci	if (unlikely(!txq_info)) {
31408c2ecf20Sopenharmony_ci		WARN_ON(true);
31418c2ecf20Sopenharmony_ci		kfree_skb(skb);
31428c2ecf20Sopenharmony_ci		return NET_XMIT_DROP;
31438c2ecf20Sopenharmony_ci	}
31448c2ecf20Sopenharmony_ci
31458c2ecf20Sopenharmony_ci	txq = &txq_info->uldtxq[idx];
31468c2ecf20Sopenharmony_ci	return ofld_xmit(txq, skb);
31478c2ecf20Sopenharmony_ci}
31488c2ecf20Sopenharmony_ci
31498c2ecf20Sopenharmony_ci/**
31508c2ecf20Sopenharmony_ci *	t4_ofld_send - send an offload packet
31518c2ecf20Sopenharmony_ci *	@adap: the adapter
31528c2ecf20Sopenharmony_ci *	@skb: the packet
31538c2ecf20Sopenharmony_ci *
31548c2ecf20Sopenharmony_ci *	Sends an offload packet.  We use the packet queue_mapping to select the
31558c2ecf20Sopenharmony_ci *	appropriate Tx queue as follows: bit 0 indicates whether the packet
31568c2ecf20Sopenharmony_ci *	should be sent as regular or control, bits 1-15 select the queue.
31578c2ecf20Sopenharmony_ci */
31588c2ecf20Sopenharmony_ciint t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
31598c2ecf20Sopenharmony_ci{
31608c2ecf20Sopenharmony_ci	int ret;
31618c2ecf20Sopenharmony_ci
31628c2ecf20Sopenharmony_ci	local_bh_disable();
31638c2ecf20Sopenharmony_ci	ret = uld_send(adap, skb, CXGB4_TX_OFLD);
31648c2ecf20Sopenharmony_ci	local_bh_enable();
31658c2ecf20Sopenharmony_ci	return ret;
31668c2ecf20Sopenharmony_ci}
31678c2ecf20Sopenharmony_ci
/**
 *	cxgb4_ofld_send - send an offload packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends an offload packet.  This is an exported version of
 *	@t4_ofld_send, intended for ULDs.
 */
int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_ofld_send(adap, skb);
}
EXPORT_SYMBOL(cxgb4_ofld_send);
31818c2ecf20Sopenharmony_ci
31828c2ecf20Sopenharmony_cistatic void *inline_tx_header(const void *src,
31838c2ecf20Sopenharmony_ci			      const struct sge_txq *q,
31848c2ecf20Sopenharmony_ci			      void *pos, int length)
31858c2ecf20Sopenharmony_ci{
31868c2ecf20Sopenharmony_ci	int left = (void *)q->stat - pos;
31878c2ecf20Sopenharmony_ci	u64 *p;
31888c2ecf20Sopenharmony_ci
31898c2ecf20Sopenharmony_ci	if (likely(length <= left)) {
31908c2ecf20Sopenharmony_ci		memcpy(pos, src, length);
31918c2ecf20Sopenharmony_ci		pos += length;
31928c2ecf20Sopenharmony_ci	} else {
31938c2ecf20Sopenharmony_ci		memcpy(pos, src, left);
31948c2ecf20Sopenharmony_ci		memcpy(q->desc, src + left, length - left);
31958c2ecf20Sopenharmony_ci		pos = (void *)q->desc + (length - left);
31968c2ecf20Sopenharmony_ci	}
31978c2ecf20Sopenharmony_ci	/* 0-pad to multiple of 16 */
31988c2ecf20Sopenharmony_ci	p = PTR_ALIGN(pos, 8);
31998c2ecf20Sopenharmony_ci	if ((uintptr_t)p & 8) {
32008c2ecf20Sopenharmony_ci		*p = 0;
32018c2ecf20Sopenharmony_ci		return p + 1;
32028c2ecf20Sopenharmony_ci	}
32038c2ecf20Sopenharmony_ci	return p;
32048c2ecf20Sopenharmony_ci}
32058c2ecf20Sopenharmony_ci
/**
 *      ofld_xmit_direct - copy a WR into offload queue
 *      @q: the Tx offload queue
 *      @src: location of WR
 *      @len: WR length
 *
 *      Copy an immediate WR into an uncontended SGE offload queue.
 *      Returns NET_XMIT_SUCCESS, or NET_XMIT_DROP when the WR is too big,
 *      the queue lock is contended, or the queue is busy/full -- the
 *      caller is expected to fall back to the skb-based path in that case.
 */
static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
			    unsigned int len)
{
	unsigned int ndesc;
	int credits;
	u64 *pos;

	/* Use the lower limit as the cut-off */
	if (len > MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		WARN_ON(1);
		return NET_XMIT_DROP;
	}

	/* Don't return NET_XMIT_CN here as the current
	 * implementation doesn't queue the request
	 * using an skb when the following conditions not met
	 */
	if (!spin_trylock(&q->sendq.lock))
		return NET_XMIT_DROP;

	/* only the fast path is supported: the queue must be idle, with no
	 * pending skbs and no service_ofldq() instance running
	 */
	if (q->full || !skb_queue_empty(&q->sendq) ||
	    q->service_ofldq_running) {
		spin_unlock(&q->sendq.lock);
		return NET_XMIT_DROP;
	}
	ndesc = flits_to_desc(DIV_ROUND_UP(len, 8));
	credits = txq_avail(&q->q) - ndesc;
	pos = (u64 *)&q->q.desc[q->q.pidx];

	/* ofldtxq_stop modifies WR header in-situ */
	inline_tx_header(src, &q->q, pos, len);
	/* copy must happen before the stop check: ofldtxq_stop() patches the
	 * WR that now lives in the ring at @pos to request a wakeup
	 */
	if (unlikely(credits < TXQ_STOP_THRES))
		ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
	txq_advance(&q->q, ndesc);
	cxgb4_ring_tx_db(q->adap, &q->q, ndesc);

	spin_unlock(&q->sendq.lock);
	return NET_XMIT_SUCCESS;
}
32538c2ecf20Sopenharmony_ci
32548c2ecf20Sopenharmony_ciint cxgb4_immdata_send(struct net_device *dev, unsigned int idx,
32558c2ecf20Sopenharmony_ci		       const void *src, unsigned int len)
32568c2ecf20Sopenharmony_ci{
32578c2ecf20Sopenharmony_ci	struct sge_uld_txq_info *txq_info;
32588c2ecf20Sopenharmony_ci	struct sge_uld_txq *txq;
32598c2ecf20Sopenharmony_ci	struct adapter *adap;
32608c2ecf20Sopenharmony_ci	int ret;
32618c2ecf20Sopenharmony_ci
32628c2ecf20Sopenharmony_ci	adap = netdev2adap(dev);
32638c2ecf20Sopenharmony_ci
32648c2ecf20Sopenharmony_ci	local_bh_disable();
32658c2ecf20Sopenharmony_ci	txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
32668c2ecf20Sopenharmony_ci	if (unlikely(!txq_info)) {
32678c2ecf20Sopenharmony_ci		WARN_ON(true);
32688c2ecf20Sopenharmony_ci		local_bh_enable();
32698c2ecf20Sopenharmony_ci		return NET_XMIT_DROP;
32708c2ecf20Sopenharmony_ci	}
32718c2ecf20Sopenharmony_ci	txq = &txq_info->uldtxq[idx];
32728c2ecf20Sopenharmony_ci
32738c2ecf20Sopenharmony_ci	ret = ofld_xmit_direct(txq, src, len);
32748c2ecf20Sopenharmony_ci	local_bh_enable();
32758c2ecf20Sopenharmony_ci	return net_xmit_eval(ret);
32768c2ecf20Sopenharmony_ci}
32778c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_immdata_send);
32788c2ecf20Sopenharmony_ci
32798c2ecf20Sopenharmony_ci/**
32808c2ecf20Sopenharmony_ci *	t4_crypto_send - send crypto packet
32818c2ecf20Sopenharmony_ci *	@adap: the adapter
32828c2ecf20Sopenharmony_ci *	@skb: the packet
32838c2ecf20Sopenharmony_ci *
32848c2ecf20Sopenharmony_ci *	Sends crypto packet.  We use the packet queue_mapping to select the
32858c2ecf20Sopenharmony_ci *	appropriate Tx queue as follows: bit 0 indicates whether the packet
32868c2ecf20Sopenharmony_ci *	should be sent as regular or control, bits 1-15 select the queue.
32878c2ecf20Sopenharmony_ci */
32888c2ecf20Sopenharmony_cistatic int t4_crypto_send(struct adapter *adap, struct sk_buff *skb)
32898c2ecf20Sopenharmony_ci{
32908c2ecf20Sopenharmony_ci	int ret;
32918c2ecf20Sopenharmony_ci
32928c2ecf20Sopenharmony_ci	local_bh_disable();
32938c2ecf20Sopenharmony_ci	ret = uld_send(adap, skb, CXGB4_TX_CRYPTO);
32948c2ecf20Sopenharmony_ci	local_bh_enable();
32958c2ecf20Sopenharmony_ci	return ret;
32968c2ecf20Sopenharmony_ci}
32978c2ecf20Sopenharmony_ci
/**
 *	cxgb4_crypto_send - send crypto packet
 *	@dev: the net device
 *	@skb: the packet
 *
 *	Sends crypto packet.  This is an exported version of
 *	@t4_crypto_send, intended for ULDs.
 */
int cxgb4_crypto_send(struct net_device *dev, struct sk_buff *skb)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_crypto_send(adap, skb);
}
EXPORT_SYMBOL(cxgb4_crypto_send);
33118c2ecf20Sopenharmony_ci
33128c2ecf20Sopenharmony_cistatic inline void copy_frags(struct sk_buff *skb,
33138c2ecf20Sopenharmony_ci			      const struct pkt_gl *gl, unsigned int offset)
33148c2ecf20Sopenharmony_ci{
33158c2ecf20Sopenharmony_ci	int i;
33168c2ecf20Sopenharmony_ci
33178c2ecf20Sopenharmony_ci	/* usually there's just one frag */
33188c2ecf20Sopenharmony_ci	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
33198c2ecf20Sopenharmony_ci			     gl->frags[0].offset + offset,
33208c2ecf20Sopenharmony_ci			     gl->frags[0].size - offset);
33218c2ecf20Sopenharmony_ci	skb_shinfo(skb)->nr_frags = gl->nfrags;
33228c2ecf20Sopenharmony_ci	for (i = 1; i < gl->nfrags; i++)
33238c2ecf20Sopenharmony_ci		__skb_fill_page_desc(skb, i, gl->frags[i].page,
33248c2ecf20Sopenharmony_ci				     gl->frags[i].offset,
33258c2ecf20Sopenharmony_ci				     gl->frags[i].size);
33268c2ecf20Sopenharmony_ci
33278c2ecf20Sopenharmony_ci	/* get a reference to the last page, we don't own it */
33288c2ecf20Sopenharmony_ci	get_page(gl->frags[gl->nfrags - 1].page);
33298c2ecf20Sopenharmony_ci}
33308c2ecf20Sopenharmony_ci
33318c2ecf20Sopenharmony_ci/**
33328c2ecf20Sopenharmony_ci *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
33338c2ecf20Sopenharmony_ci *	@gl: the gather list
33348c2ecf20Sopenharmony_ci *	@skb_len: size of sk_buff main body if it carries fragments
33358c2ecf20Sopenharmony_ci *	@pull_len: amount of data to move to the sk_buff's main body
33368c2ecf20Sopenharmony_ci *
33378c2ecf20Sopenharmony_ci *	Builds an sk_buff from the given packet gather list.  Returns the
33388c2ecf20Sopenharmony_ci *	sk_buff or %NULL if sk_buff allocation failed.
33398c2ecf20Sopenharmony_ci */
33408c2ecf20Sopenharmony_cistruct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
33418c2ecf20Sopenharmony_ci				   unsigned int skb_len, unsigned int pull_len)
33428c2ecf20Sopenharmony_ci{
33438c2ecf20Sopenharmony_ci	struct sk_buff *skb;
33448c2ecf20Sopenharmony_ci
33458c2ecf20Sopenharmony_ci	/*
33468c2ecf20Sopenharmony_ci	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
33478c2ecf20Sopenharmony_ci	 * size, which is expected since buffers are at least PAGE_SIZEd.
33488c2ecf20Sopenharmony_ci	 * In this case packets up to RX_COPY_THRES have only one fragment.
33498c2ecf20Sopenharmony_ci	 */
33508c2ecf20Sopenharmony_ci	if (gl->tot_len <= RX_COPY_THRES) {
33518c2ecf20Sopenharmony_ci		skb = dev_alloc_skb(gl->tot_len);
33528c2ecf20Sopenharmony_ci		if (unlikely(!skb))
33538c2ecf20Sopenharmony_ci			goto out;
33548c2ecf20Sopenharmony_ci		__skb_put(skb, gl->tot_len);
33558c2ecf20Sopenharmony_ci		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
33568c2ecf20Sopenharmony_ci	} else {
33578c2ecf20Sopenharmony_ci		skb = dev_alloc_skb(skb_len);
33588c2ecf20Sopenharmony_ci		if (unlikely(!skb))
33598c2ecf20Sopenharmony_ci			goto out;
33608c2ecf20Sopenharmony_ci		__skb_put(skb, pull_len);
33618c2ecf20Sopenharmony_ci		skb_copy_to_linear_data(skb, gl->va, pull_len);
33628c2ecf20Sopenharmony_ci
33638c2ecf20Sopenharmony_ci		copy_frags(skb, gl, pull_len);
33648c2ecf20Sopenharmony_ci		skb->len = gl->tot_len;
33658c2ecf20Sopenharmony_ci		skb->data_len = skb->len - pull_len;
33668c2ecf20Sopenharmony_ci		skb->truesize += skb->data_len;
33678c2ecf20Sopenharmony_ci	}
33688c2ecf20Sopenharmony_ciout:	return skb;
33698c2ecf20Sopenharmony_ci}
33708c2ecf20Sopenharmony_ciEXPORT_SYMBOL(cxgb4_pktgl_to_skb);
33718c2ecf20Sopenharmony_ci
33728c2ecf20Sopenharmony_ci/**
33738c2ecf20Sopenharmony_ci *	t4_pktgl_free - free a packet gather list
33748c2ecf20Sopenharmony_ci *	@gl: the gather list
33758c2ecf20Sopenharmony_ci *
33768c2ecf20Sopenharmony_ci *	Releases the pages of a packet gather list.  We do not own the last
33778c2ecf20Sopenharmony_ci *	page on the list and do not free it.
33788c2ecf20Sopenharmony_ci */
33798c2ecf20Sopenharmony_cistatic void t4_pktgl_free(const struct pkt_gl *gl)
33808c2ecf20Sopenharmony_ci{
33818c2ecf20Sopenharmony_ci	int n;
33828c2ecf20Sopenharmony_ci	const struct page_frag *p;
33838c2ecf20Sopenharmony_ci
33848c2ecf20Sopenharmony_ci	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
33858c2ecf20Sopenharmony_ci		put_page(p->page);
33868c2ecf20Sopenharmony_ci}
33878c2ecf20Sopenharmony_ci
33888c2ecf20Sopenharmony_ci/*
33898c2ecf20Sopenharmony_ci * Process an MPS trace packet.  Give it an unused protocol number so it won't
33908c2ecf20Sopenharmony_ci * be delivered to anyone and send it to the stack for capture.
33918c2ecf20Sopenharmony_ci */
33928c2ecf20Sopenharmony_cistatic noinline int handle_trace_pkt(struct adapter *adap,
33938c2ecf20Sopenharmony_ci				     const struct pkt_gl *gl)
33948c2ecf20Sopenharmony_ci{
33958c2ecf20Sopenharmony_ci	struct sk_buff *skb;
33968c2ecf20Sopenharmony_ci
33978c2ecf20Sopenharmony_ci	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
33988c2ecf20Sopenharmony_ci	if (unlikely(!skb)) {
33998c2ecf20Sopenharmony_ci		t4_pktgl_free(gl);
34008c2ecf20Sopenharmony_ci		return 0;
34018c2ecf20Sopenharmony_ci	}
34028c2ecf20Sopenharmony_ci
34038c2ecf20Sopenharmony_ci	if (is_t4(adap->params.chip))
34048c2ecf20Sopenharmony_ci		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
34058c2ecf20Sopenharmony_ci	else
34068c2ecf20Sopenharmony_ci		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
34078c2ecf20Sopenharmony_ci
34088c2ecf20Sopenharmony_ci	skb_reset_mac_header(skb);
34098c2ecf20Sopenharmony_ci	skb->protocol = htons(0xffff);
34108c2ecf20Sopenharmony_ci	skb->dev = adap->port[0];
34118c2ecf20Sopenharmony_ci	netif_receive_skb(skb);
34128c2ecf20Sopenharmony_ci	return 0;
34138c2ecf20Sopenharmony_ci}
34148c2ecf20Sopenharmony_ci
34158c2ecf20Sopenharmony_ci/**
34168c2ecf20Sopenharmony_ci * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
34178c2ecf20Sopenharmony_ci * @adap: the adapter
34188c2ecf20Sopenharmony_ci * @hwtstamps: time stamp structure to update
34198c2ecf20Sopenharmony_ci * @sgetstamp: 60bit iqe timestamp
34208c2ecf20Sopenharmony_ci *
34218c2ecf20Sopenharmony_ci * Every ingress queue entry has the 60-bit timestamp, convert that timestamp
34228c2ecf20Sopenharmony_ci * which is in Core Clock ticks into ktime_t and assign it
34238c2ecf20Sopenharmony_ci **/
34248c2ecf20Sopenharmony_cistatic void cxgb4_sgetim_to_hwtstamp(struct adapter *adap,
34258c2ecf20Sopenharmony_ci				     struct skb_shared_hwtstamps *hwtstamps,
34268c2ecf20Sopenharmony_ci				     u64 sgetstamp)
34278c2ecf20Sopenharmony_ci{
34288c2ecf20Sopenharmony_ci	u64 ns;
34298c2ecf20Sopenharmony_ci	u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2);
34308c2ecf20Sopenharmony_ci
34318c2ecf20Sopenharmony_ci	ns = div_u64(tmp, adap->params.vpd.cclk);
34328c2ecf20Sopenharmony_ci
34338c2ecf20Sopenharmony_ci	memset(hwtstamps, 0, sizeof(*hwtstamps));
34348c2ecf20Sopenharmony_ci	hwtstamps->hwtstamp = ns_to_ktime(ns);
34358c2ecf20Sopenharmony_ci}
34368c2ecf20Sopenharmony_ci
/* do_gro - feed an Rx packet's fragments into the GRO engine
 * @rxq: the Rx queue the packet arrived on
 * @gl: the packet gather list
 * @pkt: the parsed CPL_RX_PKT message
 * @tnl_hdr_len: nonzero if the packet carries an inner (tunnel) header
 *	whose checksum was also verified by hardware
 *
 * Builds a frag-only skb via napi_get_frags() and hands it to
 * napi_gro_frags(); on skb allocation failure the gather list pages are
 * released and the packet is counted as dropped.
 */
static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
		   const struct cpl_rx_pkt *pkt, unsigned long tnl_hdr_len)
{
	struct adapter *adapter = rxq->rspq.adap;
	struct sge *s = &adapter->sge;
	struct port_info *pi;
	int ret;
	struct sk_buff *skb;

	skb = napi_get_frags(&rxq->rspq.napi);
	if (unlikely(!skb)) {
		t4_pktgl_free(gl);
		rxq->stats.rx_drops++;
		return;
	}

	/* skip the hardware-inserted pad at the front of the buffer */
	copy_frags(skb, gl, s->pktshift);
	if (tnl_hdr_len)
		skb->csum_level = 1;	/* inner checksum verified too */
	skb->len = gl->tot_len - s->pktshift;
	skb->data_len = skb->len;
	skb->truesize += skb->data_len;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rxq->rspq.idx);
	pi = netdev_priv(skb->dev);
	if (pi->rxtstamp)
		cxgb4_sgetim_to_hwtstamp(adapter, skb_hwtstamps(skb),
					 gl->sgetstamp);
	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
			     PKT_HASH_TYPE_L3);

	if (unlikely(pkt->vlan_ex)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
		rxq->stats.vlan_ex++;
	}
	ret = napi_gro_frags(&rxq->rspq.napi);
	/* account the packet by GRO outcome: held vs merged into a flow */
	if (ret == GRO_HELD)
		rxq->stats.lro_pkts++;
	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
		rxq->stats.lro_merged++;
	rxq->stats.pkts++;
	rxq->stats.rx_cso++;
}
34818c2ecf20Sopenharmony_ci
34828c2ecf20Sopenharmony_cienum {
34838c2ecf20Sopenharmony_ci	RX_NON_PTP_PKT = 0,
34848c2ecf20Sopenharmony_ci	RX_PTP_PKT_SUC = 1,
34858c2ecf20Sopenharmony_ci	RX_PTP_PKT_ERR = 2
34868c2ecf20Sopenharmony_ci};
34878c2ecf20Sopenharmony_ci
34888c2ecf20Sopenharmony_ci/**
34898c2ecf20Sopenharmony_ci *     t4_systim_to_hwstamp - read hardware time stamp
34908c2ecf20Sopenharmony_ci *     @adapter: the adapter
34918c2ecf20Sopenharmony_ci *     @skb: the packet
34928c2ecf20Sopenharmony_ci *
34938c2ecf20Sopenharmony_ci *     Read Time Stamp from MPS packet and insert in skb which
34948c2ecf20Sopenharmony_ci *     is forwarded to PTP application
34958c2ecf20Sopenharmony_ci */
34968c2ecf20Sopenharmony_cistatic noinline int t4_systim_to_hwstamp(struct adapter *adapter,
34978c2ecf20Sopenharmony_ci					 struct sk_buff *skb)
34988c2ecf20Sopenharmony_ci{
34998c2ecf20Sopenharmony_ci	struct skb_shared_hwtstamps *hwtstamps;
35008c2ecf20Sopenharmony_ci	struct cpl_rx_mps_pkt *cpl = NULL;
35018c2ecf20Sopenharmony_ci	unsigned char *data;
35028c2ecf20Sopenharmony_ci	int offset;
35038c2ecf20Sopenharmony_ci
35048c2ecf20Sopenharmony_ci	cpl = (struct cpl_rx_mps_pkt *)skb->data;
35058c2ecf20Sopenharmony_ci	if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) &
35068c2ecf20Sopenharmony_ci	     X_CPL_RX_MPS_PKT_TYPE_PTP))
35078c2ecf20Sopenharmony_ci		return RX_PTP_PKT_ERR;
35088c2ecf20Sopenharmony_ci
35098c2ecf20Sopenharmony_ci	data = skb->data + sizeof(*cpl);
35108c2ecf20Sopenharmony_ci	skb_pull(skb, 2 * sizeof(u64) + sizeof(struct cpl_rx_mps_pkt));
35118c2ecf20Sopenharmony_ci	offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN;
35128c2ecf20Sopenharmony_ci	if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short))
35138c2ecf20Sopenharmony_ci		return RX_PTP_PKT_ERR;
35148c2ecf20Sopenharmony_ci
35158c2ecf20Sopenharmony_ci	hwtstamps = skb_hwtstamps(skb);
35168c2ecf20Sopenharmony_ci	memset(hwtstamps, 0, sizeof(*hwtstamps));
35178c2ecf20Sopenharmony_ci	hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data));
35188c2ecf20Sopenharmony_ci
35198c2ecf20Sopenharmony_ci	return RX_PTP_PKT_SUC;
35208c2ecf20Sopenharmony_ci}
35218c2ecf20Sopenharmony_ci
35228c2ecf20Sopenharmony_ci/**
35238c2ecf20Sopenharmony_ci *     t4_rx_hststamp - Recv PTP Event Message
35248c2ecf20Sopenharmony_ci *     @adapter: the adapter
35258c2ecf20Sopenharmony_ci *     @rsp: the response queue descriptor holding the RX_PKT message
35268c2ecf20Sopenharmony_ci *     @rxq: the response queue holding the RX_PKT message
35278c2ecf20Sopenharmony_ci *     @skb: the packet
35288c2ecf20Sopenharmony_ci *
35298c2ecf20Sopenharmony_ci *     PTP enabled and MPS packet, read HW timestamp
35308c2ecf20Sopenharmony_ci */
35318c2ecf20Sopenharmony_cistatic int t4_rx_hststamp(struct adapter *adapter, const __be64 *rsp,
35328c2ecf20Sopenharmony_ci			  struct sge_eth_rxq *rxq, struct sk_buff *skb)
35338c2ecf20Sopenharmony_ci{
35348c2ecf20Sopenharmony_ci	int ret;
35358c2ecf20Sopenharmony_ci
35368c2ecf20Sopenharmony_ci	if (unlikely((*(u8 *)rsp == CPL_RX_MPS_PKT) &&
35378c2ecf20Sopenharmony_ci		     !is_t4(adapter->params.chip))) {
35388c2ecf20Sopenharmony_ci		ret = t4_systim_to_hwstamp(adapter, skb);
35398c2ecf20Sopenharmony_ci		if (ret == RX_PTP_PKT_ERR) {
35408c2ecf20Sopenharmony_ci			kfree_skb(skb);
35418c2ecf20Sopenharmony_ci			rxq->stats.rx_drops++;
35428c2ecf20Sopenharmony_ci		}
35438c2ecf20Sopenharmony_ci		return ret;
35448c2ecf20Sopenharmony_ci	}
35458c2ecf20Sopenharmony_ci	return RX_NON_PTP_PKT;
35468c2ecf20Sopenharmony_ci}
35478c2ecf20Sopenharmony_ci
35488c2ecf20Sopenharmony_ci/**
35498c2ecf20Sopenharmony_ci *      t4_tx_hststamp - Loopback PTP Transmit Event Message
35508c2ecf20Sopenharmony_ci *      @adapter: the adapter
35518c2ecf20Sopenharmony_ci *      @skb: the packet
35528c2ecf20Sopenharmony_ci *      @dev: the ingress net device
35538c2ecf20Sopenharmony_ci *
35548c2ecf20Sopenharmony_ci *      Read hardware timestamp for the loopback PTP Tx event message
35558c2ecf20Sopenharmony_ci */
35568c2ecf20Sopenharmony_cistatic int t4_tx_hststamp(struct adapter *adapter, struct sk_buff *skb,
35578c2ecf20Sopenharmony_ci			  struct net_device *dev)
35588c2ecf20Sopenharmony_ci{
35598c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(dev);
35608c2ecf20Sopenharmony_ci
35618c2ecf20Sopenharmony_ci	if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) {
35628c2ecf20Sopenharmony_ci		cxgb4_ptp_read_hwstamp(adapter, pi);
35638c2ecf20Sopenharmony_ci		kfree_skb(skb);
35648c2ecf20Sopenharmony_ci		return 0;
35658c2ecf20Sopenharmony_ci	}
35668c2ecf20Sopenharmony_ci	return 1;
35678c2ecf20Sopenharmony_ci}
35688c2ecf20Sopenharmony_ci
35698c2ecf20Sopenharmony_ci/**
35708c2ecf20Sopenharmony_ci *	t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
35718c2ecf20Sopenharmony_ci *	@rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
35728c2ecf20Sopenharmony_ci *	@rsp: Response Entry pointer into Response Queue
35738c2ecf20Sopenharmony_ci *	@gl: Gather List pointer
35748c2ecf20Sopenharmony_ci *
35758c2ecf20Sopenharmony_ci *	For adapters which support the SGE Doorbell Queue Timer facility,
35768c2ecf20Sopenharmony_ci *	we configure the Ethernet TX Queues to send CIDX Updates to the
35778c2ecf20Sopenharmony_ci *	Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
35788c2ecf20Sopenharmony_ci *	messages.  This adds a small load to PCIe Link RX bandwidth and,
35798c2ecf20Sopenharmony_ci *	potentially, higher CPU Interrupt load, but allows us to respond
35808c2ecf20Sopenharmony_ci *	much more quickly to the CIDX Updates.  This is important for
35818c2ecf20Sopenharmony_ci *	Upper Layer Software which isn't willing to have a large amount
35828c2ecf20Sopenharmony_ci *	of TX Data outstanding before receiving DMA Completions.
35838c2ecf20Sopenharmony_ci */
35848c2ecf20Sopenharmony_cistatic void t4_tx_completion_handler(struct sge_rspq *rspq,
35858c2ecf20Sopenharmony_ci				     const __be64 *rsp,
35868c2ecf20Sopenharmony_ci				     const struct pkt_gl *gl)
35878c2ecf20Sopenharmony_ci{
35888c2ecf20Sopenharmony_ci	u8 opcode = ((const struct rss_header *)rsp)->opcode;
35898c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(rspq->netdev);
35908c2ecf20Sopenharmony_ci	struct adapter *adapter = rspq->adap;
35918c2ecf20Sopenharmony_ci	struct sge *s = &adapter->sge;
35928c2ecf20Sopenharmony_ci	struct sge_eth_txq *txq;
35938c2ecf20Sopenharmony_ci
35948c2ecf20Sopenharmony_ci	/* skip RSS header */
35958c2ecf20Sopenharmony_ci	rsp++;
35968c2ecf20Sopenharmony_ci
35978c2ecf20Sopenharmony_ci	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
35988c2ecf20Sopenharmony_ci	 */
35998c2ecf20Sopenharmony_ci	if (unlikely(opcode == CPL_FW4_MSG &&
36008c2ecf20Sopenharmony_ci		     ((const struct cpl_fw4_msg *)rsp)->type ==
36018c2ecf20Sopenharmony_ci							FW_TYPE_RSSCPL)) {
36028c2ecf20Sopenharmony_ci		rsp++;
36038c2ecf20Sopenharmony_ci		opcode = ((const struct rss_header *)rsp)->opcode;
36048c2ecf20Sopenharmony_ci		rsp++;
36058c2ecf20Sopenharmony_ci	}
36068c2ecf20Sopenharmony_ci
36078c2ecf20Sopenharmony_ci	if (unlikely(opcode != CPL_SGE_EGR_UPDATE)) {
36088c2ecf20Sopenharmony_ci		pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n",
36098c2ecf20Sopenharmony_ci			__func__, opcode);
36108c2ecf20Sopenharmony_ci		return;
36118c2ecf20Sopenharmony_ci	}
36128c2ecf20Sopenharmony_ci
36138c2ecf20Sopenharmony_ci	txq = &s->ethtxq[pi->first_qset + rspq->idx];
36148c2ecf20Sopenharmony_ci	t4_sge_eth_txq_egress_update(adapter, txq, -1);
36158c2ecf20Sopenharmony_ci}
36168c2ecf20Sopenharmony_ci
36178c2ecf20Sopenharmony_cistatic int cxgb4_validate_lb_pkt(struct port_info *pi, const struct pkt_gl *si)
36188c2ecf20Sopenharmony_ci{
36198c2ecf20Sopenharmony_ci	struct adapter *adap = pi->adapter;
36208c2ecf20Sopenharmony_ci	struct cxgb4_ethtool_lb_test *lb;
36218c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
36228c2ecf20Sopenharmony_ci	struct net_device *netdev;
36238c2ecf20Sopenharmony_ci	u8 *data;
36248c2ecf20Sopenharmony_ci	int i;
36258c2ecf20Sopenharmony_ci
36268c2ecf20Sopenharmony_ci	netdev = adap->port[pi->port_id];
36278c2ecf20Sopenharmony_ci	lb = &pi->ethtool_lb;
36288c2ecf20Sopenharmony_ci	data = si->va + s->pktshift;
36298c2ecf20Sopenharmony_ci
36308c2ecf20Sopenharmony_ci	i = ETH_ALEN;
36318c2ecf20Sopenharmony_ci	if (!ether_addr_equal(data + i, netdev->dev_addr))
36328c2ecf20Sopenharmony_ci		return -1;
36338c2ecf20Sopenharmony_ci
36348c2ecf20Sopenharmony_ci	i += ETH_ALEN;
36358c2ecf20Sopenharmony_ci	if (strcmp(&data[i], CXGB4_SELFTEST_LB_STR))
36368c2ecf20Sopenharmony_ci		lb->result = -EIO;
36378c2ecf20Sopenharmony_ci
36388c2ecf20Sopenharmony_ci	complete(&lb->completion);
36398c2ecf20Sopenharmony_ci	return 0;
36408c2ecf20Sopenharmony_ci}
36418c2ecf20Sopenharmony_ci
36428c2ecf20Sopenharmony_ci/**
36438c2ecf20Sopenharmony_ci *	t4_ethrx_handler - process an ingress ethernet packet
36448c2ecf20Sopenharmony_ci *	@q: the response queue that received the packet
36458c2ecf20Sopenharmony_ci *	@rsp: the response queue descriptor holding the RX_PKT message
36468c2ecf20Sopenharmony_ci *	@si: the gather list of packet fragments
36478c2ecf20Sopenharmony_ci *
36488c2ecf20Sopenharmony_ci *	Process an ingress ethernet packet and deliver it to the stack.
36498c2ecf20Sopenharmony_ci */
36508c2ecf20Sopenharmony_ciint t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
36518c2ecf20Sopenharmony_ci		     const struct pkt_gl *si)
36528c2ecf20Sopenharmony_ci{
36538c2ecf20Sopenharmony_ci	bool csum_ok;
36548c2ecf20Sopenharmony_ci	struct sk_buff *skb;
36558c2ecf20Sopenharmony_ci	const struct cpl_rx_pkt *pkt;
36568c2ecf20Sopenharmony_ci	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
36578c2ecf20Sopenharmony_ci	struct adapter *adapter = q->adap;
36588c2ecf20Sopenharmony_ci	struct sge *s = &q->adap->sge;
36598c2ecf20Sopenharmony_ci	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
36608c2ecf20Sopenharmony_ci			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
36618c2ecf20Sopenharmony_ci	u16 err_vec, tnl_hdr_len = 0;
36628c2ecf20Sopenharmony_ci	struct port_info *pi;
36638c2ecf20Sopenharmony_ci	int ret = 0;
36648c2ecf20Sopenharmony_ci
36658c2ecf20Sopenharmony_ci	pi = netdev_priv(q->netdev);
36668c2ecf20Sopenharmony_ci	/* If we're looking at TX Queue CIDX Update, handle that separately
36678c2ecf20Sopenharmony_ci	 * and return.
36688c2ecf20Sopenharmony_ci	 */
36698c2ecf20Sopenharmony_ci	if (unlikely((*(u8 *)rsp == CPL_FW4_MSG) ||
36708c2ecf20Sopenharmony_ci		     (*(u8 *)rsp == CPL_SGE_EGR_UPDATE))) {
36718c2ecf20Sopenharmony_ci		t4_tx_completion_handler(q, rsp, si);
36728c2ecf20Sopenharmony_ci		return 0;
36738c2ecf20Sopenharmony_ci	}
36748c2ecf20Sopenharmony_ci
36758c2ecf20Sopenharmony_ci	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
36768c2ecf20Sopenharmony_ci		return handle_trace_pkt(q->adap, si);
36778c2ecf20Sopenharmony_ci
36788c2ecf20Sopenharmony_ci	pkt = (const struct cpl_rx_pkt *)rsp;
36798c2ecf20Sopenharmony_ci	/* Compressed error vector is enabled for T6 only */
36808c2ecf20Sopenharmony_ci	if (q->adap->params.tp.rx_pkt_encap) {
36818c2ecf20Sopenharmony_ci		err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec));
36828c2ecf20Sopenharmony_ci		tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec));
36838c2ecf20Sopenharmony_ci	} else {
36848c2ecf20Sopenharmony_ci		err_vec = be16_to_cpu(pkt->err_vec);
36858c2ecf20Sopenharmony_ci	}
36868c2ecf20Sopenharmony_ci
36878c2ecf20Sopenharmony_ci	csum_ok = pkt->csum_calc && !err_vec &&
36888c2ecf20Sopenharmony_ci		  (q->netdev->features & NETIF_F_RXCSUM);
36898c2ecf20Sopenharmony_ci
36908c2ecf20Sopenharmony_ci	if (err_vec)
36918c2ecf20Sopenharmony_ci		rxq->stats.bad_rx_pkts++;
36928c2ecf20Sopenharmony_ci
36938c2ecf20Sopenharmony_ci	if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) {
36948c2ecf20Sopenharmony_ci		ret = cxgb4_validate_lb_pkt(pi, si);
36958c2ecf20Sopenharmony_ci		if (!ret)
36968c2ecf20Sopenharmony_ci			return 0;
36978c2ecf20Sopenharmony_ci	}
36988c2ecf20Sopenharmony_ci
36998c2ecf20Sopenharmony_ci	if (((pkt->l2info & htonl(RXF_TCP_F)) ||
37008c2ecf20Sopenharmony_ci	     tnl_hdr_len) &&
37018c2ecf20Sopenharmony_ci	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
37028c2ecf20Sopenharmony_ci		do_gro(rxq, si, pkt, tnl_hdr_len);
37038c2ecf20Sopenharmony_ci		return 0;
37048c2ecf20Sopenharmony_ci	}
37058c2ecf20Sopenharmony_ci
37068c2ecf20Sopenharmony_ci	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
37078c2ecf20Sopenharmony_ci	if (unlikely(!skb)) {
37088c2ecf20Sopenharmony_ci		t4_pktgl_free(si);
37098c2ecf20Sopenharmony_ci		rxq->stats.rx_drops++;
37108c2ecf20Sopenharmony_ci		return 0;
37118c2ecf20Sopenharmony_ci	}
37128c2ecf20Sopenharmony_ci
37138c2ecf20Sopenharmony_ci	/* Handle PTP Event Rx packet */
37148c2ecf20Sopenharmony_ci	if (unlikely(pi->ptp_enable)) {
37158c2ecf20Sopenharmony_ci		ret = t4_rx_hststamp(adapter, rsp, rxq, skb);
37168c2ecf20Sopenharmony_ci		if (ret == RX_PTP_PKT_ERR)
37178c2ecf20Sopenharmony_ci			return 0;
37188c2ecf20Sopenharmony_ci	}
37198c2ecf20Sopenharmony_ci	if (likely(!ret))
37208c2ecf20Sopenharmony_ci		__skb_pull(skb, s->pktshift); /* remove ethernet header pad */
37218c2ecf20Sopenharmony_ci
37228c2ecf20Sopenharmony_ci	/* Handle the PTP Event Tx Loopback packet */
37238c2ecf20Sopenharmony_ci	if (unlikely(pi->ptp_enable && !ret &&
37248c2ecf20Sopenharmony_ci		     (pkt->l2info & htonl(RXF_UDP_F)) &&
37258c2ecf20Sopenharmony_ci		     cxgb4_ptp_is_ptp_rx(skb))) {
37268c2ecf20Sopenharmony_ci		if (!t4_tx_hststamp(adapter, skb, q->netdev))
37278c2ecf20Sopenharmony_ci			return 0;
37288c2ecf20Sopenharmony_ci	}
37298c2ecf20Sopenharmony_ci
37308c2ecf20Sopenharmony_ci	skb->protocol = eth_type_trans(skb, q->netdev);
37318c2ecf20Sopenharmony_ci	skb_record_rx_queue(skb, q->idx);
37328c2ecf20Sopenharmony_ci	if (skb->dev->features & NETIF_F_RXHASH)
37338c2ecf20Sopenharmony_ci		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
37348c2ecf20Sopenharmony_ci			     PKT_HASH_TYPE_L3);
37358c2ecf20Sopenharmony_ci
37368c2ecf20Sopenharmony_ci	rxq->stats.pkts++;
37378c2ecf20Sopenharmony_ci
37388c2ecf20Sopenharmony_ci	if (pi->rxtstamp)
37398c2ecf20Sopenharmony_ci		cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
37408c2ecf20Sopenharmony_ci					 si->sgetstamp);
37418c2ecf20Sopenharmony_ci	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
37428c2ecf20Sopenharmony_ci		if (!pkt->ip_frag) {
37438c2ecf20Sopenharmony_ci			skb->ip_summed = CHECKSUM_UNNECESSARY;
37448c2ecf20Sopenharmony_ci			rxq->stats.rx_cso++;
37458c2ecf20Sopenharmony_ci		} else if (pkt->l2info & htonl(RXF_IP_F)) {
37468c2ecf20Sopenharmony_ci			__sum16 c = (__force __sum16)pkt->csum;
37478c2ecf20Sopenharmony_ci			skb->csum = csum_unfold(c);
37488c2ecf20Sopenharmony_ci
37498c2ecf20Sopenharmony_ci			if (tnl_hdr_len) {
37508c2ecf20Sopenharmony_ci				skb->ip_summed = CHECKSUM_UNNECESSARY;
37518c2ecf20Sopenharmony_ci				skb->csum_level = 1;
37528c2ecf20Sopenharmony_ci			} else {
37538c2ecf20Sopenharmony_ci				skb->ip_summed = CHECKSUM_COMPLETE;
37548c2ecf20Sopenharmony_ci			}
37558c2ecf20Sopenharmony_ci			rxq->stats.rx_cso++;
37568c2ecf20Sopenharmony_ci		}
37578c2ecf20Sopenharmony_ci	} else {
37588c2ecf20Sopenharmony_ci		skb_checksum_none_assert(skb);
37598c2ecf20Sopenharmony_ci#ifdef CONFIG_CHELSIO_T4_FCOE
37608c2ecf20Sopenharmony_ci#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
37618c2ecf20Sopenharmony_ci			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
37628c2ecf20Sopenharmony_ci
37638c2ecf20Sopenharmony_ci		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
37648c2ecf20Sopenharmony_ci			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
37658c2ecf20Sopenharmony_ci			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
37668c2ecf20Sopenharmony_ci				if (q->adap->params.tp.rx_pkt_encap)
37678c2ecf20Sopenharmony_ci					csum_ok = err_vec &
37688c2ecf20Sopenharmony_ci						  T6_COMPR_RXERR_SUM_F;
37698c2ecf20Sopenharmony_ci				else
37708c2ecf20Sopenharmony_ci					csum_ok = err_vec & RXERR_CSUM_F;
37718c2ecf20Sopenharmony_ci				if (!csum_ok)
37728c2ecf20Sopenharmony_ci					skb->ip_summed = CHECKSUM_UNNECESSARY;
37738c2ecf20Sopenharmony_ci			}
37748c2ecf20Sopenharmony_ci		}
37758c2ecf20Sopenharmony_ci
37768c2ecf20Sopenharmony_ci#undef CPL_RX_PKT_FLAGS
37778c2ecf20Sopenharmony_ci#endif /* CONFIG_CHELSIO_T4_FCOE */
37788c2ecf20Sopenharmony_ci	}
37798c2ecf20Sopenharmony_ci
37808c2ecf20Sopenharmony_ci	if (unlikely(pkt->vlan_ex)) {
37818c2ecf20Sopenharmony_ci		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
37828c2ecf20Sopenharmony_ci		rxq->stats.vlan_ex++;
37838c2ecf20Sopenharmony_ci	}
37848c2ecf20Sopenharmony_ci	skb_mark_napi_id(skb, &q->napi);
37858c2ecf20Sopenharmony_ci	netif_receive_skb(skb);
37868c2ecf20Sopenharmony_ci	return 0;
37878c2ecf20Sopenharmony_ci}
37888c2ecf20Sopenharmony_ci
37898c2ecf20Sopenharmony_ci/**
37908c2ecf20Sopenharmony_ci *	restore_rx_bufs - put back a packet's Rx buffers
37918c2ecf20Sopenharmony_ci *	@si: the packet gather list
37928c2ecf20Sopenharmony_ci *	@q: the SGE free list
37938c2ecf20Sopenharmony_ci *	@frags: number of FL buffers to restore
37948c2ecf20Sopenharmony_ci *
37958c2ecf20Sopenharmony_ci *	Puts back on an FL the Rx buffers associated with @si.  The buffers
37968c2ecf20Sopenharmony_ci *	have already been unmapped and are left unmapped, we mark them so to
37978c2ecf20Sopenharmony_ci *	prevent further unmapping attempts.
37988c2ecf20Sopenharmony_ci *
37998c2ecf20Sopenharmony_ci *	This function undoes a series of @unmap_rx_buf calls when we find out
38008c2ecf20Sopenharmony_ci *	that the current packet can't be processed right away afterall and we
38018c2ecf20Sopenharmony_ci *	need to come back to it later.  This is a very rare event and there's
38028c2ecf20Sopenharmony_ci *	no effort to make this particularly efficient.
38038c2ecf20Sopenharmony_ci */
38048c2ecf20Sopenharmony_cistatic void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
38058c2ecf20Sopenharmony_ci			    int frags)
38068c2ecf20Sopenharmony_ci{
38078c2ecf20Sopenharmony_ci	struct rx_sw_desc *d;
38088c2ecf20Sopenharmony_ci
38098c2ecf20Sopenharmony_ci	while (frags--) {
38108c2ecf20Sopenharmony_ci		if (q->cidx == 0)
38118c2ecf20Sopenharmony_ci			q->cidx = q->size - 1;
38128c2ecf20Sopenharmony_ci		else
38138c2ecf20Sopenharmony_ci			q->cidx--;
38148c2ecf20Sopenharmony_ci		d = &q->sdesc[q->cidx];
38158c2ecf20Sopenharmony_ci		d->page = si->frags[frags].page;
38168c2ecf20Sopenharmony_ci		d->dma_addr |= RX_UNMAPPED_BUF;
38178c2ecf20Sopenharmony_ci		q->avail++;
38188c2ecf20Sopenharmony_ci	}
38198c2ecf20Sopenharmony_ci}
38208c2ecf20Sopenharmony_ci
38218c2ecf20Sopenharmony_ci/**
38228c2ecf20Sopenharmony_ci *	is_new_response - check if a response is newly written
38238c2ecf20Sopenharmony_ci *	@r: the response descriptor
38248c2ecf20Sopenharmony_ci *	@q: the response queue
38258c2ecf20Sopenharmony_ci *
38268c2ecf20Sopenharmony_ci *	Returns true if a response descriptor contains a yet unprocessed
38278c2ecf20Sopenharmony_ci *	response.
38288c2ecf20Sopenharmony_ci */
38298c2ecf20Sopenharmony_cistatic inline bool is_new_response(const struct rsp_ctrl *r,
38308c2ecf20Sopenharmony_ci				   const struct sge_rspq *q)
38318c2ecf20Sopenharmony_ci{
38328c2ecf20Sopenharmony_ci	return (r->type_gen >> RSPD_GEN_S) == q->gen;
38338c2ecf20Sopenharmony_ci}
38348c2ecf20Sopenharmony_ci
38358c2ecf20Sopenharmony_ci/**
38368c2ecf20Sopenharmony_ci *	rspq_next - advance to the next entry in a response queue
38378c2ecf20Sopenharmony_ci *	@q: the queue
38388c2ecf20Sopenharmony_ci *
38398c2ecf20Sopenharmony_ci *	Updates the state of a response queue to advance it to the next entry.
38408c2ecf20Sopenharmony_ci */
38418c2ecf20Sopenharmony_cistatic inline void rspq_next(struct sge_rspq *q)
38428c2ecf20Sopenharmony_ci{
38438c2ecf20Sopenharmony_ci	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
38448c2ecf20Sopenharmony_ci	if (unlikely(++q->cidx == q->size)) {
38458c2ecf20Sopenharmony_ci		q->cidx = 0;
38468c2ecf20Sopenharmony_ci		q->gen ^= 1;
38478c2ecf20Sopenharmony_ci		q->cur_desc = q->desc;
38488c2ecf20Sopenharmony_ci	}
38498c2ecf20Sopenharmony_ci}
38508c2ecf20Sopenharmony_ci
38518c2ecf20Sopenharmony_ci/**
38528c2ecf20Sopenharmony_ci *	process_responses - process responses from an SGE response queue
38538c2ecf20Sopenharmony_ci *	@q: the ingress queue to process
38548c2ecf20Sopenharmony_ci *	@budget: how many responses can be processed in this round
38558c2ecf20Sopenharmony_ci *
38568c2ecf20Sopenharmony_ci *	Process responses from an SGE response queue up to the supplied budget.
38578c2ecf20Sopenharmony_ci *	Responses include received packets as well as control messages from FW
38588c2ecf20Sopenharmony_ci *	or HW.
38598c2ecf20Sopenharmony_ci *
38608c2ecf20Sopenharmony_ci *	Additionally choose the interrupt holdoff time for the next interrupt
38618c2ecf20Sopenharmony_ci *	on this queue.  If the system is under memory shortage use a fairly
38628c2ecf20Sopenharmony_ci *	long delay to help recovery.
38638c2ecf20Sopenharmony_ci */
38648c2ecf20Sopenharmony_cistatic int process_responses(struct sge_rspq *q, int budget)
38658c2ecf20Sopenharmony_ci{
38668c2ecf20Sopenharmony_ci	int ret, rsp_type;
38678c2ecf20Sopenharmony_ci	int budget_left = budget;
38688c2ecf20Sopenharmony_ci	const struct rsp_ctrl *rc;
38698c2ecf20Sopenharmony_ci	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
38708c2ecf20Sopenharmony_ci	struct adapter *adapter = q->adap;
38718c2ecf20Sopenharmony_ci	struct sge *s = &adapter->sge;
38728c2ecf20Sopenharmony_ci
38738c2ecf20Sopenharmony_ci	while (likely(budget_left)) {
38748c2ecf20Sopenharmony_ci		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
38758c2ecf20Sopenharmony_ci		if (!is_new_response(rc, q)) {
38768c2ecf20Sopenharmony_ci			if (q->flush_handler)
38778c2ecf20Sopenharmony_ci				q->flush_handler(q);
38788c2ecf20Sopenharmony_ci			break;
38798c2ecf20Sopenharmony_ci		}
38808c2ecf20Sopenharmony_ci
38818c2ecf20Sopenharmony_ci		dma_rmb();
38828c2ecf20Sopenharmony_ci		rsp_type = RSPD_TYPE_G(rc->type_gen);
38838c2ecf20Sopenharmony_ci		if (likely(rsp_type == RSPD_TYPE_FLBUF_X)) {
38848c2ecf20Sopenharmony_ci			struct page_frag *fp;
38858c2ecf20Sopenharmony_ci			struct pkt_gl si;
38868c2ecf20Sopenharmony_ci			const struct rx_sw_desc *rsd;
38878c2ecf20Sopenharmony_ci			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
38888c2ecf20Sopenharmony_ci
38898c2ecf20Sopenharmony_ci			if (len & RSPD_NEWBUF_F) {
38908c2ecf20Sopenharmony_ci				if (likely(q->offset > 0)) {
38918c2ecf20Sopenharmony_ci					free_rx_bufs(q->adap, &rxq->fl, 1);
38928c2ecf20Sopenharmony_ci					q->offset = 0;
38938c2ecf20Sopenharmony_ci				}
38948c2ecf20Sopenharmony_ci				len = RSPD_LEN_G(len);
38958c2ecf20Sopenharmony_ci			}
38968c2ecf20Sopenharmony_ci			si.tot_len = len;
38978c2ecf20Sopenharmony_ci
38988c2ecf20Sopenharmony_ci			/* gather packet fragments */
38998c2ecf20Sopenharmony_ci			for (frags = 0, fp = si.frags; ; frags++, fp++) {
39008c2ecf20Sopenharmony_ci				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
39018c2ecf20Sopenharmony_ci				bufsz = get_buf_size(adapter, rsd);
39028c2ecf20Sopenharmony_ci				fp->page = rsd->page;
39038c2ecf20Sopenharmony_ci				fp->offset = q->offset;
39048c2ecf20Sopenharmony_ci				fp->size = min(bufsz, len);
39058c2ecf20Sopenharmony_ci				len -= fp->size;
39068c2ecf20Sopenharmony_ci				if (!len)
39078c2ecf20Sopenharmony_ci					break;
39088c2ecf20Sopenharmony_ci				unmap_rx_buf(q->adap, &rxq->fl);
39098c2ecf20Sopenharmony_ci			}
39108c2ecf20Sopenharmony_ci
39118c2ecf20Sopenharmony_ci			si.sgetstamp = SGE_TIMESTAMP_G(
39128c2ecf20Sopenharmony_ci					be64_to_cpu(rc->last_flit));
39138c2ecf20Sopenharmony_ci			/*
39148c2ecf20Sopenharmony_ci			 * Last buffer remains mapped so explicitly make it
39158c2ecf20Sopenharmony_ci			 * coherent for CPU access.
39168c2ecf20Sopenharmony_ci			 */
39178c2ecf20Sopenharmony_ci			dma_sync_single_for_cpu(q->adap->pdev_dev,
39188c2ecf20Sopenharmony_ci						get_buf_addr(rsd),
39198c2ecf20Sopenharmony_ci						fp->size, DMA_FROM_DEVICE);
39208c2ecf20Sopenharmony_ci
39218c2ecf20Sopenharmony_ci			si.va = page_address(si.frags[0].page) +
39228c2ecf20Sopenharmony_ci				si.frags[0].offset;
39238c2ecf20Sopenharmony_ci			prefetch(si.va);
39248c2ecf20Sopenharmony_ci
39258c2ecf20Sopenharmony_ci			si.nfrags = frags + 1;
39268c2ecf20Sopenharmony_ci			ret = q->handler(q, q->cur_desc, &si);
39278c2ecf20Sopenharmony_ci			if (likely(ret == 0))
39288c2ecf20Sopenharmony_ci				q->offset += ALIGN(fp->size, s->fl_align);
39298c2ecf20Sopenharmony_ci			else
39308c2ecf20Sopenharmony_ci				restore_rx_bufs(&si, &rxq->fl, frags);
39318c2ecf20Sopenharmony_ci		} else if (likely(rsp_type == RSPD_TYPE_CPL_X)) {
39328c2ecf20Sopenharmony_ci			ret = q->handler(q, q->cur_desc, NULL);
39338c2ecf20Sopenharmony_ci		} else {
39348c2ecf20Sopenharmony_ci			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
39358c2ecf20Sopenharmony_ci		}
39368c2ecf20Sopenharmony_ci
39378c2ecf20Sopenharmony_ci		if (unlikely(ret)) {
39388c2ecf20Sopenharmony_ci			/* couldn't process descriptor, back off for recovery */
39398c2ecf20Sopenharmony_ci			q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
39408c2ecf20Sopenharmony_ci			break;
39418c2ecf20Sopenharmony_ci		}
39428c2ecf20Sopenharmony_ci
39438c2ecf20Sopenharmony_ci		rspq_next(q);
39448c2ecf20Sopenharmony_ci		budget_left--;
39458c2ecf20Sopenharmony_ci	}
39468c2ecf20Sopenharmony_ci
39478c2ecf20Sopenharmony_ci	if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
39488c2ecf20Sopenharmony_ci		__refill_fl(q->adap, &rxq->fl);
39498c2ecf20Sopenharmony_ci	return budget - budget_left;
39508c2ecf20Sopenharmony_ci}
39518c2ecf20Sopenharmony_ci
39528c2ecf20Sopenharmony_ci/**
39538c2ecf20Sopenharmony_ci *	napi_rx_handler - the NAPI handler for Rx processing
39548c2ecf20Sopenharmony_ci *	@napi: the napi instance
39558c2ecf20Sopenharmony_ci *	@budget: how many packets we can process in this round
39568c2ecf20Sopenharmony_ci *
39578c2ecf20Sopenharmony_ci *	Handler for new data events when using NAPI.  This does not need any
39588c2ecf20Sopenharmony_ci *	locking or protection from interrupts as data interrupts are off at
39598c2ecf20Sopenharmony_ci *	this point and other adapter interrupts do not interfere (the latter
39608c2ecf20Sopenharmony_ci *	in not a concern at all with MSI-X as non-data interrupts then have
39618c2ecf20Sopenharmony_ci *	a separate handler).
39628c2ecf20Sopenharmony_ci */
39638c2ecf20Sopenharmony_cistatic int napi_rx_handler(struct napi_struct *napi, int budget)
39648c2ecf20Sopenharmony_ci{
39658c2ecf20Sopenharmony_ci	unsigned int params;
39668c2ecf20Sopenharmony_ci	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
39678c2ecf20Sopenharmony_ci	int work_done;
39688c2ecf20Sopenharmony_ci	u32 val;
39698c2ecf20Sopenharmony_ci
39708c2ecf20Sopenharmony_ci	work_done = process_responses(q, budget);
39718c2ecf20Sopenharmony_ci	if (likely(work_done < budget)) {
39728c2ecf20Sopenharmony_ci		int timer_index;
39738c2ecf20Sopenharmony_ci
39748c2ecf20Sopenharmony_ci		napi_complete_done(napi, work_done);
39758c2ecf20Sopenharmony_ci		timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
39768c2ecf20Sopenharmony_ci
39778c2ecf20Sopenharmony_ci		if (q->adaptive_rx) {
39788c2ecf20Sopenharmony_ci			if (work_done > max(timer_pkt_quota[timer_index],
39798c2ecf20Sopenharmony_ci					    MIN_NAPI_WORK))
39808c2ecf20Sopenharmony_ci				timer_index = (timer_index + 1);
39818c2ecf20Sopenharmony_ci			else
39828c2ecf20Sopenharmony_ci				timer_index = timer_index - 1;
39838c2ecf20Sopenharmony_ci
39848c2ecf20Sopenharmony_ci			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
39858c2ecf20Sopenharmony_ci			q->next_intr_params =
39868c2ecf20Sopenharmony_ci					QINTR_TIMER_IDX_V(timer_index) |
39878c2ecf20Sopenharmony_ci					QINTR_CNT_EN_V(0);
39888c2ecf20Sopenharmony_ci			params = q->next_intr_params;
39898c2ecf20Sopenharmony_ci		} else {
39908c2ecf20Sopenharmony_ci			params = q->next_intr_params;
39918c2ecf20Sopenharmony_ci			q->next_intr_params = q->intr_params;
39928c2ecf20Sopenharmony_ci		}
39938c2ecf20Sopenharmony_ci	} else
39948c2ecf20Sopenharmony_ci		params = QINTR_TIMER_IDX_V(7);
39958c2ecf20Sopenharmony_ci
39968c2ecf20Sopenharmony_ci	val = CIDXINC_V(work_done) | SEINTARM_V(params);
39978c2ecf20Sopenharmony_ci
39988c2ecf20Sopenharmony_ci	/* If we don't have access to the new User GTS (T5+), use the old
39998c2ecf20Sopenharmony_ci	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
40008c2ecf20Sopenharmony_ci	 */
40018c2ecf20Sopenharmony_ci	if (unlikely(q->bar2_addr == NULL)) {
40028c2ecf20Sopenharmony_ci		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
40038c2ecf20Sopenharmony_ci			     val | INGRESSQID_V((u32)q->cntxt_id));
40048c2ecf20Sopenharmony_ci	} else {
40058c2ecf20Sopenharmony_ci		writel(val | INGRESSQID_V(q->bar2_qid),
40068c2ecf20Sopenharmony_ci		       q->bar2_addr + SGE_UDB_GTS);
40078c2ecf20Sopenharmony_ci		wmb();
40088c2ecf20Sopenharmony_ci	}
40098c2ecf20Sopenharmony_ci	return work_done;
40108c2ecf20Sopenharmony_ci}
40118c2ecf20Sopenharmony_ci
40128c2ecf20Sopenharmony_civoid cxgb4_ethofld_restart(struct tasklet_struct *t)
40138c2ecf20Sopenharmony_ci{
40148c2ecf20Sopenharmony_ci	struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
40158c2ecf20Sopenharmony_ci						     qresume_tsk);
40168c2ecf20Sopenharmony_ci	int pktcount;
40178c2ecf20Sopenharmony_ci
40188c2ecf20Sopenharmony_ci	spin_lock(&eosw_txq->lock);
40198c2ecf20Sopenharmony_ci	pktcount = eosw_txq->cidx - eosw_txq->last_cidx;
40208c2ecf20Sopenharmony_ci	if (pktcount < 0)
40218c2ecf20Sopenharmony_ci		pktcount += eosw_txq->ndesc;
40228c2ecf20Sopenharmony_ci
40238c2ecf20Sopenharmony_ci	if (pktcount) {
40248c2ecf20Sopenharmony_ci		cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev),
40258c2ecf20Sopenharmony_ci					 eosw_txq, pktcount);
40268c2ecf20Sopenharmony_ci		eosw_txq->inuse -= pktcount;
40278c2ecf20Sopenharmony_ci	}
40288c2ecf20Sopenharmony_ci
40298c2ecf20Sopenharmony_ci	/* There may be some packets waiting for completions. So,
40308c2ecf20Sopenharmony_ci	 * attempt to send these packets now.
40318c2ecf20Sopenharmony_ci	 */
40328c2ecf20Sopenharmony_ci	ethofld_xmit(eosw_txq->netdev, eosw_txq);
40338c2ecf20Sopenharmony_ci	spin_unlock(&eosw_txq->lock);
40348c2ecf20Sopenharmony_ci}
40358c2ecf20Sopenharmony_ci
/**
 * cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
 * @q: the response queue that received the packet
 * @rsp: the response queue descriptor holding the CPL message
 * @si: the gather list of packet fragments
 *
 * Process a ETHOFLD Tx completion. Increment the cidx here, but
 * free up the descriptors in a tasklet later.
 *
 * Returns 0 in all cases.
 */
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
			     const struct pkt_gl *si)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	/* skip RSS header */
	rsp++;

	if (opcode == CPL_FW4_ACK) {
		const struct cpl_fw4_ack *cpl;
		struct sge_eosw_txq *eosw_txq;
		struct eotid_entry *entry;
		struct sk_buff *skb;
		u32 hdr_len, eotid;
		u8 flits, wrlen16;
		int credits;

		cpl = (const struct cpl_fw4_ack *)rsp;
		/* The flow ID carried in the ACK is the absolute EOTID;
		 * convert it into an index into our EOTID table.
		 */
		eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) -
			q->adap->tids.eotid_base;
		entry = cxgb4_lookup_eotid(&q->adap->tids, eotid);
		if (!entry)
			goto out_done;

		eosw_txq = (struct sge_eosw_txq *)entry->data;
		if (!eosw_txq)
			goto out_done;

		spin_lock(&eosw_txq->lock);
		credits = cpl->credits;
		/* Walk forward from cidx, charging each completed request's
		 * work-request length (in 16-byte units) against the returned
		 * credits.  An empty descriptor slot terminates the walk.
		 */
		while (credits > 0) {
			skb = eosw_txq->desc[eosw_txq->cidx].skb;
			if (!skb)
				break;

			if (unlikely((eosw_txq->state ==
				      CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
				      eosw_txq->state ==
				      CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
				     eosw_txq->cidx == eosw_txq->flowc_idx)) {
				/* Reply to a FLOWC request: advance the
				 * queue's state machine and wake up anyone
				 * waiting on the completion.
				 */
				flits = DIV_ROUND_UP(skb->len, 8);
				if (eosw_txq->state ==
				    CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
					eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
				else
					eosw_txq->state = CXGB4_EO_STATE_CLOSED;
				complete(&eosw_txq->completion);
			} else {
				/* Data packet: recompute how many flits its
				 * work request consumed.
				 */
				hdr_len = eth_get_headlen(eosw_txq->netdev,
							  skb->data,
							  skb_headlen(skb));
				flits = ethofld_calc_tx_flits(q->adap, skb,
							      hdr_len);
			}
			eosw_txq_advance_index(&eosw_txq->cidx, 1,
					       eosw_txq->ndesc);
			wrlen16 = DIV_ROUND_UP(flits * 8, 16);
			credits -= wrlen16;
		}

		/* Return the freed-up credits to the queue and account for
		 * one fewer outstanding completion request.
		 */
		eosw_txq->cred += cpl->credits;
		eosw_txq->ncompl--;

		spin_unlock(&eosw_txq->lock);

		/* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx,
		 * if there were packets waiting for completion.
		 */
		tasklet_schedule(&eosw_txq->qresume_tsk);
	}

out_done:
	return 0;
}
41188c2ecf20Sopenharmony_ci
41198c2ecf20Sopenharmony_ci/*
41208c2ecf20Sopenharmony_ci * The MSI-X interrupt handler for an SGE response queue.
41218c2ecf20Sopenharmony_ci */
41228c2ecf20Sopenharmony_ciirqreturn_t t4_sge_intr_msix(int irq, void *cookie)
41238c2ecf20Sopenharmony_ci{
41248c2ecf20Sopenharmony_ci	struct sge_rspq *q = cookie;
41258c2ecf20Sopenharmony_ci
41268c2ecf20Sopenharmony_ci	napi_schedule(&q->napi);
41278c2ecf20Sopenharmony_ci	return IRQ_HANDLED;
41288c2ecf20Sopenharmony_ci}
41298c2ecf20Sopenharmony_ci
/*
 * Process the indirect interrupt entries in the interrupt queue and kick off
 * NAPI for each queue that has generated an entry.  Returns the number of
 * interrupt queue entries consumed.
 */
static unsigned int process_intrq(struct adapter *adap)
{
	unsigned int credits;
	const struct rsp_ctrl *rc;
	struct sge_rspq *q = &adap->sge.intrq;
	u32 val;

	spin_lock(&adap->sge.intrq_lock);
	for (credits = 0; ; credits++) {
		/* The rsp_ctrl footer occupies the last bytes of each
		 * ingress queue entry.
		 */
		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
		if (!is_new_response(rc, q))
			break;

		/* Make sure the entry's payload is read only after the
		 * generation check above saw it as valid.
		 */
		dma_rmb();
		if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) {
			unsigned int qid = ntohl(rc->pldbuflen_qid);

			/* convert the absolute qid into an ingr_map index */
			qid -= adap->sge.ingr_start;
			napi_schedule(&adap->sge.ingr_map[qid]->napi);
		}

		rspq_next(q);
	}

	/* Return the consumed entries to hardware and re-arm interrupts. */
	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);

	/* If we don't have access to the new User GTS (T5+), use the old
	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
	 */
	if (unlikely(q->bar2_addr == NULL)) {
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     val | INGRESSQID_V(q->cntxt_id));
	} else {
		writel(val | INGRESSQID_V(q->bar2_qid),
		       q->bar2_addr + SGE_UDB_GTS);
		wmb();
	}
	spin_unlock(&adap->sge.intrq_lock);
	return credits;
}
41748c2ecf20Sopenharmony_ci
41758c2ecf20Sopenharmony_ci/*
41768c2ecf20Sopenharmony_ci * The MSI interrupt handler, which handles data events from SGE response queues
41778c2ecf20Sopenharmony_ci * as well as error and other async events as they all use the same MSI vector.
41788c2ecf20Sopenharmony_ci */
41798c2ecf20Sopenharmony_cistatic irqreturn_t t4_intr_msi(int irq, void *cookie)
41808c2ecf20Sopenharmony_ci{
41818c2ecf20Sopenharmony_ci	struct adapter *adap = cookie;
41828c2ecf20Sopenharmony_ci
41838c2ecf20Sopenharmony_ci	if (adap->flags & CXGB4_MASTER_PF)
41848c2ecf20Sopenharmony_ci		t4_slow_intr_handler(adap);
41858c2ecf20Sopenharmony_ci	process_intrq(adap);
41868c2ecf20Sopenharmony_ci	return IRQ_HANDLED;
41878c2ecf20Sopenharmony_ci}
41888c2ecf20Sopenharmony_ci
41898c2ecf20Sopenharmony_ci/*
41908c2ecf20Sopenharmony_ci * Interrupt handler for legacy INTx interrupts.
41918c2ecf20Sopenharmony_ci * Handles data events from SGE response queues as well as error and other
41928c2ecf20Sopenharmony_ci * async events as they all use the same interrupt line.
41938c2ecf20Sopenharmony_ci */
41948c2ecf20Sopenharmony_cistatic irqreturn_t t4_intr_intx(int irq, void *cookie)
41958c2ecf20Sopenharmony_ci{
41968c2ecf20Sopenharmony_ci	struct adapter *adap = cookie;
41978c2ecf20Sopenharmony_ci
41988c2ecf20Sopenharmony_ci	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
41998c2ecf20Sopenharmony_ci	if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) |
42008c2ecf20Sopenharmony_ci	    process_intrq(adap))
42018c2ecf20Sopenharmony_ci		return IRQ_HANDLED;
42028c2ecf20Sopenharmony_ci	return IRQ_NONE;             /* probably shared interrupt */
42038c2ecf20Sopenharmony_ci}
42048c2ecf20Sopenharmony_ci
42058c2ecf20Sopenharmony_ci/**
42068c2ecf20Sopenharmony_ci *	t4_intr_handler - select the top-level interrupt handler
42078c2ecf20Sopenharmony_ci *	@adap: the adapter
42088c2ecf20Sopenharmony_ci *
42098c2ecf20Sopenharmony_ci *	Selects the top-level interrupt handler based on the type of interrupts
42108c2ecf20Sopenharmony_ci *	(MSI-X, MSI, or INTx).
42118c2ecf20Sopenharmony_ci */
42128c2ecf20Sopenharmony_ciirq_handler_t t4_intr_handler(struct adapter *adap)
42138c2ecf20Sopenharmony_ci{
42148c2ecf20Sopenharmony_ci	if (adap->flags & CXGB4_USING_MSIX)
42158c2ecf20Sopenharmony_ci		return t4_sge_intr_msix;
42168c2ecf20Sopenharmony_ci	if (adap->flags & CXGB4_USING_MSI)
42178c2ecf20Sopenharmony_ci		return t4_intr_msi;
42188c2ecf20Sopenharmony_ci	return t4_intr_intx;
42198c2ecf20Sopenharmony_ci}
42208c2ecf20Sopenharmony_ci
/* Periodic SGE Rx timer: rescue Free Lists that have gone starving and,
 * on the master PF, run global ingress monitoring.
 */
static void sge_rx_timer_cb(struct timer_list *t)
{
	unsigned long m;
	unsigned int i;
	struct adapter *adap = from_timer(adap, t, sge.rx_timer);
	struct sge *s = &adap->sge;

	/* Scan the bitmap of Free Lists previously marked starving.  Clear
	 * each bit before re-checking the list, then try to kick the
	 * associated NAPI instance to refill it; if NAPI can't be
	 * rescheduled, mark the list starving again for the next tick.
	 */
	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->starving_fl[i]; m; m &= m - 1) {
			struct sge_eth_rxq *rxq;
			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_fl *fl = s->egr_map[id];

			clear_bit(id, s->starving_fl);
			/* order the clear_bit against the fl_starving()
			 * re-check below
			 */
			smp_mb__after_atomic();

			if (fl_starving(adap, fl)) {
				rxq = container_of(fl, struct sge_eth_rxq, fl);
				if (napi_reschedule(&rxq->rspq.napi))
					fl->starving++;
				else
					set_bit(id, s->starving_fl);
			}
		}
	/* The remainder of the SGE RX Timer Callback routine is dedicated to
	 * global Master PF activities like checking for chip ingress stalls,
	 * etc.
	 */
	if (!(adap->flags & CXGB4_MASTER_PF))
		goto done;

	t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD);

done:
	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
}
42578c2ecf20Sopenharmony_ci
/* Periodic SGE Tx timer: restart stalled offload Tx queues, reclaim the
 * PTP Tx queue, and reclaim completed Ethernet Tx descriptors round-robin
 * within a per-tick budget.
 */
static void sge_tx_timer_cb(struct timer_list *t)
{
	struct adapter *adap = from_timer(adap, t, sge.tx_timer);
	struct sge *s = &adap->sge;
	unsigned long m, period;
	unsigned int i, budget;

	/* Each set bit in txq_maperr names an egress queue whose resume
	 * tasklet needs to be kicked.
	 */
	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
		for (m = s->txq_maperr[i]; m; m &= m - 1) {
			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
			struct sge_uld_txq *txq = s->egr_map[id];

			clear_bit(id, s->txq_maperr);
			tasklet_schedule(&txq->qresume_tsk);
		}

	/* Non-T4 chips have a PTP Tx queue; reclaim its completed
	 * descriptors under the PTP lock.
	 */
	if (!is_t4(adap->params.chip)) {
		struct sge_eth_txq *q = &s->ptptxq;
		int avail;

		spin_lock(&adap->ptp_lock);
		avail = reclaimable(&q->q);

		if (avail) {
			free_tx_desc(adap, &q->q, avail, false);
			q->q.in_use -= avail;
		}
		spin_unlock(&adap->ptp_lock);
	}

	/* Walk the Ethernet Tx queues round-robin, starting where the last
	 * tick left off (ethtxq_rover), reclaiming descriptors until the
	 * budget is exhausted or we come full circle.
	 */
	budget = MAX_TIMER_TX_RECLAIM;
	i = s->ethtxq_rover;
	do {
		budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i],
						       budget);
		if (!budget)
			break;

		if (++i >= s->ethqsets)
			i = 0;
	} while (i != s->ethtxq_rover);
	s->ethtxq_rover = i;

	if (budget == 0) {
		/* If we found too many reclaimable packets schedule a timer
		 * in the near future to continue where we left off.
		 */
		period = 2;
	} else {
		/* We reclaimed all reclaimable TX Descriptors, so reschedule
		 * at the normal period.
		 */
		period = TX_QCHECK_PERIOD;
	}

	mod_timer(&s->tx_timer, jiffies + period);
}
43158c2ecf20Sopenharmony_ci
43168c2ecf20Sopenharmony_ci/**
43178c2ecf20Sopenharmony_ci *	bar2_address - return the BAR2 address for an SGE Queue's Registers
43188c2ecf20Sopenharmony_ci *	@adapter: the adapter
43198c2ecf20Sopenharmony_ci *	@qid: the SGE Queue ID
43208c2ecf20Sopenharmony_ci *	@qtype: the SGE Queue Type (Egress or Ingress)
43218c2ecf20Sopenharmony_ci *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
43228c2ecf20Sopenharmony_ci *
43238c2ecf20Sopenharmony_ci *	Returns the BAR2 address for the SGE Queue Registers associated with
43248c2ecf20Sopenharmony_ci *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
43258c2ecf20Sopenharmony_ci *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
43268c2ecf20Sopenharmony_ci *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
43278c2ecf20Sopenharmony_ci *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
43288c2ecf20Sopenharmony_ci */
43298c2ecf20Sopenharmony_cistatic void __iomem *bar2_address(struct adapter *adapter,
43308c2ecf20Sopenharmony_ci				  unsigned int qid,
43318c2ecf20Sopenharmony_ci				  enum t4_bar2_qtype qtype,
43328c2ecf20Sopenharmony_ci				  unsigned int *pbar2_qid)
43338c2ecf20Sopenharmony_ci{
43348c2ecf20Sopenharmony_ci	u64 bar2_qoffset;
43358c2ecf20Sopenharmony_ci	int ret;
43368c2ecf20Sopenharmony_ci
43378c2ecf20Sopenharmony_ci	ret = t4_bar2_sge_qregs(adapter, qid, qtype, 0,
43388c2ecf20Sopenharmony_ci				&bar2_qoffset, pbar2_qid);
43398c2ecf20Sopenharmony_ci	if (ret)
43408c2ecf20Sopenharmony_ci		return NULL;
43418c2ecf20Sopenharmony_ci
43428c2ecf20Sopenharmony_ci	return adapter->bar2 + bar2_qoffset;
43438c2ecf20Sopenharmony_ci}
43448c2ecf20Sopenharmony_ci
/**
 *	t4_sge_alloc_rxq - allocate an SGE ingress (response) queue
 *	@adap: the adapter
 *	@iq: the ingress queue to initialize
 *	@fwevtq: whether this is an asynchronous firmware event queue
 *	@dev: the net device the queue is associated with
 *	@intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
 *	@fl: the free list to pair with the queue, or NULL for none
 *	@hnd: the response handler to install for the queue
 *	@flush_hnd: the flush handler to install, or NULL
 *	@cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
 *
 *	Allocates the DMA ring(s) for an ingress queue (and its optional
 *	free list), issues a FW_IQ_CMD to create the queue in hardware and
 *	initializes the driver's software state for it.  On failure any
 *	partially allocated rings are freed and a negative errno returned.
 */
int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
		     struct net_device *dev, int intr_idx,
		     struct sge_fl *fl, rspq_handler_t hnd,
		     rspq_flush_handler_t flush_hnd, int cong)
{
	int ret, flsz = 0;
	struct fw_iq_cmd c;
	struct sge *s = &adap->sge;
	struct port_info *pi = netdev_priv(dev);
	int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING);

	/* Size needs to be multiple of 16, including status entry. */
	iq->size = roundup(iq->size, 16);

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0,
			      dev_to_node(adap->pdev_dev));
	if (!iq->desc)
		return -ENOMEM;

	/* Build the FW_IQ_CMD that creates the ingress queue in hardware. */
	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
				 FW_LEN16(c));
	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
		FW_IQ_CMD_IQANDST_V(intr_idx < 0) |
		FW_IQ_CMD_IQANUD_V(UPDATEDELIVERY_INTERRUPT_X) |
		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
							-intr_idx - 1));
	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
		FW_IQ_CMD_IQGTSMODE_F |
		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
	c.iqsize = htons(iq->size);
	c.iqaddr = cpu_to_be64(iq->phys_addr);
	if (cong >= 0)
		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F |
				FW_IQ_CMD_IQTYPE_V(cong ? FW_IQ_IQTYPE_NIC
							:  FW_IQ_IQTYPE_OFLD));

	if (fl) {
		unsigned int chip_ver =
			CHELSIO_CHIP_VERSION(adap->params.chip);

		/* Allocate the ring for the hardware free list (with space
		 * for its status page) along with the associated software
		 * descriptor ring.  The free list size needs to be a multiple
		 * of the Egress Queue Unit and at least 2 Egress Units larger
		 * than the SGE's Egress Congestion Threshold
		 * (fl_starve_thres - 1).
		 */
		if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
			fl->size = s->fl_starve_thres - 1 + 2 * 8;
		fl->size = roundup(fl->size, 8);
		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
				      sizeof(struct rx_sw_desc), &fl->addr,
				      &fl->sdesc, s->stat_len,
				      dev_to_node(adap->pdev_dev));
		if (!fl->desc)
			goto fl_nomem;

		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
		c.iqns_to_fl0congen |= htonl(FW_IQ_CMD_FL0PACKEN_F |
					     FW_IQ_CMD_FL0FETCHRO_V(relaxed) |
					     FW_IQ_CMD_FL0DATARO_V(relaxed) |
					     FW_IQ_CMD_FL0PADEN_F);
		if (cong >= 0)
			c.iqns_to_fl0congen |=
				htonl(FW_IQ_CMD_FL0CNGCHMAP_V(cong) |
				      FW_IQ_CMD_FL0CONGCIF_F |
				      FW_IQ_CMD_FL0CONGEN_F);
		/* In T6, for egress queue type FL there is internal overhead
		 * of 16B for header going into FLM module.  Hence the maximum
		 * allowed burst size is 448 bytes.  For T4/T5, the hardware
		 * doesn't coalesce fetch requests if more than 64 bytes of
		 * Free List pointers are provided, so we use a 128-byte Fetch
		 * Burst Minimum there (T6 implements coalescing so we can use
		 * the smaller 64-byte value there).
		 */
		c.fl0dcaen_to_fl0cidxfthresh =
			htons(FW_IQ_CMD_FL0FBMIN_V(chip_ver <= CHELSIO_T5 ?
						   FETCHBURSTMIN_128B_X :
						   FETCHBURSTMIN_64B_T6_X) |
			      FW_IQ_CMD_FL0FBMAX_V((chip_ver <= CHELSIO_T5) ?
						   FETCHBURSTMAX_512B_X :
						   FETCHBURSTMAX_256B_X));
		c.fl0size = htons(flsz);
		c.fl0addr = cpu_to_be64(fl->addr);
	}

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret)
		goto err;

	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
	iq->cur_desc = iq->desc;
	iq->cidx = 0;
	iq->gen = 1;
	iq->next_intr_params = iq->intr_params;
	/* The firmware returns the assigned queue IDs in the reply. */
	iq->cntxt_id = ntohs(c.iqid);
	iq->abs_id = ntohs(c.physiqid);
	iq->bar2_addr = bar2_address(adap,
				     iq->cntxt_id,
				     T4_BAR2_QTYPE_INGRESS,
				     &iq->bar2_qid);
	iq->size--;                           /* subtract status entry */
	iq->netdev = dev;
	iq->handler = hnd;
	iq->flush_handler = flush_hnd;

	memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr));
	skb_queue_head_init(&iq->lro_mgr.lroq);

	/* set offset to -1 to distinguish ingress queues without FL */
	iq->offset = fl ? 0 : -1;

	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl) {
		fl->cntxt_id = ntohs(c.fl0id);
		fl->avail = fl->pend_cred = 0;
		fl->pidx = fl->cidx = 0;
		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;

		/* Note, we must initialize the BAR2 Free List User Doorbell
		 * information before refilling the Free List!
		 */
		fl->bar2_addr = bar2_address(adap,
					     fl->cntxt_id,
					     T4_BAR2_QTYPE_EGRESS,
					     &fl->bar2_qid);
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
	}

	/* For T5 and later we attempt to set up the Congestion Manager values
	 * of the new RX Ethernet Queue.  This should really be handled by
	 * firmware because it's more complex than any host driver wants to
	 * get involved with and it's different per chip and this is almost
	 * certainly wrong.  Firmware would be wrong as well, but it would be
	 * a lot easier to fix in one place ...  For now we do something very
	 * simple (and hopefully less wrong).
	 */
	if (!is_t4(adap->params.chip) && cong >= 0) {
		u32 param, val, ch_map = 0;
		int i;
		u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log;

		param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
			 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id));
		if (cong == 0) {
			val = CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_QUEUE_X);
		} else {
			val =
			    CONMCTXT_CNGTPMODE_V(CONMCTXT_CNGTPMODE_CHANNEL_X);
			for (i = 0; i < 4; i++) {
				if (cong & (1 << i))
					ch_map |= 1 << (i << cng_ch_bits_log);
			}
			val |= CONMCTXT_CNGCHMAP_V(ch_map);
		}
		ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
				    &param, &val);
		if (ret)
			dev_warn(adap->pdev_dev, "Failed to set Congestion"
				 " Manager Context for Ingress Queue %d: %d\n",
				 iq->cntxt_id, -ret);
	}

	return 0;

fl_nomem:
	ret = -ENOMEM;
err:
	/* Unwind: free whichever rings were successfully allocated. */
	if (iq->desc) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		iq->desc = NULL;
	}
	if (fl && fl->desc) {
		kfree(fl->sdesc);
		fl->sdesc = NULL;
		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
				  fl->desc, fl->addr);
		fl->desc = NULL;
	}
	return ret;
}
45408c2ecf20Sopenharmony_ci
45418c2ecf20Sopenharmony_cistatic void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
45428c2ecf20Sopenharmony_ci{
45438c2ecf20Sopenharmony_ci	q->cntxt_id = id;
45448c2ecf20Sopenharmony_ci	q->bar2_addr = bar2_address(adap,
45458c2ecf20Sopenharmony_ci				    q->cntxt_id,
45468c2ecf20Sopenharmony_ci				    T4_BAR2_QTYPE_EGRESS,
45478c2ecf20Sopenharmony_ci				    &q->bar2_qid);
45488c2ecf20Sopenharmony_ci	q->in_use = 0;
45498c2ecf20Sopenharmony_ci	q->cidx = q->pidx = 0;
45508c2ecf20Sopenharmony_ci	q->stops = q->restarts = 0;
45518c2ecf20Sopenharmony_ci	q->stat = (void *)&q->desc[q->size];
45528c2ecf20Sopenharmony_ci	spin_lock_init(&q->db_lock);
45538c2ecf20Sopenharmony_ci	adap->sge.egr_map[id - adap->sge.egr_start] = q;
45548c2ecf20Sopenharmony_ci}
45558c2ecf20Sopenharmony_ci
45568c2ecf20Sopenharmony_ci/**
45578c2ecf20Sopenharmony_ci *	t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
45588c2ecf20Sopenharmony_ci *	@adap: the adapter
45598c2ecf20Sopenharmony_ci *	@txq: the SGE Ethernet TX Queue to initialize
45608c2ecf20Sopenharmony_ci *	@dev: the Linux Network Device
45618c2ecf20Sopenharmony_ci *	@netdevq: the corresponding Linux TX Queue
45628c2ecf20Sopenharmony_ci *	@iqid: the Ingress Queue to which to deliver CIDX Update messages
45638c2ecf20Sopenharmony_ci *	@dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
45648c2ecf20Sopenharmony_ci */
45658c2ecf20Sopenharmony_ciint t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
45668c2ecf20Sopenharmony_ci			 struct net_device *dev, struct netdev_queue *netdevq,
45678c2ecf20Sopenharmony_ci			 unsigned int iqid, u8 dbqt)
45688c2ecf20Sopenharmony_ci{
45698c2ecf20Sopenharmony_ci	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
45708c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(dev);
45718c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
45728c2ecf20Sopenharmony_ci	struct fw_eq_eth_cmd c;
45738c2ecf20Sopenharmony_ci	int ret, nentries;
45748c2ecf20Sopenharmony_ci
45758c2ecf20Sopenharmony_ci	/* Add status entries */
45768c2ecf20Sopenharmony_ci	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
45778c2ecf20Sopenharmony_ci
45788c2ecf20Sopenharmony_ci	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
45798c2ecf20Sopenharmony_ci			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
45808c2ecf20Sopenharmony_ci			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
45818c2ecf20Sopenharmony_ci			netdev_queue_numa_node_read(netdevq));
45828c2ecf20Sopenharmony_ci	if (!txq->q.desc)
45838c2ecf20Sopenharmony_ci		return -ENOMEM;
45848c2ecf20Sopenharmony_ci
45858c2ecf20Sopenharmony_ci	memset(&c, 0, sizeof(c));
45868c2ecf20Sopenharmony_ci	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
45878c2ecf20Sopenharmony_ci			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
45888c2ecf20Sopenharmony_ci			    FW_EQ_ETH_CMD_PFN_V(adap->pf) |
45898c2ecf20Sopenharmony_ci			    FW_EQ_ETH_CMD_VFN_V(0));
45908c2ecf20Sopenharmony_ci	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
45918c2ecf20Sopenharmony_ci				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
45928c2ecf20Sopenharmony_ci
45938c2ecf20Sopenharmony_ci	/* For TX Ethernet Queues using the SGE Doorbell Queue Timer
45948c2ecf20Sopenharmony_ci	 * mechanism, we use Ingress Queue messages for Hardware Consumer
45958c2ecf20Sopenharmony_ci	 * Index Updates on the TX Queue.  Otherwise we have the Hardware
45968c2ecf20Sopenharmony_ci	 * write the CIDX Updates into the Status Page at the end of the
45978c2ecf20Sopenharmony_ci	 * TX Queue.
45988c2ecf20Sopenharmony_ci	 */
45998c2ecf20Sopenharmony_ci	c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
46008c2ecf20Sopenharmony_ci				     FW_EQ_ETH_CMD_VIID_V(pi->viid));
46018c2ecf20Sopenharmony_ci
46028c2ecf20Sopenharmony_ci	c.fetchszm_to_iqid =
46038c2ecf20Sopenharmony_ci		htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
46048c2ecf20Sopenharmony_ci		      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
46058c2ecf20Sopenharmony_ci		      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
46068c2ecf20Sopenharmony_ci
46078c2ecf20Sopenharmony_ci	/* Note that the CIDX Flush Threshold should match MAX_TX_RECLAIM. */
46088c2ecf20Sopenharmony_ci	c.dcaen_to_eqsize =
46098c2ecf20Sopenharmony_ci		htonl(FW_EQ_ETH_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
46108c2ecf20Sopenharmony_ci					    ? FETCHBURSTMIN_64B_X
46118c2ecf20Sopenharmony_ci					    : FETCHBURSTMIN_64B_T6_X) |
46128c2ecf20Sopenharmony_ci		      FW_EQ_ETH_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
46138c2ecf20Sopenharmony_ci		      FW_EQ_ETH_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
46148c2ecf20Sopenharmony_ci		      FW_EQ_ETH_CMD_EQSIZE_V(nentries));
46158c2ecf20Sopenharmony_ci
46168c2ecf20Sopenharmony_ci	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
46178c2ecf20Sopenharmony_ci
46188c2ecf20Sopenharmony_ci	/* If we're using the SGE Doorbell Queue Timer mechanism, pass in the
46198c2ecf20Sopenharmony_ci	 * currently configured Timer Index.  THis can be changed later via an
46208c2ecf20Sopenharmony_ci	 * ethtool -C tx-usecs {Timer Val} command.  Note that the SGE
46218c2ecf20Sopenharmony_ci	 * Doorbell Queue mode is currently automatically enabled in the
46228c2ecf20Sopenharmony_ci	 * Firmware by setting either AUTOEQUEQE or AUTOEQUIQE ...
46238c2ecf20Sopenharmony_ci	 */
46248c2ecf20Sopenharmony_ci	if (dbqt)
46258c2ecf20Sopenharmony_ci		c.timeren_timerix =
46268c2ecf20Sopenharmony_ci			cpu_to_be32(FW_EQ_ETH_CMD_TIMEREN_F |
46278c2ecf20Sopenharmony_ci				    FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix));
46288c2ecf20Sopenharmony_ci
46298c2ecf20Sopenharmony_ci	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
46308c2ecf20Sopenharmony_ci	if (ret) {
46318c2ecf20Sopenharmony_ci		kfree(txq->q.sdesc);
46328c2ecf20Sopenharmony_ci		txq->q.sdesc = NULL;
46338c2ecf20Sopenharmony_ci		dma_free_coherent(adap->pdev_dev,
46348c2ecf20Sopenharmony_ci				  nentries * sizeof(struct tx_desc),
46358c2ecf20Sopenharmony_ci				  txq->q.desc, txq->q.phys_addr);
46368c2ecf20Sopenharmony_ci		txq->q.desc = NULL;
46378c2ecf20Sopenharmony_ci		return ret;
46388c2ecf20Sopenharmony_ci	}
46398c2ecf20Sopenharmony_ci
46408c2ecf20Sopenharmony_ci	txq->q.q_type = CXGB4_TXQ_ETH;
46418c2ecf20Sopenharmony_ci	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
46428c2ecf20Sopenharmony_ci	txq->txq = netdevq;
46438c2ecf20Sopenharmony_ci	txq->tso = 0;
46448c2ecf20Sopenharmony_ci	txq->uso = 0;
46458c2ecf20Sopenharmony_ci	txq->tx_cso = 0;
46468c2ecf20Sopenharmony_ci	txq->vlan_ins = 0;
46478c2ecf20Sopenharmony_ci	txq->mapping_err = 0;
46488c2ecf20Sopenharmony_ci	txq->dbqt = dbqt;
46498c2ecf20Sopenharmony_ci
46508c2ecf20Sopenharmony_ci	return 0;
46518c2ecf20Sopenharmony_ci}
46528c2ecf20Sopenharmony_ci
46538c2ecf20Sopenharmony_ciint t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
46548c2ecf20Sopenharmony_ci			  struct net_device *dev, unsigned int iqid,
46558c2ecf20Sopenharmony_ci			  unsigned int cmplqid)
46568c2ecf20Sopenharmony_ci{
46578c2ecf20Sopenharmony_ci	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
46588c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(dev);
46598c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
46608c2ecf20Sopenharmony_ci	struct fw_eq_ctrl_cmd c;
46618c2ecf20Sopenharmony_ci	int ret, nentries;
46628c2ecf20Sopenharmony_ci
46638c2ecf20Sopenharmony_ci	/* Add status entries */
46648c2ecf20Sopenharmony_ci	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
46658c2ecf20Sopenharmony_ci
46668c2ecf20Sopenharmony_ci	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
46678c2ecf20Sopenharmony_ci				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
46688c2ecf20Sopenharmony_ci				 NULL, 0, dev_to_node(adap->pdev_dev));
46698c2ecf20Sopenharmony_ci	if (!txq->q.desc)
46708c2ecf20Sopenharmony_ci		return -ENOMEM;
46718c2ecf20Sopenharmony_ci
46728c2ecf20Sopenharmony_ci	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
46738c2ecf20Sopenharmony_ci			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
46748c2ecf20Sopenharmony_ci			    FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
46758c2ecf20Sopenharmony_ci			    FW_EQ_CTRL_CMD_VFN_V(0));
46768c2ecf20Sopenharmony_ci	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
46778c2ecf20Sopenharmony_ci				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
46788c2ecf20Sopenharmony_ci	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
46798c2ecf20Sopenharmony_ci	c.physeqid_pkd = htonl(0);
46808c2ecf20Sopenharmony_ci	c.fetchszm_to_iqid =
46818c2ecf20Sopenharmony_ci		htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
46828c2ecf20Sopenharmony_ci		      FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
46838c2ecf20Sopenharmony_ci		      FW_EQ_CTRL_CMD_FETCHRO_F | FW_EQ_CTRL_CMD_IQID_V(iqid));
46848c2ecf20Sopenharmony_ci	c.dcaen_to_eqsize =
46858c2ecf20Sopenharmony_ci		htonl(FW_EQ_CTRL_CMD_FBMIN_V(chip_ver <= CHELSIO_T5
46868c2ecf20Sopenharmony_ci					     ? FETCHBURSTMIN_64B_X
46878c2ecf20Sopenharmony_ci					     : FETCHBURSTMIN_64B_T6_X) |
46888c2ecf20Sopenharmony_ci		      FW_EQ_CTRL_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
46898c2ecf20Sopenharmony_ci		      FW_EQ_CTRL_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
46908c2ecf20Sopenharmony_ci		      FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
46918c2ecf20Sopenharmony_ci	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
46928c2ecf20Sopenharmony_ci
46938c2ecf20Sopenharmony_ci	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
46948c2ecf20Sopenharmony_ci	if (ret) {
46958c2ecf20Sopenharmony_ci		dma_free_coherent(adap->pdev_dev,
46968c2ecf20Sopenharmony_ci				  nentries * sizeof(struct tx_desc),
46978c2ecf20Sopenharmony_ci				  txq->q.desc, txq->q.phys_addr);
46988c2ecf20Sopenharmony_ci		txq->q.desc = NULL;
46998c2ecf20Sopenharmony_ci		return ret;
47008c2ecf20Sopenharmony_ci	}
47018c2ecf20Sopenharmony_ci
47028c2ecf20Sopenharmony_ci	txq->q.q_type = CXGB4_TXQ_CTRL;
47038c2ecf20Sopenharmony_ci	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
47048c2ecf20Sopenharmony_ci	txq->adap = adap;
47058c2ecf20Sopenharmony_ci	skb_queue_head_init(&txq->sendq);
47068c2ecf20Sopenharmony_ci	tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
47078c2ecf20Sopenharmony_ci	txq->full = 0;
47088c2ecf20Sopenharmony_ci	return 0;
47098c2ecf20Sopenharmony_ci}
47108c2ecf20Sopenharmony_ci
47118c2ecf20Sopenharmony_ciint t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
47128c2ecf20Sopenharmony_ci			unsigned int cmplqid)
47138c2ecf20Sopenharmony_ci{
47148c2ecf20Sopenharmony_ci	u32 param, val;
47158c2ecf20Sopenharmony_ci
47168c2ecf20Sopenharmony_ci	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
47178c2ecf20Sopenharmony_ci		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL) |
47188c2ecf20Sopenharmony_ci		 FW_PARAMS_PARAM_YZ_V(eqid));
47198c2ecf20Sopenharmony_ci	val = cmplqid;
47208c2ecf20Sopenharmony_ci	return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val);
47218c2ecf20Sopenharmony_ci}
47228c2ecf20Sopenharmony_ci
47238c2ecf20Sopenharmony_cistatic int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_txq *q,
47248c2ecf20Sopenharmony_ci				 struct net_device *dev, u32 cmd, u32 iqid)
47258c2ecf20Sopenharmony_ci{
47268c2ecf20Sopenharmony_ci	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
47278c2ecf20Sopenharmony_ci	struct port_info *pi = netdev_priv(dev);
47288c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
47298c2ecf20Sopenharmony_ci	struct fw_eq_ofld_cmd c;
47308c2ecf20Sopenharmony_ci	u32 fb_min, nentries;
47318c2ecf20Sopenharmony_ci	int ret;
47328c2ecf20Sopenharmony_ci
47338c2ecf20Sopenharmony_ci	/* Add status entries */
47348c2ecf20Sopenharmony_ci	nentries = q->size + s->stat_len / sizeof(struct tx_desc);
47358c2ecf20Sopenharmony_ci	q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc),
47368c2ecf20Sopenharmony_ci			     sizeof(struct tx_sw_desc), &q->phys_addr,
47378c2ecf20Sopenharmony_ci			     &q->sdesc, s->stat_len, NUMA_NO_NODE);
47388c2ecf20Sopenharmony_ci	if (!q->desc)
47398c2ecf20Sopenharmony_ci		return -ENOMEM;
47408c2ecf20Sopenharmony_ci
47418c2ecf20Sopenharmony_ci	if (chip_ver <= CHELSIO_T5)
47428c2ecf20Sopenharmony_ci		fb_min = FETCHBURSTMIN_64B_X;
47438c2ecf20Sopenharmony_ci	else
47448c2ecf20Sopenharmony_ci		fb_min = FETCHBURSTMIN_64B_T6_X;
47458c2ecf20Sopenharmony_ci
47468c2ecf20Sopenharmony_ci	memset(&c, 0, sizeof(c));
47478c2ecf20Sopenharmony_ci	c.op_to_vfn = htonl(FW_CMD_OP_V(cmd) | FW_CMD_REQUEST_F |
47488c2ecf20Sopenharmony_ci			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
47498c2ecf20Sopenharmony_ci			    FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
47508c2ecf20Sopenharmony_ci			    FW_EQ_OFLD_CMD_VFN_V(0));
47518c2ecf20Sopenharmony_ci	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
47528c2ecf20Sopenharmony_ci				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
47538c2ecf20Sopenharmony_ci	c.fetchszm_to_iqid =
47548c2ecf20Sopenharmony_ci		htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
47558c2ecf20Sopenharmony_ci		      FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
47568c2ecf20Sopenharmony_ci		      FW_EQ_OFLD_CMD_FETCHRO_F | FW_EQ_OFLD_CMD_IQID_V(iqid));
47578c2ecf20Sopenharmony_ci	c.dcaen_to_eqsize =
47588c2ecf20Sopenharmony_ci		htonl(FW_EQ_OFLD_CMD_FBMIN_V(fb_min) |
47598c2ecf20Sopenharmony_ci		      FW_EQ_OFLD_CMD_FBMAX_V(FETCHBURSTMAX_512B_X) |
47608c2ecf20Sopenharmony_ci		      FW_EQ_OFLD_CMD_CIDXFTHRESH_V(CIDXFLUSHTHRESH_32_X) |
47618c2ecf20Sopenharmony_ci		      FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
47628c2ecf20Sopenharmony_ci	c.eqaddr = cpu_to_be64(q->phys_addr);
47638c2ecf20Sopenharmony_ci
47648c2ecf20Sopenharmony_ci	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
47658c2ecf20Sopenharmony_ci	if (ret) {
47668c2ecf20Sopenharmony_ci		kfree(q->sdesc);
47678c2ecf20Sopenharmony_ci		q->sdesc = NULL;
47688c2ecf20Sopenharmony_ci		dma_free_coherent(adap->pdev_dev,
47698c2ecf20Sopenharmony_ci				  nentries * sizeof(struct tx_desc),
47708c2ecf20Sopenharmony_ci				  q->desc, q->phys_addr);
47718c2ecf20Sopenharmony_ci		q->desc = NULL;
47728c2ecf20Sopenharmony_ci		return ret;
47738c2ecf20Sopenharmony_ci	}
47748c2ecf20Sopenharmony_ci
47758c2ecf20Sopenharmony_ci	init_txq(adap, q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
47768c2ecf20Sopenharmony_ci	return 0;
47778c2ecf20Sopenharmony_ci}
47788c2ecf20Sopenharmony_ci
47798c2ecf20Sopenharmony_ciint t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
47808c2ecf20Sopenharmony_ci			 struct net_device *dev, unsigned int iqid,
47818c2ecf20Sopenharmony_ci			 unsigned int uld_type)
47828c2ecf20Sopenharmony_ci{
47838c2ecf20Sopenharmony_ci	u32 cmd = FW_EQ_OFLD_CMD;
47848c2ecf20Sopenharmony_ci	int ret;
47858c2ecf20Sopenharmony_ci
47868c2ecf20Sopenharmony_ci	if (unlikely(uld_type == CXGB4_TX_CRYPTO))
47878c2ecf20Sopenharmony_ci		cmd = FW_EQ_CTRL_CMD;
47888c2ecf20Sopenharmony_ci
47898c2ecf20Sopenharmony_ci	ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid);
47908c2ecf20Sopenharmony_ci	if (ret)
47918c2ecf20Sopenharmony_ci		return ret;
47928c2ecf20Sopenharmony_ci
47938c2ecf20Sopenharmony_ci	txq->q.q_type = CXGB4_TXQ_ULD;
47948c2ecf20Sopenharmony_ci	txq->adap = adap;
47958c2ecf20Sopenharmony_ci	skb_queue_head_init(&txq->sendq);
47968c2ecf20Sopenharmony_ci	tasklet_setup(&txq->qresume_tsk, restart_ofldq);
47978c2ecf20Sopenharmony_ci	txq->full = 0;
47988c2ecf20Sopenharmony_ci	txq->mapping_err = 0;
47998c2ecf20Sopenharmony_ci	return 0;
48008c2ecf20Sopenharmony_ci}
48018c2ecf20Sopenharmony_ci
48028c2ecf20Sopenharmony_ciint t4_sge_alloc_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq,
48038c2ecf20Sopenharmony_ci			     struct net_device *dev, u32 iqid)
48048c2ecf20Sopenharmony_ci{
48058c2ecf20Sopenharmony_ci	int ret;
48068c2ecf20Sopenharmony_ci
48078c2ecf20Sopenharmony_ci	ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid);
48088c2ecf20Sopenharmony_ci	if (ret)
48098c2ecf20Sopenharmony_ci		return ret;
48108c2ecf20Sopenharmony_ci
48118c2ecf20Sopenharmony_ci	txq->q.q_type = CXGB4_TXQ_ULD;
48128c2ecf20Sopenharmony_ci	spin_lock_init(&txq->lock);
48138c2ecf20Sopenharmony_ci	txq->adap = adap;
48148c2ecf20Sopenharmony_ci	txq->tso = 0;
48158c2ecf20Sopenharmony_ci	txq->uso = 0;
48168c2ecf20Sopenharmony_ci	txq->tx_cso = 0;
48178c2ecf20Sopenharmony_ci	txq->vlan_ins = 0;
48188c2ecf20Sopenharmony_ci	txq->mapping_err = 0;
48198c2ecf20Sopenharmony_ci	return 0;
48208c2ecf20Sopenharmony_ci}
48218c2ecf20Sopenharmony_ci
48228c2ecf20Sopenharmony_civoid free_txq(struct adapter *adap, struct sge_txq *q)
48238c2ecf20Sopenharmony_ci{
48248c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
48258c2ecf20Sopenharmony_ci
48268c2ecf20Sopenharmony_ci	dma_free_coherent(adap->pdev_dev,
48278c2ecf20Sopenharmony_ci			  q->size * sizeof(struct tx_desc) + s->stat_len,
48288c2ecf20Sopenharmony_ci			  q->desc, q->phys_addr);
48298c2ecf20Sopenharmony_ci	q->cntxt_id = 0;
48308c2ecf20Sopenharmony_ci	q->sdesc = NULL;
48318c2ecf20Sopenharmony_ci	q->desc = NULL;
48328c2ecf20Sopenharmony_ci}
48338c2ecf20Sopenharmony_ci
48348c2ecf20Sopenharmony_civoid free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
48358c2ecf20Sopenharmony_ci		  struct sge_fl *fl)
48368c2ecf20Sopenharmony_ci{
48378c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
48388c2ecf20Sopenharmony_ci	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
48398c2ecf20Sopenharmony_ci
48408c2ecf20Sopenharmony_ci	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
48418c2ecf20Sopenharmony_ci	t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
48428c2ecf20Sopenharmony_ci		   rq->cntxt_id, fl_id, 0xffff);
48438c2ecf20Sopenharmony_ci	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
48448c2ecf20Sopenharmony_ci			  rq->desc, rq->phys_addr);
48458c2ecf20Sopenharmony_ci	netif_napi_del(&rq->napi);
48468c2ecf20Sopenharmony_ci	rq->netdev = NULL;
48478c2ecf20Sopenharmony_ci	rq->cntxt_id = rq->abs_id = 0;
48488c2ecf20Sopenharmony_ci	rq->desc = NULL;
48498c2ecf20Sopenharmony_ci
48508c2ecf20Sopenharmony_ci	if (fl) {
48518c2ecf20Sopenharmony_ci		free_rx_bufs(adap, fl, fl->avail);
48528c2ecf20Sopenharmony_ci		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
48538c2ecf20Sopenharmony_ci				  fl->desc, fl->addr);
48548c2ecf20Sopenharmony_ci		kfree(fl->sdesc);
48558c2ecf20Sopenharmony_ci		fl->sdesc = NULL;
48568c2ecf20Sopenharmony_ci		fl->cntxt_id = 0;
48578c2ecf20Sopenharmony_ci		fl->desc = NULL;
48588c2ecf20Sopenharmony_ci	}
48598c2ecf20Sopenharmony_ci}
48608c2ecf20Sopenharmony_ci
48618c2ecf20Sopenharmony_ci/**
48628c2ecf20Sopenharmony_ci *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
48638c2ecf20Sopenharmony_ci *      @adap: the adapter
48648c2ecf20Sopenharmony_ci *      @n: number of queues
48658c2ecf20Sopenharmony_ci *      @q: pointer to first queue
48668c2ecf20Sopenharmony_ci *
48678c2ecf20Sopenharmony_ci *      Release the resources of a consecutive block of offload Rx queues.
48688c2ecf20Sopenharmony_ci */
48698c2ecf20Sopenharmony_civoid t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
48708c2ecf20Sopenharmony_ci{
48718c2ecf20Sopenharmony_ci	for ( ; n; n--, q++)
48728c2ecf20Sopenharmony_ci		if (q->rspq.desc)
48738c2ecf20Sopenharmony_ci			free_rspq_fl(adap, &q->rspq,
48748c2ecf20Sopenharmony_ci				     q->fl.size ? &q->fl : NULL);
48758c2ecf20Sopenharmony_ci}
48768c2ecf20Sopenharmony_ci
48778c2ecf20Sopenharmony_civoid t4_sge_free_ethofld_txq(struct adapter *adap, struct sge_eohw_txq *txq)
48788c2ecf20Sopenharmony_ci{
48798c2ecf20Sopenharmony_ci	if (txq->q.desc) {
48808c2ecf20Sopenharmony_ci		t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
48818c2ecf20Sopenharmony_ci				txq->q.cntxt_id);
48828c2ecf20Sopenharmony_ci		free_tx_desc(adap, &txq->q, txq->q.in_use, false);
48838c2ecf20Sopenharmony_ci		kfree(txq->q.sdesc);
48848c2ecf20Sopenharmony_ci		free_txq(adap, &txq->q);
48858c2ecf20Sopenharmony_ci	}
48868c2ecf20Sopenharmony_ci}
48878c2ecf20Sopenharmony_ci
/**
 *	t4_free_sge_resources - free SGE resources
 *	@adap: the adapter
 *
 *	Frees resources used by the SGE queue sets: Ethernet RX/TX queues,
 *	control TX queues, the firmware event queue, the non-data interrupt
 *	queue and (on non-T4 chips) the PTP TX queue, together with any
 *	MSI-X vectors mapped to them.  RX queues are stopped first so they
 *	drain before being freed.
 */
void t4_free_sge_resources(struct adapter *adap)
{
	int i;
	struct sge_eth_rxq *eq;
	struct sge_eth_txq *etq;

	/* stop all Rx queues in order to start them draining */
	/* 0xffff is used for absent queue IDs, matching the convention in
	 * free_rspq_fl() — TODO confirm against the t4_iq_stop() contract.
	 */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			t4_iq_stop(adap, adap->mbox, adap->pf, 0,
				   FW_IQ_TYPE_FL_INT_CAP,
				   eq->rspq.cntxt_id,
				   eq->fl.size ? eq->fl.cntxt_id : 0xffff,
				   0xffff);
	}

	/* clean up Ethernet Tx/Rx queues */
	for (i = 0; i < adap->sge.ethqsets; i++) {
		eq = &adap->sge.ethrxq[i];
		if (eq->rspq.desc)
			free_rspq_fl(adap, &eq->rspq,
				     eq->fl.size ? &eq->fl : NULL);
		/* Return the MSI-X vector mapped to this RX queue, if any. */
		if (eq->msix) {
			cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx);
			eq->msix = NULL;
		}

		etq = &adap->sge.ethtxq[i];
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			/* Hold the netdev TX queue lock while reclaiming
			 * in-flight descriptors.
			 */
			__netif_tx_lock_bh(etq->txq);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			__netif_tx_unlock_bh(etq->txq);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clean up control Tx queues */
	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];

		if (cq->q.desc) {
			/* Kill the resume tasklet before freeing the queue
			 * it would otherwise touch.
			 */
			tasklet_kill(&cq->qresume_tsk);
			t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
					cq->q.cntxt_id);
			__skb_queue_purge(&cq->sendq);
			free_txq(adap, &cq->q);
		}
	}

	/* Firmware event queue and its MSI-X vector (if one was assigned). */
	if (adap->sge.fw_evtq.desc) {
		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
		if (adap->sge.fwevtq_msix_idx >= 0)
			cxgb4_free_msix_idx_in_bmap(adap,
						    adap->sge.fwevtq_msix_idx);
	}

	if (adap->sge.nd_msix_idx >= 0)
		cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx);

	if (adap->sge.intrq.desc)
		free_rspq_fl(adap, &adap->sge.intrq, NULL);

	/* The PTP TX queue only exists on non-T4 chips. */
	if (!is_t4(adap->params.chip)) {
		etq = &adap->sge.ptptxq;
		if (etq->q.desc) {
			t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
				       etq->q.cntxt_id);
			spin_lock_bh(&adap->ptp_lock);
			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
			spin_unlock_bh(&adap->ptp_lock);
			kfree(etq->q.sdesc);
			free_txq(adap, &etq->q);
		}
	}

	/* clear the reverse egress queue map */
	memset(adap->sge.egr_map, 0,
	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
}
49778c2ecf20Sopenharmony_ci
49788c2ecf20Sopenharmony_civoid t4_sge_start(struct adapter *adap)
49798c2ecf20Sopenharmony_ci{
49808c2ecf20Sopenharmony_ci	adap->sge.ethtxq_rover = 0;
49818c2ecf20Sopenharmony_ci	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
49828c2ecf20Sopenharmony_ci	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
49838c2ecf20Sopenharmony_ci}
49848c2ecf20Sopenharmony_ci
49858c2ecf20Sopenharmony_ci/**
49868c2ecf20Sopenharmony_ci *	t4_sge_stop - disable SGE operation
49878c2ecf20Sopenharmony_ci *	@adap: the adapter
49888c2ecf20Sopenharmony_ci *
49898c2ecf20Sopenharmony_ci *	Stop tasklets and timers associated with the DMA engine.  Note that
49908c2ecf20Sopenharmony_ci *	this is effective only if measures have been taken to disable any HW
49918c2ecf20Sopenharmony_ci *	events that may restart them.
49928c2ecf20Sopenharmony_ci */
49938c2ecf20Sopenharmony_civoid t4_sge_stop(struct adapter *adap)
49948c2ecf20Sopenharmony_ci{
49958c2ecf20Sopenharmony_ci	int i;
49968c2ecf20Sopenharmony_ci	struct sge *s = &adap->sge;
49978c2ecf20Sopenharmony_ci
49988c2ecf20Sopenharmony_ci	if (s->rx_timer.function)
49998c2ecf20Sopenharmony_ci		del_timer_sync(&s->rx_timer);
50008c2ecf20Sopenharmony_ci	if (s->tx_timer.function)
50018c2ecf20Sopenharmony_ci		del_timer_sync(&s->tx_timer);
50028c2ecf20Sopenharmony_ci
50038c2ecf20Sopenharmony_ci	if (is_offload(adap)) {
50048c2ecf20Sopenharmony_ci		struct sge_uld_txq_info *txq_info;
50058c2ecf20Sopenharmony_ci
50068c2ecf20Sopenharmony_ci		txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD];
50078c2ecf20Sopenharmony_ci		if (txq_info) {
50088c2ecf20Sopenharmony_ci			struct sge_uld_txq *txq = txq_info->uldtxq;
50098c2ecf20Sopenharmony_ci
50108c2ecf20Sopenharmony_ci			for_each_ofldtxq(&adap->sge, i) {
50118c2ecf20Sopenharmony_ci				if (txq->q.desc)
50128c2ecf20Sopenharmony_ci					tasklet_kill(&txq->qresume_tsk);
50138c2ecf20Sopenharmony_ci			}
50148c2ecf20Sopenharmony_ci		}
50158c2ecf20Sopenharmony_ci	}
50168c2ecf20Sopenharmony_ci
50178c2ecf20Sopenharmony_ci	if (is_pci_uld(adap)) {
50188c2ecf20Sopenharmony_ci		struct sge_uld_txq_info *txq_info;
50198c2ecf20Sopenharmony_ci
50208c2ecf20Sopenharmony_ci		txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
50218c2ecf20Sopenharmony_ci		if (txq_info) {
50228c2ecf20Sopenharmony_ci			struct sge_uld_txq *txq = txq_info->uldtxq;
50238c2ecf20Sopenharmony_ci
50248c2ecf20Sopenharmony_ci			for_each_ofldtxq(&adap->sge, i) {
50258c2ecf20Sopenharmony_ci				if (txq->q.desc)
50268c2ecf20Sopenharmony_ci					tasklet_kill(&txq->qresume_tsk);
50278c2ecf20Sopenharmony_ci			}
50288c2ecf20Sopenharmony_ci		}
50298c2ecf20Sopenharmony_ci	}
50308c2ecf20Sopenharmony_ci
50318c2ecf20Sopenharmony_ci	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
50328c2ecf20Sopenharmony_ci		struct sge_ctrl_txq *cq = &s->ctrlq[i];
50338c2ecf20Sopenharmony_ci
50348c2ecf20Sopenharmony_ci		if (cq->q.desc)
50358c2ecf20Sopenharmony_ci			tasklet_kill(&cq->qresume_tsk);
50368c2ecf20Sopenharmony_ci	}
50378c2ecf20Sopenharmony_ci}
50388c2ecf20Sopenharmony_ci
/**
 *	t4_sge_init_soft - grab core SGE values needed by SGE code
 *	@adap: the adapter
 *
 *	We need to grab the SGE operating parameters that we need to have
 *	in order to do our job and make sure we can live with them.
 *
 *	Reads the free-list buffer sizes, RX interrupt holdoff timers and
 *	RX counter thresholds from chip registers into the software SGE
 *	state.  Returns 0 on success or -EINVAL when the hardware
 *	configuration is incompatible with this driver.
 */

static int t4_sge_init_soft(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
	u32 ingress_rx_threshold;

	/*
	 * Verify that CPL messages are going to the Ingress Queue for
	 * process_responses() and that only packet data is going to the
	 * Free Lists.
	 */
	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
		return -EINVAL;
	}

	/*
	 * Validate the Host Buffer Register Array indices that we want to
	 * use ...
	 *
	 * XXX Note that we should really read through the Host Buffer Size
	 * XXX register array and find the indices of the Buffer Sizes which
	 * XXX meet our needs!
	 */
	/* Each entry in the SGE_FL_BUFFER_SIZE array is a 32-bit register. */
	#define READ_FL_BUF(x) \
		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))

	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);

	/* We only bother using the Large Page logic if the Large Page Buffer
	 * is larger than our Page Size Buffer.
	 */
	if (fl_large_pg <= fl_small_pg)
		fl_large_pg = 0;

	#undef READ_FL_BUF

	/* The Page Size Buffer must be exactly equal to our Page Size and the
	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
	 */
	if (fl_small_pg != PAGE_SIZE ||
	    (fl_large_pg & (fl_large_pg-1)) != 0) {
		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
			fl_small_pg, fl_large_pg);
		return -EINVAL;
	}
	/* Page-allocation order of the large FL buffer relative to PAGE_SIZE. */
	if (fl_large_pg)
		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;

	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
			fl_small_mtu, fl_large_mtu);
		return -EINVAL;
	}

	/*
	 * Retrieve our RX interrupt holdoff timer values and counter
	 * threshold values from the SGE parameters.  Timer values are
	 * converted from core clock ticks to microseconds.
	 */
	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
	s->timer_val[0] = core_ticks_to_us(adap,
		TIMERVALUE0_G(timer_value_0_and_1));
	s->timer_val[1] = core_ticks_to_us(adap,
		TIMERVALUE1_G(timer_value_0_and_1));
	s->timer_val[2] = core_ticks_to_us(adap,
		TIMERVALUE2_G(timer_value_2_and_3));
	s->timer_val[3] = core_ticks_to_us(adap,
		TIMERVALUE3_G(timer_value_2_and_3));
	s->timer_val[4] = core_ticks_to_us(adap,
		TIMERVALUE4_G(timer_value_4_and_5));
	s->timer_val[5] = core_ticks_to_us(adap,
		TIMERVALUE5_G(timer_value_4_and_5));

	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);

	return 0;
}
51368c2ecf20Sopenharmony_ci
/**
 *     t4_sge_init - initialize SGE
 *     @adap: the adapter
 *
 *     Perform low-level SGE code initialization needed every time after a
 *     chip reset.  Reads SGE control registers into the software state,
 *     derives the free-list starvation threshold, and sets up the
 *     periodic RX/TX maintenance timers.  Returns 0 on success or a
 *     negative errno.
 */
int t4_sge_init(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	u32 sge_control, sge_conm_ctrl;
	int ret, egress_threshold;

	/*
	 * Ingress Padding Boundary and Egress Status Page Size are set up by
	 * t4_fixup_host_params().
	 */
	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
	s->pktshift = PKTSHIFT_G(sge_control);
	/* Status page is 128 bytes when EGRSTATUSPAGESIZE is set, else 64. */
	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;

	s->fl_align = t4_fl_pkt_align(adap);
	ret = t4_sge_init_soft(adap);
	if (ret < 0)
		return ret;

	/*
	 * A FL with <= fl_starve_thres buffers is starving and a periodic
	 * timer will attempt to refill it.  This needs to be larger than the
	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
	 * stuck waiting for new packets while the SGE is waiting for us to
	 * give it more Free List entries.  (Note that the SGE's Egress
	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
	 * there was only a single field to control this.  For T5 there's the
	 * original field which now only applies to Unpacked Mode Free List
	 * buffers and a new field which only applies to Packed Mode Free List
	 * buffers.
	 */
	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
	switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
	case CHELSIO_T4:
		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
		break;
	case CHELSIO_T5:
		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	case CHELSIO_T6:
		egress_threshold = T6_EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
		break;
	default:
		dev_err(adap->pdev_dev, "Unsupported Chip version %d\n",
			CHELSIO_CHIP_VERSION(adap->params.chip));
		return -EINVAL;
	}
	/* x2 converts from units of 2 FL pointers (see comment above);
	 * +1 keeps the software threshold strictly above the HW threshold.
	 */
	s->fl_starve_thres = 2*egress_threshold + 1;

	t4_idma_monitor_init(adap, &s->idma_monitor);

	/* Set up timers used for recuring callbacks to process RX and TX
	 * administrative tasks.
	 */
	timer_setup(&s->rx_timer, sge_rx_timer_cb, 0);
	timer_setup(&s->tx_timer, sge_tx_timer_cb, 0);

	spin_lock_init(&s->intrq_lock);

	return 0;
}
5205