/*
 * Copyright (c) 2005-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <net/arp.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE 1536

#define SGE_RX_COPY_THRES  256
#define SGE_RX_PULL_LEN    128

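/*
 * Bytes kept out of DMA at the tail of each free-list buffer.  The reserved
 * tail of a page's final chunk is where alloc_pg_chunk() keeps the page's
 * chunk reference count (p_cnt).
 */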
#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
 * It must be a divisor of PAGE_SIZE.  If set to 0, FL0 will use sk_buffs
 * directly.
 */
#define FL0_PG_CHUNK_SIZE  2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)

#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)

/*
 * Max number of Rx buffers we replenish at a time.
 */
#define MAX_RX_REFILL 16U
/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD (HZ / 4)
#define TX_RECLAIM_TIMER_CHUNK 64U
#define TX_RECLAIM_CHUNK 16U

/* WR size in bytes */
#define WR_LEN (WR_FLITS * 8)

/*
 * Types of Tx queues in each queue set.  Order here matters, do not change.
 */
enum { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

/* Values for sge_txq.flags */
enum {
	TXQ_RUNNING = 1 << 0,	/* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,	/* last packet rang the doorbell */
};

struct tx_desc {
	__be64 flit[TX_DESC_FLITS];
};

struct rx_desc {
	__be32 addr_lo;
	__be32 len_gen;
	__be32 gen2;
	__be32 addr_hi;
};

struct tx_sw_desc {		/* SW state per Tx descriptor */
	struct sk_buff *skb;
	u8 eop;       /* set if last descriptor for packet */
	u8 addr_idx;  /* buffer index of first SGL entry in descriptor */
	u8 fragidx;   /* first page fragment associated with descriptor */
	s8 sflit;     /* start flit of first SGL entry in descriptor */
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	union {
		struct sk_buff *skb;
		struct fl_pg_chunk pg_chunk;
	};
	DEFINE_DMA_UNMAP_ADDR(dma_addr);
};

struct rsp_desc {		/* response queue descriptor */
	struct rss_header rss_hdr;
	__be32 flags;
	__be32 len_cq;
	u8 imm_data[47];
	u8 intr_gen;
};

/*
 * Holds unmapping information for Tx packets that need deferred unmapping.
 * This structure lives at skb->head and must be allocated by callers.
 */
struct deferred_unmap_info {
	struct pci_dev *pdev;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
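 *
 * For example, with SGE_NUM_GENBITS == 2 the table below maps 1-15 flits to
 * one descriptor, 16-29 flits to two, and so on (consistent with
 * WR_FLITS == 15 in that configuration, per the formula above).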
 */
static u8 flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};

static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx)
{
	return container_of(q, struct sge_qset, fl[qidx]);
}

static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q)
{
	return container_of(q, struct sge_qset, rspq);
}

static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx)
{
	return container_of(q, struct sge_qset, txq[qidx]);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static inline void refill_rspq(struct adapter *adapter,
			       const struct sge_rspq *q, unsigned int credits)
{
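	/*
	 * Order our reads of the response entries before handing them back
	 * to HW for reuse.
	 */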
	rmb();
	t3_write_reg(adapter, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

/**
 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
 *
 *	Returns true if the platform needs sk_buff unmapping.  Since this is
 *	a compile-time constant, the compiler optimizes away the unmapping
 *	code entirely when it returns false.
 */
static inline int need_skb_unmap(void)
{
#ifdef CONFIG_NEED_DMA_MAP_STATE
	return 1;
#else
	return 0;
#endif
}

/**
 *	unmap_skb - unmap a packet main body and its page fragments
 *	@skb: the packet
 *	@q: the Tx queue containing Tx descriptors for the packet
 *	@cidx: index of Tx descriptor
 *	@pdev: the PCI device
 *
 *	Unmap the main body of an sk_buff and its page fragments, if any.
 *	Because of the fairly complicated structure of our SGLs and the desire
 *	to conserve space for metadata, the information necessary to unmap an
 *	sk_buff is spread across the sk_buff itself (buffer lengths), the HW Tx
 *	descriptors (the physical addresses of the various data buffers), and
 *	the SW descriptor state (assorted indices).  The send functions
 *	initialize the indices for the first packet descriptor so we can unmap
 *	the buffers held in the first Tx descriptor here, and we have enough
 *	information at this point to set the state for the next Tx descriptor.
 *
 *	Note that it is possible to clean up the first descriptor of a packet
 *	before the send routines have written the next descriptors, but this
 *	race does not cause any problem.  We just end up writing the unmapping
 *	info for the descriptor first.
 */
static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q,
			     unsigned int cidx, struct pci_dev *pdev)
{
	const struct sg_ent *sgp;
	struct tx_sw_desc *d = &q->sdesc[cidx];
	int nfrags, frag_idx, curflit, j = d->addr_idx;

	sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit];
	frag_idx = d->fragidx;

	if (frag_idx == 0 && skb_headlen(skb)) {
		pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		j = 1;
	}

	curflit = d->sflit + 1 + j;
	nfrags = skb_shinfo(skb)->nr_frags;

	while (frag_idx < nfrags && curflit < WR_FLITS) {
		pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]),
			       skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]),
			       PCI_DMA_TODEVICE);
		j ^= 1;
		if (j == 0) {
			sgp++;
			curflit++;
		}
		curflit++;
		frag_idx++;
	}

	if (frag_idx < nfrags) {   /* SGL continues into next Tx descriptor */
		d = cidx + 1 == q->size ? q->sdesc : d + 1;
		d->fragidx = frag_idx;
		d->addr_idx = j;
		d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */
	}
}

/**
 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim descriptors from
 *	@n: the number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
 *	Tx buffers.  Called with the Tx queue lock held.
 */
static void free_tx_desc(struct adapter *adapter, struct sge_txq *q,
			 unsigned int n)
{
	struct tx_sw_desc *d;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int cidx = q->cidx;

	const int need_unmap = need_skb_unmap() &&
			       q->cntxt_id >= FW_TUNNEL_SGEEC_START;

	d = &q->sdesc[cidx];
	while (n--) {
		if (d->skb) {	/* an SGL is present */
			if (need_unmap)
				unmap_skb(d->skb, q, cidx, pdev);
			if (d->eop) {
				dev_consume_skb_any(d->skb);
				d->skb = NULL;
			}
		}
		++d;
		if (++cidx == q->size) {
			cidx = 0;
			d = q->sdesc;
		}
	}
	q->cidx = cidx;
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@adapter: the adapter
 *	@q: the Tx queue to reclaim completed descriptors from
 *	@chunk: maximum number of descriptors to reclaim
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
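 *
 *	Returns the number of processed descriptors that still remain to be
 *	reclaimed after this call.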
 */
static inline unsigned int reclaim_completed_tx(struct adapter *adapter,
						struct sge_txq *q,
						unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	reclaim = min(chunk, reclaim);
	if (reclaim) {
		free_tx_desc(adapter, q, reclaim);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	return q->processed - q->cleaned;
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static inline int should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

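	/*
	 * Restart only if, after reclaiming all processed descriptors,
	 * less than half of the ring would still be in use.
	 */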
	return q->in_use - r < (q->size >> 1);
}

static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
			  struct rx_sw_desc *d)
{
	if (q->use_pages && d->pg_chunk.page) {
		(*d->pg_chunk.p_cnt)--;
		if (!*d->pg_chunk.p_cnt)
			pci_unmap_page(pdev,
				       d->pg_chunk.mapping,
				       q->alloc_size, PCI_DMA_FROMDEVICE);

		put_page(d->pg_chunk.page);
		d->pg_chunk.page = NULL;
	} else {
		pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
				 q->buf_size, PCI_DMA_FROMDEVICE);
		kfree_skb(d->skb);
		d->skb = NULL;
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@pdev: the PCI device associated with the adapter
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
{
	unsigned int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		clear_rx_desc(pdev, q, d);
		if (++cidx == q->size)
			cidx = 0;
	}

	if (q->pg_chunk.page) {
		__free_pages(q->pg_chunk.page, q->order);
		q->pg_chunk.page = NULL;
	}
}

/**
 *	add_one_rx_buf - add a packet buffer to a free-buffer list
 *	@va:  buffer start VA
 *	@len: the buffer length
 *	@d: the HW Rx descriptor to write
 *	@sd: the SW Rx descriptor to write
 *	@gen: the generation bit value
 *	@pdev: the PCI device associated with the adapter
 *
 *	Add a buffer of the given length to the supplied HW and SW Rx
 *	descriptors.
 */
static inline int add_one_rx_buf(void *va, unsigned int len,
				 struct rx_desc *d, struct rx_sw_desc *sd,
				 unsigned int gen, struct pci_dev *pdev)
{
	dma_addr_t mapping;

	mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
	if (unlikely(pci_dma_mapping_error(pdev, mapping)))
		return -ENOMEM;

	dma_unmap_addr_set(sd, dma_addr, mapping);

	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
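	/*
	 * The buffer address must be visible before the generation bits,
	 * which mark the descriptor valid to HW; dma_wmb() enforces that
	 * ordering.
	 */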
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
				   unsigned int gen)
{
	d->addr_lo = cpu_to_be32(mapping);
	d->addr_hi = cpu_to_be32((u64) mapping >> 32);
	dma_wmb();
	d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
	d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
	return 0;
}

static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
			  struct rx_sw_desc *sd, gfp_t gfp,
			  unsigned int order)
{
	if (!q->pg_chunk.page) {
		dma_addr_t mapping;

		q->pg_chunk.page = alloc_pages(gfp, order);
		if (unlikely(!q->pg_chunk.page))
			return -ENOMEM;
		q->pg_chunk.va = page_address(q->pg_chunk.page);
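		/*
		 * The chunk reference count lives in the reserved bytes at
		 * the very end of the page allocation (see SGE_PG_RSVD).
		 */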
		q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
				    SGE_PG_RSVD;
		q->pg_chunk.offset = 0;
		mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
				       0, q->alloc_size, PCI_DMA_FROMDEVICE);
		if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
			__free_pages(q->pg_chunk.page, order);
			q->pg_chunk.page = NULL;
			return -EIO;
		}
		q->pg_chunk.mapping = mapping;
	}
	sd->pg_chunk = q->pg_chunk;

	prefetch(sd->pg_chunk.p_cnt);

	q->pg_chunk.offset += q->buf_size;
	if (q->pg_chunk.offset == (PAGE_SIZE << order))
		q->pg_chunk.page = NULL;
	else {
		q->pg_chunk.va += q->buf_size;
		get_page(q->pg_chunk.page);
	}

	if (sd->pg_chunk.offset == 0)
		*sd->pg_chunk.p_cnt = 1;
	else
		*sd->pg_chunk.p_cnt += 1;

	return 0;
}

static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		wmb();
		t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@adap: the adapter
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *	@gfp: the gfp flags for allocating new buffers
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers,
 *	allocated with the supplied gfp flags.  The caller must assure that
 *	@n does not exceed the queue's capacity.
 */
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	unsigned int count = 0;

	while (n--) {
		dma_addr_t mapping;
		int err;

		if (q->use_pages) {
			if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
						    q->order))) {
nomem:
				q->alloc_failed++;
				break;
			}
			mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
			dma_unmap_addr_set(sd, dma_addr, mapping);

			add_one_rx_chunk(mapping, d, q->gen);
			pci_dma_sync_single_for_device(adap->pdev, mapping,
						q->buf_size - SGE_PG_RSVD,
						PCI_DMA_FROMDEVICE);
		} else {
			void *buf_start;

			struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
			if (!skb)
				goto nomem;

			sd->skb = skb;
			buf_start = skb->data;
			err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
					     q->gen, adap->pdev);
			if (unlikely(err)) {
				clear_rx_desc(adap->pdev, q, sd);
				break;
			}
		}

		d++;
		sd++;
		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		count++;
	}

	q->credits += count;
	q->pend_cred += count;
	ring_fl_db(adap, q);

	return count;
}

static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits),
		  GFP_ATOMIC | __GFP_COMP);
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adap: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
			   unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;	/* already big endian */
	to->addr_hi = from->addr_hi;	/* likewise */
	dma_wmb();
	to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen));
	to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen));

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}

	q->credits++;
	q->pend_cred++;
	ring_fl_db(adap, q);
}

/**
 *	alloc_ring - allocate resources for an SGE descriptor ring
 *	@pdev: the PCI device
 *	@nelem: the number of descriptors
 *	@elem_size: the size of each descriptor
 *	@sw_size: the size of the SW state associated with each ring element
 *	@phys: the physical address of the allocated ring
 *	@metadata: address of the array holding the SW state for the ring
 *
 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
 *	free buffer lists, or response queues.  Each SGE ring requires
 *	space for its HW descriptors plus, optionally, space for the SW state
 *	associated with each HW entry (the metadata).  The function returns
 *	three values: the virtual address for the HW ring (the return value
 *	of the function), the physical address of the HW ring, and the address
 *	of the SW ring.
 */
static void *alloc_ring(struct pci_dev *pdev, size_t nelem, size_t elem_size,
			size_t sw_size, dma_addr_t *phys, void *metadata)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = dma_alloc_coherent(&pdev->dev, len, phys, GFP_KERNEL);

	if (!p)
		return NULL;
	if (sw_size && metadata) {
		s = kcalloc(nelem, sw_size, GFP_KERNEL);

		if (!s) {
			dma_free_coherent(&pdev->dev, len, p, *phys);
			return NULL;
		}
		*(void **)metadata = s;
	}
	return p;
}

/**
 *	t3_reset_qset - reset an SGE queue set
 *	@q: the queue set
 *
 *	Resets the qset structure.  The NAPI structure is preserved in the
 *	event of the qset's reincarnation, for example during EEH recovery.
 */
static void t3_reset_qset(struct sge_qset *q)
{
	if (q->adap &&
	    !(q->adap->flags & NAPI_INIT)) {
		memset(q, 0, sizeof(*q));
		return;
	}

	q->adap = NULL;
	memset(&q->rspq, 0, sizeof(q->rspq));
	memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET);
	memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET);
	q->txq_stopped = 0;
	q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */
	q->rx_reclaim_timer.function = NULL;
	q->nomem = 0;
	napi_free_frags(&q->napi);
}

/**
 *	t3_free_qset - free the resources of an SGE queue set
 *	@adapter: the adapter owning the queue set
 *	@q: the queue set
 *
 *	Release the HW and SW resources associated with an SGE queue set, such
 *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
 *	queue set must be quiesced prior to calling this.
 */
static void t3_free_qset(struct adapter *adapter, struct sge_qset *q)
{
	int i;
	struct pci_dev *pdev = adapter->pdev;

	for (i = 0; i < SGE_RXQ_PER_SET; ++i)
		if (q->fl[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_disable_fl(adapter, q->fl[i].cntxt_id);
			spin_unlock_irq(&adapter->sge.reg_lock);
			free_rx_bufs(pdev, &q->fl[i]);
			kfree(q->fl[i].sdesc);
			dma_free_coherent(&pdev->dev,
					  q->fl[i].size *
					  sizeof(struct rx_desc), q->fl[i].desc,
					  q->fl[i].phys_addr);
		}

	for (i = 0; i < SGE_TXQ_PER_SET; ++i)
		if (q->txq[i].desc) {
			spin_lock_irq(&adapter->sge.reg_lock);
			t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0);
			spin_unlock_irq(&adapter->sge.reg_lock);
			if (q->txq[i].sdesc) {
				free_tx_desc(adapter, &q->txq[i],
					     q->txq[i].in_use);
				kfree(q->txq[i].sdesc);
			}
			dma_free_coherent(&pdev->dev,
					  q->txq[i].size *
					  sizeof(struct tx_desc),
					  q->txq[i].desc, q->txq[i].phys_addr);
			__skb_queue_purge(&q->txq[i].sendq);
		}

	if (q->rspq.desc) {
		spin_lock_irq(&adapter->sge.reg_lock);
		t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
		spin_unlock_irq(&adapter->sge.reg_lock);
		dma_free_coherent(&pdev->dev,
				  q->rspq.size * sizeof(struct rsp_desc),
				  q->rspq.desc, q->rspq.phys_addr);
	}

	t3_reset_qset(q);
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)
{
	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
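 *
 *	Each struct sg_ent packs two entries into 3 flits: the two 32-bit
 *	lengths share one flit and each 64-bit address takes its own, so a
 *	final unpaired entry costs 2 flits.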
 */
static inline unsigned int sgl_len(unsigned int n)
{
	/* alternatively: 3 * (n / 2) + 2 * (n & 1) */
	return (3 * n) / 2 + (n & 1);
}

/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Calculates the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	BUG_ON(n >= ARRAY_SIZE(flit_desc_map));
	return flit_desc_map[n];
}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list and complete setup of the
 *	sk_buff.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
				  unsigned int len, unsigned int drop_thres)
{
	struct sk_buff *skb = NULL;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	prefetch(sd->skb->data);
	fl->credits--;

	if (len <= SGE_RX_COPY_THRES) {
		skb = alloc_skb(len, GFP_ATOMIC);
		if (likely(skb != NULL)) {
			__skb_put(skb, len);
			pci_dma_sync_single_for_cpu(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
			memcpy(skb->data, sd->skb->data, len);
			pci_dma_sync_single_for_device(adap->pdev,
					    dma_unmap_addr(sd, dma_addr), len,
					    PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			goto use_orig_buf;
recycle:
		recycle_rx_buf(adap, fl, fl->cidx);
		return skb;
	}

	if (unlikely(fl->credits < drop_thres) &&
	    refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1),
		      GFP_ATOMIC | __GFP_COMP) == 0)
		goto recycle;

use_orig_buf:
	pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
			 fl->buf_size, PCI_DMA_FROMDEVICE);
	skb = sd->skb;
	skb_put(skb, len);
	__refill_fl(adap, fl);
	return skb;
}

/**
 *	get_packet_pg - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@fl: the SGE free list holding the packet
 *	@q: the queue
 *	@len: the packet length including any SGE padding
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *
 *	Get the next packet from a free list populated with page chunks.
 *	If the packet is small we make a copy and recycle the original buffer,
 *	otherwise we attach the original buffer as a page fragment to a fresh
 *	sk_buff.  If a positive drop threshold is supplied packets are dropped
 *	and their buffers recycled if (a) the number of remaining buffers is
 *	under the threshold and the packet is too big to copy, or (b) there's
 *	no system memory.
 *
 * 	Note: this function is similar to @get_packet but deals with Rx buffers
 * 	that are page chunks rather than sk_buffs.
 */
static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
				     struct sge_rspq *q, unsigned int len,
				     unsigned int drop_thres)
{
	struct sk_buff *newskb, *skb;
	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];

	dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);

	newskb = skb = q->pg_skb;
	if (!skb && (len <= SGE_RX_COPY_THRES)) {
		newskb = alloc_skb(len, GFP_ATOMIC);
		if (likely(newskb != NULL)) {
			__skb_put(newskb, len);
			pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
					    PCI_DMA_FROMDEVICE);
			memcpy(newskb->data, sd->pg_chunk.va, len);
			pci_dma_sync_single_for_device(adap->pdev, dma_addr,
						       len,
						       PCI_DMA_FROMDEVICE);
		} else if (!drop_thres)
			return NULL;
recycle:
		fl->credits--;
		recycle_rx_buf(adap, fl, fl->cidx);
		q->rx_recycle_buf++;
		return newskb;
	}

	if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
		goto recycle;

	prefetch(sd->pg_chunk.p_cnt);

	if (!skb)
		newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);

	if (unlikely(!newskb)) {
		if (!drop_thres)
			return NULL;
		goto recycle;
	}

	pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
				    PCI_DMA_FROMDEVICE);
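	/*
	 * Drop our reference on the chunk; unmap the page only when no chunk
	 * references remain and the free list is no longer carving chunks
	 * out of this page.
	 */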
	(*sd->pg_chunk.p_cnt)--;
	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
		pci_unmap_page(adap->pdev,
			       sd->pg_chunk.mapping,
			       fl->alloc_size,
			       PCI_DMA_FROMDEVICE);
	if (!skb) {
		__skb_put(newskb, SGE_RX_PULL_LEN);
		memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
		skb_fill_page_desc(newskb, 0, sd->pg_chunk.page,
				   sd->pg_chunk.offset + SGE_RX_PULL_LEN,
				   len - SGE_RX_PULL_LEN);
		newskb->len = len;
		newskb->data_len = len - SGE_RX_PULL_LEN;
		newskb->truesize += newskb->data_len;
	} else {
		skb_fill_page_desc(newskb, skb_shinfo(newskb)->nr_frags,
				   sd->pg_chunk.page,
				   sd->pg_chunk.offset, len);
		newskb->len += len;
		newskb->data_len += len;
		newskb->truesize += len;
	}

	fl->credits--;
	/*
	 * We do not refill FLs here, we let the caller do it to overlap a
	 * prefetch.
	 */
	return newskb;
}

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static inline struct sk_buff *get_imm_packet(const struct rsp_desc *resp)
{
	struct sk_buff *skb = alloc_skb(IMMED_PKT_SIZE, GFP_ATOMIC);

	if (skb) {
		__skb_put(skb, IMMED_PKT_SIZE);
		skb_copy_to_linear_data(skb, resp->imm_data, IMMED_PKT_SIZE);
	}
	return skb;
}

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@skb: the packet
 *
 * 	Returns the number of Tx descriptors needed for the given Ethernet
 * 	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
{
	unsigned int flits;

	if (skb->len <= WR_LEN - sizeof(struct cpl_tx_pkt))
		return 1;

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2;
	if (skb_shinfo(skb)->gso_size)
		flits++;
	return flits_to_desc(flits);
}

/*	map_skb - map a packet main body and its page fragments
 *	@pdev: the PCI device
 *	@skb: the packet
 *	@addr: output array for the mapped DMA addresses
 *
 *	Map the main body of an sk_buff and its page fragments, if any.
 */
static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
		   dma_addr_t *addr)
{
	const skb_frag_t *fp, *end;
	const struct skb_shared_info *si;

	if (skb_headlen(skb)) {
		*addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
				       PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto out_err;
		addr++;
	}

	si = skb_shinfo(skb);
	end = &si->frags[si->nr_frags];

	for (fp = si->frags; fp < end; fp++) {
		*addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
					 DMA_TO_DEVICE);
		if (pci_dma_mapping_error(pdev, *addr))
			goto unwind;
		addr++;
	}
	return 0;

unwind:
	while (fp-- > si->frags)
		dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
			       DMA_TO_DEVICE);

	if (skb_headlen(skb))
		pci_unmap_single(pdev, addr[-1], skb_headlen(skb),
				 PCI_DMA_TODEVICE);
out_err:
	return -ENOMEM;
}

/**
 *	write_sgl - populate a scatter/gather list for a packet
 *	@skb: the packet
 *	@sgp: the SGL to populate
 *	@start: start address of skb main body data to include in the SGL
 *	@len: length of skb main body data to include in the SGL
 *	@addr: the list of the mapped addresses
 *
 *	Copies the scatter/gather list for the buffers that make up a packet
 *	and returns the SGL size in 8-byte words.  The caller must size the SGL
 *	appropriately.
 */
static inline unsigned int write_sgl(const struct sk_buff *skb,
				     struct sg_ent *sgp, unsigned char *start,
				     unsigned int len, const dma_addr_t *addr)
{
	unsigned int i, j = 0, k = 0, nfrags;

	if (len) {
		sgp->len[0] = cpu_to_be32(len);
		sgp->addr[j++] = cpu_to_be64(addr[k++]);
	}

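	/* Each sg_ent holds two buffers; j toggles between its two slots. */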
	nfrags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
		sgp->addr[j] = cpu_to_be64(addr[k++]);
		j ^= 1;
		if (j == 0)
			++sgp;
	}
	if (j)
		sgp->len[j] = 0;
	return ((nfrags + (len != 0)) * 3) / 2 + j;
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race
 *	where the HW may go to sleep just after we checked; in that case the
 *	interrupt handler will detect the outstanding Tx packet and ring the
 *	doorbell for us.
 *
 *	When GTS is disabled we unconditionally ring the doorbell.
 */
static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	wmb();			/* write descriptors before telling HW */
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
#endif
}

static inline void wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen);
#endif
}

/**
 *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
 *	@ndesc: number of Tx descriptors spanned by the SGL
 *	@skb: the packet corresponding to the WR
 *	@d: first Tx descriptor to be written
 *	@pidx: index of above descriptors
 *	@q: the SGE Tx queue
 *	@sgl: the SGL
 *	@flits: number of flits to the start of the SGL in the first descriptor
 *	@sgl_flits: the SGL size in flits
 *	@gen: the Tx descriptor generation
 *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
 *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
 *
 *	Write a work request header and an associated SGL.  If the SGL is
 *	small enough to fit into one Tx descriptor it has already been written
 *	and we just need to write the WR header.  Otherwise we distribute the
 *	SGL across the number of descriptors it spans.
 */
static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
			     struct tx_desc *d, unsigned int pidx,
			     const struct sge_txq *q,
			     const struct sg_ent *sgl,
			     unsigned int flits, unsigned int sgl_flits,
			     unsigned int gen, __be32 wr_hi,
			     __be32 wr_lo)
{
	struct work_request_hdr *wrp = (struct work_request_hdr *)d;
	struct tx_sw_desc *sd = &q->sdesc[pidx];

	sd->skb = skb;
	if (need_skb_unmap()) {
		sd->fragidx = 0;
		sd->addr_idx = 0;
		sd->sflit = flits;
	}

	if (likely(ndesc == 1)) {
		sd->eop = 1;
		wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;
		dma_wmb();
		wrp->wr_lo = htonl(V_WR_LEN(flits + sgl_flits) |
				   V_WR_GEN(gen)) | wr_lo;
		wr_gen2(d, gen);
	} else {
		unsigned int ogen = gen;
		const u64 *fp = (const u64 *)sgl;
		struct work_request_hdr *wp = wrp;

		wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
				   V_WR_SGLSFLT(flits)) | wr_hi;

		while (sgl_flits) {
			unsigned int avail = WR_FLITS - flits;

			if (avail > sgl_flits)
				avail = sgl_flits;
			memcpy(&d->flit[flits], fp, avail * sizeof(*fp));
			sgl_flits -= avail;
			ndesc--;
			if (!sgl_flits)
				break;

			fp += avail;
			d++;
			sd->eop = 0;
			sd++;
			if (++pidx == q->size) {
				pidx = 0;
				gen ^= 1;
				d = q->desc;
				sd = q->sdesc;
			}

			sd->skb = skb;
			wrp = (struct work_request_hdr *)d;
			wrp->wr_hi = htonl(V_WR_DATATYPE(1) |
					   V_WR_SGLSFLT(1)) | wr_hi;
			wrp->wr_lo = htonl(V_WR_LEN(min(WR_FLITS,
							sgl_flits + 1)) |
					   V_WR_GEN(gen)) | wr_lo;
			wr_gen2(d, gen);
			flits = 1;
		}
		sd->eop = 1;
		wrp->wr_hi |= htonl(F_WR_EOP);
		dma_wmb();
		wp->wr_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
		wr_gen2((struct tx_desc *)wp, ogen);
		WARN_ON(ndesc != 0);
	}
}

/**
 *	write_tx_pkt_wr - write a TX_PKT work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@pi: the egress interface
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@q: the Tx queue
 *	@ndesc: number of descriptors the packet will occupy
 *	@compl: the value of the COMPL bit to use
 *	@addr: the dma-mapped addresses of the packet's buffers, from map_skb()
 *
 *	Generate a TX_PKT work request to send the supplied packet.
 */
static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
			    const struct port_info *pi,
			    unsigned int pidx, unsigned int gen,
			    struct sge_txq *q, unsigned int ndesc,
			    unsigned int compl, const dma_addr_t *addr)
{
	unsigned int flits, sgl_flits, cntrl, tso_info;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];
	struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;

	cpl->len = htonl(skb->len);
	cntrl = V_TXPKT_INTF(pi->port_id);

	if (skb_vlan_tag_present(skb))
		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(skb_vlan_tag_get(skb));

	tso_info = V_LSO_MSS(skb_shinfo(skb)->gso_size);
	if (tso_info) {
		int eth_type;
		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)cpl;

		d->flit[2] = 0;
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
		hdr->cntrl = htonl(cntrl);
		eth_type = skb_network_offset(skb) == ETH_HLEN ?
		    CPL_ETH_II : CPL_ETH_II_VLAN;
		tso_info |= V_LSO_ETH_TYPE(eth_type) |
		    V_LSO_IPHDR_WORDS(ip_hdr(skb)->ihl) |
		    V_LSO_TCPHDR_WORDS(tcp_hdr(skb)->doff);
		hdr->lso_info = htonl(tso_info);
		flits = 3;
	} else {
		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
		cntrl |= F_TXPKT_IPCSUM_DIS;	/* SW calculates IP csum */
		cntrl |= V_TXPKT_L4CSUM_DIS(skb->ip_summed != CHECKSUM_PARTIAL);
		cpl->cntrl = htonl(cntrl);

		if (skb->len <= WR_LEN - sizeof(*cpl)) {
			q->sdesc[pidx].skb = NULL;
			if (!skb->data_len)
				skb_copy_from_linear_data(skb, &d->flit[2],
							  skb->len);
			else
				skb_copy_bits(skb, 0, &d->flit[2], skb->len);

			flits = (skb->len + 7) / 8 + 2;
			cpl->wr.wr_hi = htonl(V_WR_BCNTLFLT(skb->len & 7) |
					      V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT)
					      | F_WR_SOP | F_WR_EOP | compl);
			dma_wmb();
			cpl->wr.wr_lo = htonl(V_WR_LEN(flits) | V_WR_GEN(gen) |
					      V_WR_TID(q->token));
			wr_gen2(d, gen);
			dev_consume_skb_any(skb);
			return;
		}

		flits = 2;
	}

	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
			 htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
			 htonl(V_WR_TID(q->token)));
}

static inline void t3_stop_tx_queue(struct netdev_queue *txq,
				    struct sge_qset *qs, struct sge_txq *q)
{
	netif_tx_stop_queue(txq);
	set_bit(TXQ_ETH, &qs->txq_stopped);
	q->stops++;
}

/**
 *	t3_eth_xmit - add a packet to the Ethernet Tx queue
 *	@skb: the packet
 *	@dev: the egress net device
 *
 *	Add a packet to an SGE Tx queue.  Runs with softirqs disabled.
 */
netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int qidx;
	unsigned int ndesc, pidx, credits, gen, compl;
	const struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct netdev_queue *txq;
	struct sge_qset *qs;
	struct sge_txq *q;
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	/*
	 * The chip min packet length is 9 octets but play safe and reject
	 * anything shorter than an Ethernet header.
	 */
	if (unlikely(skb->len < ETH_HLEN)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	qidx = skb_get_queue_mapping(skb);
	qs = &pi->qs[qidx];
	q = &qs->txq[TXQ_ETH];
	txq = netdev_get_tx_queue(dev, qidx);

	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	credits = q->size - q->in_use;
	ndesc = calc_tx_descs(skb);

	if (unlikely(credits < ndesc)) {
		t3_stop_tx_queue(txq, qs, q);
		dev_err(&adap->pdev->dev,
			"%s: Tx ring %u full while queue awake!\n",
			dev->name, q->cntxt_id & 7);
		return NETDEV_TX_BUSY;
	}

	/* Check if ethernet packet can't be sent as immediate data */
	if (skb->len > (WR_LEN - sizeof(struct cpl_tx_pkt))) {
		if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}

	q->in_use += ndesc;
	if (unlikely(credits - ndesc < q->stop_thres)) {
		t3_stop_tx_queue(txq, qs, q);

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
			q->restarts++;
			netif_tx_start_queue(txq);
		}
	}

	gen = q->gen;
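	/*
	 * Request a Tx completion roughly once every 8 descriptors: move
	 * bit 3 of the running unacked count into the WR_COMPL position,
	 * then wrap the count modulo 8.
	 */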
	q->unacked += ndesc;
	compl = (q->unacked & 8) << (S_WR_COMPL - 3);
	q->unacked &= 7;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}

	/* update port statistics */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		qs->port_stats[SGE_PSTAT_TX_CSUM]++;
	if (skb_shinfo(skb)->gso_size)
		qs->port_stats[SGE_PSTAT_TSO]++;
	if (skb_vlan_tag_present(skb))
		qs->port_stats[SGE_PSTAT_VLANINS]++;

	/*
	 * We do not use Tx completion interrupts to free DMAd Tx packets.
	 * This is good for performance but means that we rely on new Tx
	 * packets arriving to run the destructors of completed packets,
	 * which open up space in their sockets' send queues.  Sometimes
	 * we do not get such new packets causing Tx to stall.  A single
	 * UDP transmitter is a good example of this situation.  We have
	 * a clean up timer that periodically reclaims completed packets
	 * but it doesn't run often enough (nor do we want it to) to prevent
	 * lengthy stalls.  A solution to this problem is to run the
	 * destructor early, after the packet is queued but before it's DMAd.
	 * A downside is that we lie to socket memory accounting, but the
	 * amount of extra memory is reasonable (limited by the number of Tx
	 * descriptors), the packets do actually get freed quickly by new
	 * packets almost always, and for protocols like TCP that wait for
	 * acks to really free up the data the extra memory is even less.
	 * On the positive side we run the destructors on the sending CPU
	 * rather than on a potentially different completing CPU, usually a
	 * good thing.  We also run them without holding our Tx queue lock,
	 * unlike what reclaim_completed_tx() would otherwise do.
	 *
	 * Run the destructor before telling the DMA engine about the packet
	 * to make sure it doesn't complete and get freed prematurely.
	 */
	if (likely(!skb_shared(skb)))
		skb_orphan(skb);

	write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
	check_ring_tx_db(adap, q);
	return NETDEV_TX_OK;
}

/**
 *	write_imm - write a packet into a Tx descriptor as immediate data
 *	@d: the Tx descriptor to write
 *	@skb: the packet
 *	@len: the length of packet data to write as immediate data
 *	@gen: the generation bit value to write
 *
 *	Writes a packet as immediate data into a Tx descriptor.  The packet
 *	contains a work request at its beginning.  We must write the packet
 *	carefully so the SGE doesn't read it accidentally before it's written
 *	in its entirety.
 */
static inline void write_imm(struct tx_desc *d, struct sk_buff *skb,
			     unsigned int len, unsigned int gen)
{
	struct work_request_hdr *from = (struct work_request_hdr *)skb->data;
	struct work_request_hdr *to = (struct work_request_hdr *)d;

	if (likely(!skb->data_len))
		memcpy(&to[1], &from[1], len - sizeof(*from));
	else
		skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from));

	to->wr_hi = from->wr_hi | htonl(F_WR_SOP | F_WR_EOP |
					V_WR_BCNTLFLT(len & 7));
	dma_wmb();
	to->wr_lo = from->wr_lo | htonl(V_WR_GEN(gen) |
					V_WR_LEN((len + 7) / 8));
	wr_gen2(d, gen);
	kfree_skb(skb);
}

/**
 *	check_desc_avail - check descriptor availability on a send queue
 *	@adap: the adapter
 *	@q: the send queue
 *	@skb: the packet needing the descriptors
 *	@ndesc: the number of Tx descriptors needed
 *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
 *
 *	Checks if the requested number of Tx descriptors is available on an
 *	SGE send queue.  If the queue is already suspended or not enough
 *	descriptors are available the packet is queued for later transmission.
 *	Must be called with the Tx queue locked.
 *
 *	Returns 0 if enough descriptors are available, 1 if there aren't
 *	enough descriptors and the packet has been queued, and 2 if the caller
 *	needs to retry because there weren't enough descriptors at the
 *	beginning of the call but some freed up in the mean time.
 */
static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q,
				   struct sk_buff *skb, unsigned int ndesc,
				   unsigned int qid)
{
	if (unlikely(!skb_queue_empty(&q->sendq))) {
addq_exit:
		__skb_queue_tail(&q->sendq, skb);
		return 1;
	}
	if (unlikely(q->size - q->in_use < ndesc)) {
		struct sge_qset *qs = txq_to_qset(q, qid);

		set_bit(qid, &qs->txq_stopped);
		smp_mb__after_atomic();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(qid, &qs->txq_stopped))
			return 2;

		q->stops++;
		goto addq_exit;
	}
	return 0;
}

/**
 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
 *	@q: the SGE control Tx queue
 *
 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
 *	that send only immediate data (presently just the control queues) and
 *	thus do not have any sk_buffs to release.
 */
static inline void reclaim_completed_tx_imm(struct sge_txq *q)
{
	unsigned int reclaim = q->processed - q->cleaned;

	q->in_use -= reclaim;
	q->cleaned += reclaim;
}

static inline int immediate(const struct sk_buff *skb)
{
	return skb->len <= WR_LEN;
}

/**
 *	ctrl_xmit - send a packet through an SGE control Tx queue
 *	@adap: the adapter
 *	@q: the control queue
 *	@skb: the packet
 *
 *	Send a packet through an SGE control Tx queue.  Packets sent through
 *	a control queue must fit entirely as immediate data in a single Tx
 *	descriptor and have no page fragments.
 */
static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	struct work_request_hdr *wrp = (struct work_request_hdr *)skb->data;

	if (unlikely(!immediate(skb))) {
		WARN_ON(1);
		dev_kfree_skb(skb);
		return NET_XMIT_SUCCESS;
	}

	wrp->wr_hi |= htonl(F_WR_SOP | F_WR_EOP);
	wrp->wr_lo = htonl(V_WR_TID(q->token));

	spin_lock(&q->lock);
again:
	reclaim_completed_tx_imm(q);

	ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL);
	if (unlikely(ret)) {
		if (ret == 1) {
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

	write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

	q->in_use++;
	if (++q->pidx >= q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_ctrlq - restart a suspended control queue
 *	@t: pointer to the tasklet associated with this handler
 *
 *	Resumes transmission on a suspended Tx control queue.
 */
static void restart_ctrlq(struct tasklet_struct *t)
{
	struct sk_buff *skb;
	struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
	struct sge_txq *q = &qs->txq[TXQ_CTRL];

	spin_lock(&q->lock);
again:
	reclaim_completed_tx_imm(q);

	while (q->in_use < q->size &&
	       (skb = __skb_dequeue(&q->sendq)) != NULL) {

		write_imm(&q->desc[q->pidx], skb, skb->len, q->gen);

		if (++q->pidx >= q->size) {
			q->pidx = 0;
			q->gen ^= 1;
		}
		q->in_use++;
	}

	if (!skb_queue_empty(&q->sendq)) {
		set_bit(TXQ_CTRL, &qs->txq_stopped);
		smp_mb__after_atomic();

		if (should_restart_tx(q) &&
		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
			goto again;
		q->stops++;
	}

	spin_unlock(&q->lock);
	wmb();
	t3_write_reg(qs->adap, A_SG_KDOORBELL,
		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/*
 * Send a management message through control queue 0
 */
int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
{
	int ret;

	local_bh_disable();
	ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb);
	local_bh_enable();

	return ret;
}

/**
 *	deferred_unmap_destructor - unmap a packet when it is freed
 *	@skb: the packet
 *
 *	This is the packet destructor used for Tx packets that need to remain
 *	mapped until they are freed rather than until their Tx descriptors are
 *	freed.
 */
static void deferred_unmap_destructor(struct sk_buff *skb)
{
	int i;
	const dma_addr_t *p;
	const struct skb_shared_info *si;
	const struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	p = dui->addr;

	if (skb_tail_pointer(skb) - skb_transport_header(skb))
		pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) -
				 skb_transport_header(skb), PCI_DMA_TODEVICE);

	si = skb_shinfo(skb);
	for (i = 0; i < si->nr_frags; i++)
		pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]),
			       PCI_DMA_TODEVICE);
}

static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
				     const struct sg_ent *sgl, int sgl_flits)
{
	dma_addr_t *p;
	struct deferred_unmap_info *dui;

	dui = (struct deferred_unmap_info *)skb->head;
	dui->pdev = pdev;
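	/*
	 * Each complete sg_ent (3 flits) carries two mapped addresses; a
	 * trailing partial entry (1 or 2 flits) carries one more.
	 */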
	for (p = dui->addr; sgl_flits >= 3; sgl++, sgl_flits -= 3) {
		*p++ = be64_to_cpu(sgl->addr[0]);
		*p++ = be64_to_cpu(sgl->addr[1]);
	}
	if (sgl_flits)
		*p = be64_to_cpu(sgl->addr[0]);
}

/**
 *	write_ofld_wr - write an offload work request
 *	@adap: the adapter
 *	@skb: the packet to send
 *	@q: the Tx queue
 *	@pidx: index of the first Tx descriptor to write
 *	@gen: the generation value to use
 *	@ndesc: number of descriptors the packet will occupy
 *	@addr: the dma-mapped addresses of the packet's buffers
 *
 *	Write an offload work request to send the supplied packet.  The packet
 *	data already carry the work request with most fields populated.
 */
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
			  struct sge_txq *q, unsigned int pidx,
			  unsigned int gen, unsigned int ndesc,
			  const dma_addr_t *addr)
{
	unsigned int sgl_flits, flits;
	struct work_request_hdr *from;
	struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
	struct tx_desc *d = &q->desc[pidx];

	if (immediate(skb)) {
		q->sdesc[pidx].skb = NULL;
		write_imm(d, skb, skb->len, gen);
		return;
	}

	/* Only TX_DATA builds SGLs */

	from = (struct work_request_hdr *)skb->data;
	memcpy(&d->flit[1], &from[1],
	       skb_transport_offset(skb) - sizeof(*from));

	flits = skb_transport_offset(skb) / 8;
	sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
	sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
			      skb_tail_pointer(skb) - skb_transport_header(skb),
			      addr);
	if (need_skb_unmap()) {
		setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
		skb->destructor = deferred_unmap_destructor;
	}

	write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits,
			 gen, from->wr_hi, from->wr_lo);
}

/**
 *	calc_tx_descs_ofld - calculate # of Tx descriptors for an offload packet
 *	@skb: the packet
 *
 * 	Returns the number of Tx descriptors needed for the given offload
 * 	packet.  These packets are already fully constructed.
 */
static inline unsigned int calc_tx_descs_ofld(const struct sk_buff *skb)
{
	unsigned int flits, cnt;

	if (skb->len <= WR_LEN)
		return 1;	/* packet fits as immediate data */

	flits = skb_transport_offset(skb) / 8;	/* headers */
	cnt = skb_shinfo(skb)->nr_frags;
	if (skb_tail_pointer(skb) != skb_transport_header(skb))
		cnt++;
	return flits_to_desc(flits + sgl_len(cnt));
}

/**
 *	ofld_xmit - send a packet through an offload queue
 *	@adap: the adapter
 *	@q: the Tx offload queue
 *	@skb: the packet
 *
 *	Send an offload packet through an SGE offload queue.
 */
static int ofld_xmit(struct adapter *adap, struct sge_txq *q,
		     struct sk_buff *skb)
{
	int ret;
	unsigned int ndesc = calc_tx_descs_ofld(skb), pidx, gen;

	spin_lock(&q->lock);
again:
	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD);
	if (unlikely(ret)) {
		if (ret == 1) {
			skb->priority = ndesc;	/* save for restart */
			spin_unlock(&q->lock);
			return NET_XMIT_CN;
		}
		goto again;
	}

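	/*
	 * The area at skb->head doubles as scratch space for the mapped
	 * addresses; setup_deferred_unmapping() later lays a struct
	 * deferred_unmap_info over the same area.
	 */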
	if (!immediate(skb) &&
	    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
		spin_unlock(&q->lock);
		return NET_XMIT_SUCCESS;
	}

	gen = q->gen;
	q->in_use += ndesc;
	pidx = q->pidx;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {
		q->pidx -= q->size;
		q->gen ^= 1;
	}
	spin_unlock(&q->lock);

	write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
	check_ring_tx_db(adap, q);
	return NET_XMIT_SUCCESS;
}

/**
 *	restart_offloadq - restart a suspended offload queue
 *	@t: pointer to the tasklet associated with this handler
 *
 *	Resumes transmission on a suspended Tx offload queue.
 */
static void restart_offloadq(struct tasklet_struct *t)
{
	struct sk_buff *skb;
	struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
	struct sge_txq *q = &qs->txq[TXQ_OFLD];
	const struct port_info *pi = netdev_priv(qs->netdev);
	struct adapter *adap = pi->adapter;
	unsigned int written = 0;

	spin_lock(&q->lock);
again:
	reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);

	while ((skb = skb_peek(&q->sendq)) != NULL) {
		unsigned int gen, pidx;
		unsigned int ndesc = skb->priority;

		if (unlikely(q->size - q->in_use < ndesc)) {
			set_bit(TXQ_OFLD, &qs->txq_stopped);
			smp_mb__after_atomic();

			if (should_restart_tx(q) &&
			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
				goto again;
			q->stops++;
			break;
		}

		if (!immediate(skb) &&
		    map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
			break;

		gen = q->gen;
		q->in_use += ndesc;
		pidx = q->pidx;
		q->pidx += ndesc;
		written += ndesc;
		if (q->pidx >= q->size) {
			q->pidx -= q->size;
			q->gen ^= 1;
		}
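		/*
		 * Drop the queue lock while writing the WR: the descriptors
		 * were reserved above, so concurrent senders cannot reuse
		 * them.
		 */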
		__skb_unlink(skb, &q->sendq);
		spin_unlock(&q->lock);

		write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
			      (dma_addr_t *)skb->head);
		spin_lock(&q->lock);
	}
	spin_unlock(&q->lock);

#if USE_GTS
	set_bit(TXQ_RUNNING, &q->flags);
	set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
	wmb();
	if (likely(written))
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}

/**
 *	queue_set - return the queue set a packet should use
 *	@skb: the packet
 *
 *	Maps a packet to the SGE queue set it should use.  The desired queue
 *	set is carried in bits 1-3 in the packet's priority.
 */
static inline int queue_set(const struct sk_buff *skb)
{
	return skb->priority >> 1;
}

/**
 *	is_ctrl_pkt - return whether an offload packet is a control packet
 *	@skb: the packet
 *
 *	Determines whether an offload packet should use an OFLD or a CTRL
 *	Tx queue.  This is indicated by bit 0 in the packet's priority.
 */
static inline int is_ctrl_pkt(const struct sk_buff *skb)
{
	return skb->priority & 1;
}

/**
 *	t3_offload_tx - send an offload packet
 *	@tdev: the offload device to send to
 *	@skb: the packet
 *
 *	Sends an offload packet.  We use the packet priority to select the
 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
 *	should be sent as regular or control, bits 1-3 select the queue set.
 */
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
{
	struct adapter *adap = tdev2adap(tdev);
	struct sge_qset *qs = &adap->sge.qs[queue_set(skb)];

	if (unlikely(is_ctrl_pkt(skb)))
		return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb);

	return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb);
}

/**
 *	offload_enqueue - add an offload packet to an SGE offload receive queue
 *	@q: the SGE response queue
 *	@skb: the packet
 *
 *	Add a new offload packet to an SGE response queue's offload packet
 *	queue.  If the packet is the first on the queue it schedules the RX
 *	softirq to process the queue.
 */
static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb)
{
	int was_empty = skb_queue_empty(&q->rx_queue);

	__skb_queue_tail(&q->rx_queue, skb);

	if (was_empty) {
		struct sge_qset *qs = rspq_to_qset(q);

		napi_schedule(&qs->napi);
	}
}

/**
 *	deliver_partial_bundle - deliver a (partial) bundle of Rx offload pkts
 *	@tdev: the offload device that will be receiving the packets
 *	@q: the SGE response queue that assembled the bundle
 *	@skbs: the partial bundle
 *	@n: the number of packets in the bundle
 *
 *	Delivers a (partial) bundle of Rx offload packets to an offload device.
 */
static inline void deliver_partial_bundle(struct t3cdev *tdev,
					  struct sge_rspq *q,
					  struct sk_buff *skbs[], int n)
{
	if (n) {
		q->offload_bundles++;
		tdev->recv(tdev, skbs, n);
	}
}

/**
 *	ofld_poll - NAPI handler for offload packets in interrupt mode
 *	@napi: the network device doing the polling
 *	@budget: polling budget
 *
 *	The NAPI handler for offload packets when a response queue is serviced
 *	by the hard interrupt handler, i.e., when it's operating in non-polling
 *	mode.  Creates small packet batches and sends them through the offload
 *	receive handler.  Batches need to be of modest size as we do prefetches
 *	on the packets in each.
 */
static int ofld_poll(struct napi_struct *napi, int budget)
1899{
1900	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
1901	struct sge_rspq *q = &qs->rspq;
1902	struct adapter *adapter = qs->adap;
1903	int work_done = 0;
1904
1905	while (work_done < budget) {
1906		struct sk_buff *skb, *tmp, *skbs[RX_BUNDLE_SIZE];
1907		struct sk_buff_head queue;
1908		int ngathered;
1909
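		/*
		 * Detach all queued packets under the lock, then deliver
		 * them in bundles with the lock released.
		 */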
1910		spin_lock_irq(&q->lock);
1911		__skb_queue_head_init(&queue);
1912		skb_queue_splice_init(&q->rx_queue, &queue);
1913		if (skb_queue_empty(&queue)) {
1914			napi_complete_done(napi, work_done);
1915			spin_unlock_irq(&q->lock);
1916			return work_done;
1917		}
1918		spin_unlock_irq(&q->lock);
1919
1920		ngathered = 0;
1921		skb_queue_walk_safe(&queue, skb, tmp) {
1922			if (work_done >= budget)
1923				break;
1924			work_done++;
1925
1926			__skb_unlink(skb, &queue);
1927			prefetch(skb->data);
1928			skbs[ngathered] = skb;
1929			if (++ngathered == RX_BUNDLE_SIZE) {
1930				q->offload_bundles++;
1931				adapter->tdev.recv(&adapter->tdev, skbs,
1932						   ngathered);
1933				ngathered = 0;
1934			}
1935		}
1936		if (!skb_queue_empty(&queue)) {
1937			/* splice remaining packets back onto Rx queue */
1938			spin_lock_irq(&q->lock);
1939			skb_queue_splice(&queue, &q->rx_queue);
1940			spin_unlock_irq(&q->lock);
1941		}
1942		deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered);
1943	}
1944
1945	return work_done;
1946}
1947
1948/**
1949 *	rx_offload - process a received offload packet
1950 *	@tdev: the offload device receiving the packet
1951 *	@rq: the response queue that received the packet
1952 *	@skb: the packet
1953 *	@rx_gather: a gather list of packets if we are building a bundle
1954 *	@gather_idx: index of the next available slot in the bundle
1955 *
1956 *	Process an ingress offload pakcet and add it to the offload ingress
1957 *	queue. 	Returns the index of the next available slot in the bundle.
1958 */
1959static inline int rx_offload(struct t3cdev *tdev, struct sge_rspq *rq,
1960			     struct sk_buff *skb, struct sk_buff *rx_gather[],
1961			     unsigned int gather_idx)
1962{
1963	skb_reset_mac_header(skb);
1964	skb_reset_network_header(skb);
1965	skb_reset_transport_header(skb);
1966
1967	if (rq->polling) {
1968		rx_gather[gather_idx++] = skb;
1969		if (gather_idx == RX_BUNDLE_SIZE) {
1970			tdev->recv(tdev, rx_gather, RX_BUNDLE_SIZE);
1971			gather_idx = 0;
1972			rq->offload_bundles++;
1973		}
1974	} else
1975		offload_enqueue(rq, skb);
1976
1977	return gather_idx;
1978}
1979
1980/**
1981 *	restart_tx - check whether to restart suspended Tx queues
1982 *	@qs: the queue set to resume
1983 *
1984 *	Restarts suspended Tx queues of an SGE queue set if they have enough
1985 *	free resources to resume operation.
1986 */
1987static void restart_tx(struct sge_qset *qs)
1988{
1989	if (test_bit(TXQ_ETH, &qs->txq_stopped) &&
1990	    should_restart_tx(&qs->txq[TXQ_ETH]) &&
1991	    test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1992		qs->txq[TXQ_ETH].restarts++;
1993		if (netif_running(qs->netdev))
1994			netif_tx_wake_queue(qs->tx_q);
1995	}
1996
1997	if (test_bit(TXQ_OFLD, &qs->txq_stopped) &&
1998	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
1999	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2000		qs->txq[TXQ_OFLD].restarts++;
2001		tasklet_schedule(&qs->txq[TXQ_OFLD].qresume_tsk);
2002	}
2003	if (test_bit(TXQ_CTRL, &qs->txq_stopped) &&
2004	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2005	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2006		qs->txq[TXQ_CTRL].restarts++;
2007		tasklet_schedule(&qs->txq[TXQ_CTRL].qresume_tsk);
2008	}
2009}
2010
2011/**
2012 *	cxgb3_arp_process - process an ARP request probing a private IP address
2013 *	@pi: the port info
2014 *	@skb: the skbuff containing the ARP request
2015 *
2016 *	Check if the ARP request is probing the private IP address
2017 *	dedicated to iSCSI, generate an ARP reply if so.
2018 */
2019static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
2020{
2021	struct net_device *dev = skb->dev;
2022	struct arphdr *arp;
2023	unsigned char *arp_ptr;
2024	unsigned char *sha;
2025	__be32 sip, tip;
2026
2027	if (!dev)
2028		return;
2029
2030	skb_reset_network_header(skb);
2031	arp = arp_hdr(skb);
2032
2033	if (arp->ar_op != htons(ARPOP_REQUEST))
2034		return;
2035
2036	arp_ptr = (unsigned char *)(arp + 1);
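	/* payload layout: sender HW addr, sender IP, target HW addr, target IP */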
2037	sha = arp_ptr;
2038	arp_ptr += dev->addr_len;
2039	memcpy(&sip, arp_ptr, sizeof(sip));
2040	arp_ptr += sizeof(sip);
2041	arp_ptr += dev->addr_len;
2042	memcpy(&tip, arp_ptr, sizeof(tip));
2043
2044	if (tip != pi->iscsi_ipv4addr)
2045		return;
2046
2047	arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
2048		 pi->iscsic.mac_addr, sha);
2049
2050}
2051
2052static inline int is_arp(struct sk_buff *skb)
2053{
2054	return skb->protocol == htons(ETH_P_ARP);
2055}
2056
2057static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
2058					struct sk_buff *skb)
2059{
2060	if (is_arp(skb)) {
2061		cxgb3_arp_process(pi, skb);
2062		return;
2063	}
2064
2065	if (pi->iscsic.recv)
2066		pi->iscsic.recv(pi, skb);
2067
2068}
2069
2070/**
2071 *	rx_eth - process an ingress ethernet packet
2072 *	@adap: the adapter
2073 *	@rq: the response queue that received the packet
2074 *	@skb: the packet
2075 *	@pad: padding
2076 *	@lro: large receive offload
2077 *
2078 *	Process an ingress ethernet pakcet and deliver it to the stack.
2079 *	The padding is 2 if the packet was delivered in an Rx buffer and 0
2080 *	if it was immediate data in a response.
2081 */
2082static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2083		   struct sk_buff *skb, int pad, int lro)
2084{
2085	struct cpl_rx_pkt *p = (struct cpl_rx_pkt *)(skb->data + pad);
2086	struct sge_qset *qs = rspq_to_qset(rq);
2087	struct port_info *pi;
2088
2089	skb_pull(skb, sizeof(*p) + pad);
2090	skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
2091	pi = netdev_priv(skb->dev);
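	/*
	 * csum_valid with a full-packet csum of 0xffff means the hardware
	 * verified the TCP/UDP checksum.
	 */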
2092	if ((skb->dev->features & NETIF_F_RXCSUM) && p->csum_valid &&
2093	    p->csum == htons(0xffff) && !p->fragment) {
2094		qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2095		skb->ip_summed = CHECKSUM_UNNECESSARY;
2096	} else
2097		skb_checksum_none_assert(skb);
2098	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2099
2100	if (p->vlan_valid) {
2101		qs->port_stats[SGE_PSTAT_VLANEX]++;
2102		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(p->vlan));
2103	}
2104	if (rq->polling) {
2105		if (lro)
2106			napi_gro_receive(&qs->napi, skb);
2107		else {
2108			if (unlikely(pi->iscsic.flags))
2109				cxgb3_process_iscsi_prov_pack(pi, skb);
2110			netif_receive_skb(skb);
2111		}
2112	} else
2113		netif_rx(skb);
2114}
2115
2116static inline int is_eth_tcp(u32 rss)
2117{
2118	return G_HASHTYPE(ntohl(rss)) == RSS_HASH_4_TUPLE;
2119}
2120
2121/**
2122 *	lro_add_page - add a page chunk to an LRO session
2123 *	@adap: the adapter
2124 *	@qs: the associated queue set
2125 *	@fl: the free list containing the page chunk to add
2126 *	@len: packet length
2127 *	@complete: Indicates the last fragment of a frame
2128 *
2129 *	Add a received packet contained in a page chunk to an existing LRO
2130 *	session.
2131 */
2132static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2133			 struct sge_fl *fl, int len, int complete)
2134{
2135	struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
2136	struct port_info *pi = netdev_priv(qs->netdev);
2137	struct sk_buff *skb = NULL;
2138	struct cpl_rx_pkt *cpl;
2139	skb_frag_t *rx_frag;
2140	int nr_frags;
2141	int offset = 0;
2142
2143	if (!qs->nomem) {
2144		skb = napi_get_frags(&qs->napi);
2145		qs->nomem = !skb;
2146	}
2147
2148	fl->credits--;
2149
2150	pci_dma_sync_single_for_cpu(adap->pdev,
2151				    dma_unmap_addr(sd, dma_addr),
2152				    fl->buf_size - SGE_PG_RSVD,
2153				    PCI_DMA_FROMDEVICE);
2154
2155	(*sd->pg_chunk.p_cnt)--;
2156	if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
2157		pci_unmap_page(adap->pdev,
2158			       sd->pg_chunk.mapping,
2159			       fl->alloc_size,
2160			       PCI_DMA_FROMDEVICE);
2161
2162	if (!skb) {
2163		put_page(sd->pg_chunk.page);
2164		if (complete)
2165			qs->nomem = 0;
2166		return;
2167	}
2168
2169	rx_frag = skb_shinfo(skb)->frags;
2170	nr_frags = skb_shinfo(skb)->nr_frags;
2171
2172	if (!nr_frags) {
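		/* first chunk: payload follows a 2-byte pad and the CPL header */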
2173		offset = 2 + sizeof(struct cpl_rx_pkt);
2174		cpl = qs->lro_va = sd->pg_chunk.va + 2;
2175
2176		if ((qs->netdev->features & NETIF_F_RXCSUM) &&
2177		     cpl->csum_valid && cpl->csum == htons(0xffff)) {
2178			skb->ip_summed = CHECKSUM_UNNECESSARY;
2179			qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2180		} else
2181			skb->ip_summed = CHECKSUM_NONE;
2182	} else
2183		cpl = qs->lro_va;
2184
2185	len -= offset;
2186
2187	rx_frag += nr_frags;
2188	__skb_frag_set_page(rx_frag, sd->pg_chunk.page);
2189	skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
2190	skb_frag_size_set(rx_frag, len);
2191
2192	skb->len += len;
2193	skb->data_len += len;
2194	skb->truesize += len;
2195	skb_shinfo(skb)->nr_frags++;
2196
2197	if (!complete)
2198		return;
2199
2200	skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]);
2201
2202	if (cpl->vlan_valid) {
2203		qs->port_stats[SGE_PSTAT_VLANEX]++;
2204		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cpl->vlan));
2205	}
2206	napi_gro_frags(&qs->napi);
2207}
2208
2209/**
2210 *	handle_rsp_cntrl_info - handles control information in a response
2211 *	@qs: the queue set corresponding to the response
2212 *	@flags: the response control flags
2213 *
2214 *	Handles the control information of an SGE response, such as GTS
2215 *	indications and completion credits for the queue set's Tx queues.
2216 *	HW coalesces credits, we don't do any extra SW coalescing.
2217 */
2218static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
2219{
2220	unsigned int credits;
2221
2222#if USE_GTS
2223	if (flags & F_RSPD_TXQ0_GTS)
2224		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2225#endif
2226
2227	credits = G_RSPD_TXQ0_CR(flags);
2228	if (credits)
2229		qs->txq[TXQ_ETH].processed += credits;
2230
2231	credits = G_RSPD_TXQ2_CR(flags);
2232	if (credits)
2233		qs->txq[TXQ_CTRL].processed += credits;
2234
#if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
#endif
2239	credits = G_RSPD_TXQ1_CR(flags);
2240	if (credits)
2241		qs->txq[TXQ_OFLD].processed += credits;
2242}
2243
2244/**
2245 *	check_ring_db - check if we need to ring any doorbells
2246 *	@adap: the adapter
2247 *	@qs: the queue set whose Tx queues are to be examined
2248 *	@sleeping: indicates which Tx queue sent GTS
2249 *
2250 *	Checks if some of a queue set's Tx queues need to ring their doorbells
2251 *	to resume transmission after idling while they still have unprocessed
2252 *	descriptors.
2253 */
2254static void check_ring_db(struct adapter *adap, struct sge_qset *qs,
2255			  unsigned int sleeping)
2256{
2257	if (sleeping & F_RSPD_TXQ0_GTS) {
2258		struct sge_txq *txq = &qs->txq[TXQ_ETH];
2259
2260		if (txq->cleaned + txq->in_use != txq->processed &&
2261		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2262			set_bit(TXQ_RUNNING, &txq->flags);
2263			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2264				     V_EGRCNTX(txq->cntxt_id));
2265		}
2266	}
2267
2268	if (sleeping & F_RSPD_TXQ1_GTS) {
2269		struct sge_txq *txq = &qs->txq[TXQ_OFLD];
2270
2271		if (txq->cleaned + txq->in_use != txq->processed &&
2272		    !test_and_set_bit(TXQ_LAST_PKT_DB, &txq->flags)) {
2273			set_bit(TXQ_RUNNING, &txq->flags);
2274			t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX |
2275				     V_EGRCNTX(txq->cntxt_id));
2276		}
2277	}
2278}
2279
2280/**
2281 *	is_new_response - check if a response is newly written
2282 *	@r: the response descriptor
2283 *	@q: the response queue
2284 *
2285 *	Returns true if a response descriptor contains a yet unprocessed
2286 *	response.
2287 */
2288static inline int is_new_response(const struct rsp_desc *r,
2289				  const struct sge_rspq *q)
2290{
2291	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2292}
2293
2294static inline void clear_rspq_bufstate(struct sge_rspq * const q)
2295{
2296	q->pg_skb = NULL;
2297	q->rx_recycle_buf = 0;
2298}
2299
2300#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2301#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2302			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2303			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2304			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2305
/*
 * How long to delay the next interrupt in case of memory shortage, in units
 * of 0.1 us (2500 -> 250 us).
 */
2307#define NOMEM_INTR_DELAY 2500
2308
2309/**
2310 *	process_responses - process responses from an SGE response queue
2311 *	@adap: the adapter
2312 *	@qs: the queue set to which the response queue belongs
2313 *	@budget: how many responses can be processed in this round
2314 *
2315 *	Process responses from an SGE response queue up to the supplied budget.
2316 *	Responses include received packets as well as credits and other events
2317 *	for the queues that belong to the response queue's queue set.
2318 *	A negative budget is effectively unlimited.
2319 *
2320 *	Additionally choose the interrupt holdoff time for the next interrupt
2321 *	on this queue.  If the system is under memory shortage use a fairly
2322 *	long delay to help recovery.
2323 */
2324static int process_responses(struct adapter *adap, struct sge_qset *qs,
2325			     int budget)
2326{
2327	struct sge_rspq *q = &qs->rspq;
2328	struct rsp_desc *r = &q->desc[q->cidx];
2329	int budget_left = budget;
2330	unsigned int sleeping = 0;
2331	struct sk_buff *offload_skbs[RX_BUNDLE_SIZE];
2332	int ngathered = 0;
2333
2334	q->next_holdoff = q->holdoff_tmr;
2335
2336	while (likely(budget_left && is_new_response(r, q))) {
2337		int packet_complete, eth, ethpad = 2;
2338		int lro = !!(qs->netdev->features & NETIF_F_GRO);
2339		struct sk_buff *skb = NULL;
2340		u32 len, flags;
2341		__be32 rss_hi, rss_lo;
2342
2343		dma_rmb();
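		/*
		 * The barrier above orders reading the descriptor contents
		 * after the generation-bit check in is_new_response().
		 */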
2344		eth = r->rss_hdr.opcode == CPL_RX_PKT;
2345		rss_hi = *(const __be32 *)r;
2346		rss_lo = r->rss_hdr.rss_hash_val;
2347		flags = ntohl(r->flags);
2348
2349		if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
2350			skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
2351			if (!skb)
2352				goto no_mem;
2353
2354			__skb_put_data(skb, r, AN_PKT_SIZE);
2355			skb->data[0] = CPL_ASYNC_NOTIF;
2356			rss_hi = htonl(CPL_ASYNC_NOTIF << 24);
2357			q->async_notif++;
2358		} else if (flags & F_RSPD_IMM_DATA_VALID) {
2359			skb = get_imm_packet(r);
2360			if (unlikely(!skb)) {
2361no_mem:
2362				q->next_holdoff = NOMEM_INTR_DELAY;
2363				q->nomem++;
2364				/* consume one credit since we tried */
2365				budget_left--;
2366				break;
2367			}
2368			q->imm_data++;
2369			ethpad = 0;
2370		} else if ((len = ntohl(r->len_cq)) != 0) {
2371			struct sge_fl *fl;
2372
2373			lro &= eth && is_eth_tcp(rss_hi);
2374
2375			fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2376			if (fl->use_pages) {
2377				void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
2378
2379				net_prefetch(addr);
2380				__refill_fl(adap, fl);
2381				if (lro > 0) {
2382					lro_add_page(adap, qs, fl,
2383						     G_RSPD_LEN(len),
2384						     flags & F_RSPD_EOP);
2385					goto next_fl;
2386				}
2387
2388				skb = get_packet_pg(adap, fl, q,
2389						    G_RSPD_LEN(len),
2390						    eth ?
2391						    SGE_RX_DROP_THRES : 0);
2392				q->pg_skb = skb;
2393			} else
2394				skb = get_packet(adap, fl, G_RSPD_LEN(len),
2395						 eth ? SGE_RX_DROP_THRES : 0);
2396			if (unlikely(!skb)) {
2397				if (!eth)
2398					goto no_mem;
2399				q->rx_drops++;
2400			} else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2401				__skb_pull(skb, 2);
2402next_fl:
2403			if (++fl->cidx == fl->size)
2404				fl->cidx = 0;
2405		} else
2406			q->pure_rsps++;
2407
2408		if (flags & RSPD_CTRL_MASK) {
2409			sleeping |= flags & RSPD_GTS_MASK;
2410			handle_rsp_cntrl_info(qs, flags);
2411		}
2412
2413		r++;
2414		if (unlikely(++q->cidx == q->size)) {
2415			q->cidx = 0;
2416			q->gen ^= 1;
2417			r = q->desc;
2418		}
2419		prefetch(r);
2420
2421		if (++q->credits >= (q->size / 4)) {
2422			refill_rspq(adap, q, q->credits);
2423			q->credits = 0;
2424		}
2425
2426		packet_complete = flags &
2427				  (F_RSPD_EOP | F_RSPD_IMM_DATA_VALID |
2428				   F_RSPD_ASYNC_NOTIF);
2429
2430		if (skb != NULL && packet_complete) {
2431			if (eth)
2432				rx_eth(adap, q, skb, ethpad, lro);
2433			else {
2434				q->offload_pkts++;
2435				/* Preserve the RSS info in csum & priority */
2436				skb->csum = rss_hi;
2437				skb->priority = rss_lo;
2438				ngathered = rx_offload(&adap->tdev, q, skb,
2439						       offload_skbs,
2440						       ngathered);
2441			}
2442
2443			if (flags & F_RSPD_EOP)
2444				clear_rspq_bufstate(q);
2445		}
2446		--budget_left;
2447	}
2448
2449	deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered);
2450
2451	if (sleeping)
2452		check_ring_db(adap, qs, sleeping);
2453
2454	smp_mb();		/* commit Tx queue .processed updates */
2455	if (unlikely(qs->txq_stopped != 0))
2456		restart_tx(qs);
2457
2458	budget -= budget_left;
2459	return budget;
2460}
2461
2462static inline int is_pure_response(const struct rsp_desc *r)
2463{
2464	__be32 n = r->flags & htonl(F_RSPD_ASYNC_NOTIF | F_RSPD_IMM_DATA_VALID);
2465
2466	return (n | r->len_cq) == 0;
2467}
2468
2469/**
2470 *	napi_rx_handler - the NAPI handler for Rx processing
2471 *	@napi: the napi instance
2472 *	@budget: how many packets we can process in this round
2473 *
2474 *	Handler for new data events when using NAPI.
2475 */
2476static int napi_rx_handler(struct napi_struct *napi, int budget)
2477{
2478	struct sge_qset *qs = container_of(napi, struct sge_qset, napi);
2479	struct adapter *adap = qs->adap;
2480	int work_done = process_responses(adap, qs, budget);
2481
2482	if (likely(work_done < budget)) {
2483		napi_complete_done(napi, work_done);
2484
2485		/*
2486		 * Because we don't atomically flush the following
2487		 * write it is possible that in very rare cases it can
2488		 * reach the device in a way that races with a new
2489		 * response being written plus an error interrupt
2490		 * causing the NAPI interrupt handler below to return
2491		 * unhandled status to the OS.  To protect against
2492		 * this would require flushing the write and doing
2493		 * both the write and the flush with interrupts off.
2494		 * Way too expensive and unjustifiable given the
2495		 * rarity of the race.
2496		 *
2497		 * The race cannot happen at all with MSI-X.
2498		 */
2499		t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
2500			     V_NEWTIMER(qs->rspq.next_holdoff) |
2501			     V_NEWINDEX(qs->rspq.cidx));
2502	}
2503	return work_done;
2504}
2505
2506/*
2507 * Returns true if the device is already scheduled for polling.
2508 */
2509static inline int napi_is_scheduled(struct napi_struct *napi)
2510{
2511	return test_bit(NAPI_STATE_SCHED, &napi->state);
2512}
2513
2514/**
2515 *	process_pure_responses - process pure responses from a response queue
2516 *	@adap: the adapter
2517 *	@qs: the queue set owning the response queue
2518 *	@r: the first pure response to process
2519 *
2520 *	A simpler version of process_responses() that handles only pure (i.e.,
2521 *	non data-carrying) responses.  Such respones are too light-weight to
2522 *	justify calling a softirq under NAPI, so we handle them specially in
2523 *	the interrupt handler.  The function is called with a pointer to a
2524 *	response, which the caller must ensure is a valid pure response.
2525 *
2526 *	Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2527 */
2528static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
2529				  struct rsp_desc *r)
2530{
2531	struct sge_rspq *q = &qs->rspq;
2532	unsigned int sleeping = 0;
2533
2534	do {
2535		u32 flags = ntohl(r->flags);
2536
2537		r++;
2538		if (unlikely(++q->cidx == q->size)) {
2539			q->cidx = 0;
2540			q->gen ^= 1;
2541			r = q->desc;
2542		}
2543		prefetch(r);
2544
2545		if (flags & RSPD_CTRL_MASK) {
2546			sleeping |= flags & RSPD_GTS_MASK;
2547			handle_rsp_cntrl_info(qs, flags);
2548		}
2549
2550		q->pure_rsps++;
2551		if (++q->credits >= (q->size / 4)) {
2552			refill_rspq(adap, q, q->credits);
2553			q->credits = 0;
2554		}
2555		if (!is_new_response(r, q))
2556			break;
2557		dma_rmb();
2558	} while (is_pure_response(r));
2559
2560	if (sleeping)
2561		check_ring_db(adap, qs, sleeping);
2562
2563	smp_mb();		/* commit Tx queue .processed updates */
2564	if (unlikely(qs->txq_stopped != 0))
2565		restart_tx(qs);
2566
2567	return is_new_response(r, q);
2568}
2569
2570/**
2571 *	handle_responses - decide what to do with new responses in NAPI mode
2572 *	@adap: the adapter
2573 *	@q: the response queue
2574 *
2575 *	This is used by the NAPI interrupt handlers to decide what to do with
2576 *	new SGE responses.  If there are no new responses it returns -1.  If
2577 *	there are new responses and they are pure (i.e., non-data carrying)
2578 *	it handles them straight in hard interrupt context as they are very
2579 *	cheap and don't deliver any packets.  Finally, if there are any data
2580 *	signaling responses it schedules the NAPI handler.  Returns 1 if it
2581 *	schedules NAPI, 0 if all new responses were pure.
2582 *
2583 *	The caller must ascertain NAPI is not already running.
2584 */
2585static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
2586{
2587	struct sge_qset *qs = rspq_to_qset(q);
2588	struct rsp_desc *r = &q->desc[q->cidx];
2589
2590	if (!is_new_response(r, q))
2591		return -1;
2592	dma_rmb();
2593	if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
2594		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2595			     V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
2596		return 0;
2597	}
2598	napi_schedule(&qs->napi);
2599	return 1;
2600}
2601
2602/*
2603 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2604 * (i.e., response queue serviced in hard interrupt).
2605 */
2606static irqreturn_t t3_sge_intr_msix(int irq, void *cookie)
2607{
2608	struct sge_qset *qs = cookie;
2609	struct adapter *adap = qs->adap;
2610	struct sge_rspq *q = &qs->rspq;
2611
2612	spin_lock(&q->lock);
2613	if (process_responses(adap, qs, -1) == 0)
2614		q->unhandled_irqs++;
2615	t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2616		     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2617	spin_unlock(&q->lock);
2618	return IRQ_HANDLED;
2619}
2620
2621/*
2622 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2623 * (i.e., response queue serviced by NAPI polling).
2624 */
2625static irqreturn_t t3_sge_intr_msix_napi(int irq, void *cookie)
2626{
2627	struct sge_qset *qs = cookie;
2628	struct sge_rspq *q = &qs->rspq;
2629
2630	spin_lock(&q->lock);
2631
2632	if (handle_responses(qs->adap, q) < 0)
2633		q->unhandled_irqs++;
2634	spin_unlock(&q->lock);
2635	return IRQ_HANDLED;
2636}
2637
2638/*
2639 * The non-NAPI MSI interrupt handler.  This needs to handle data events from
2640 * SGE response queues as well as error and other async events as they all use
2641 * the same MSI vector.  We use one SGE response queue per port in this mode
2642 * and protect all response queues with queue 0's lock.
2643 */
2644static irqreturn_t t3_intr_msi(int irq, void *cookie)
2645{
2646	int new_packets = 0;
2647	struct adapter *adap = cookie;
2648	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2649
2650	spin_lock(&q->lock);
2651
2652	if (process_responses(adap, &adap->sge.qs[0], -1)) {
2653		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
2654			     V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx));
2655		new_packets = 1;
2656	}
2657
2658	if (adap->params.nports == 2 &&
2659	    process_responses(adap, &adap->sge.qs[1], -1)) {
2660		struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2661
2662		t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) |
2663			     V_NEWTIMER(q1->next_holdoff) |
2664			     V_NEWINDEX(q1->cidx));
2665		new_packets = 1;
2666	}
2667
2668	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2669		q->unhandled_irqs++;
2670
2671	spin_unlock(&q->lock);
2672	return IRQ_HANDLED;
2673}
2674
2675static int rspq_check_napi(struct sge_qset *qs)
2676{
2677	struct sge_rspq *q = &qs->rspq;
2678
2679	if (!napi_is_scheduled(&qs->napi) &&
2680	    is_new_response(&q->desc[q->cidx], q)) {
2681		napi_schedule(&qs->napi);
2682		return 1;
2683	}
2684	return 0;
2685}
2686
2687/*
2688 * The MSI interrupt handler for the NAPI case (i.e., response queues serviced
2689 * by NAPI polling).  Handles data events from SGE response queues as well as
2690 * error and other async events as they all use the same MSI vector.  We use
2691 * one SGE response queue per port in this mode and protect all response
2692 * queues with queue 0's lock.
2693 */
2694static irqreturn_t t3_intr_msi_napi(int irq, void *cookie)
2695{
2696	int new_packets;
2697	struct adapter *adap = cookie;
2698	struct sge_rspq *q = &adap->sge.qs[0].rspq;
2699
2700	spin_lock(&q->lock);
2701
2702	new_packets = rspq_check_napi(&adap->sge.qs[0]);
2703	if (adap->params.nports == 2)
2704		new_packets += rspq_check_napi(&adap->sge.qs[1]);
2705	if (!new_packets && t3_slow_intr_handler(adap) == 0)
2706		q->unhandled_irqs++;
2707
2708	spin_unlock(&q->lock);
2709	return IRQ_HANDLED;
2710}
2711
2712/*
2713 * A helper function that processes responses and issues GTS.
2714 */
2715static inline int process_responses_gts(struct adapter *adap,
2716					struct sge_rspq *rq)
2717{
2718	int work;
2719
2720	work = process_responses(adap, rspq_to_qset(rq), -1);
2721	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
2722		     V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
2723	return work;
2724}
2725
2726/*
2727 * The legacy INTx interrupt handler.  This needs to handle data events from
2728 * SGE response queues as well as error and other async events as they all use
2729 * the same interrupt pin.  We use one SGE response queue per port in this mode
2730 * and protect all response queues with queue 0's lock.
2731 */
2732static irqreturn_t t3_intr(int irq, void *cookie)
2733{
2734	int work_done, w0, w1;
2735	struct adapter *adap = cookie;
2736	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2737	struct sge_rspq *q1 = &adap->sge.qs[1].rspq;
2738
2739	spin_lock(&q0->lock);
2740
2741	w0 = is_new_response(&q0->desc[q0->cidx], q0);
2742	w1 = adap->params.nports == 2 &&
2743	    is_new_response(&q1->desc[q1->cidx], q1);
2744
2745	if (likely(w0 | w1)) {
2746		t3_write_reg(adap, A_PL_CLI, 0);
2747		t3_read_reg(adap, A_PL_CLI);	/* flush */
2748
2749		if (likely(w0))
2750			process_responses_gts(adap, q0);
2751
2752		if (w1)
2753			process_responses_gts(adap, q1);
2754
2755		work_done = w0 | w1;
2756	} else
2757		work_done = t3_slow_intr_handler(adap);
2758
2759	spin_unlock(&q0->lock);
2760	return IRQ_RETVAL(work_done != 0);
2761}
2762
2763/*
2764 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
2765 * Handles data events from SGE response queues as well as error and other
2766 * async events as they all use the same interrupt pin.  We use one SGE
2767 * response queue per port in this mode and protect all response queues with
2768 * queue 0's lock.
2769 */
2770static irqreturn_t t3b_intr(int irq, void *cookie)
2771{
2772	u32 map;
2773	struct adapter *adap = cookie;
2774	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
2775
2776	t3_write_reg(adap, A_PL_CLI, 0);
2777	map = t3_read_reg(adap, A_SG_DATA_INTR);
2778
2779	if (unlikely(!map))	/* shared interrupt, most likely */
2780		return IRQ_NONE;
2781
2782	spin_lock(&q0->lock);
2783
2784	if (unlikely(map & F_ERRINTR))
2785		t3_slow_intr_handler(adap);
2786
2787	if (likely(map & 1))
2788		process_responses_gts(adap, q0);
2789
2790	if (map & 2)
2791		process_responses_gts(adap, &adap->sge.qs[1].rspq);
2792
2793	spin_unlock(&q0->lock);
2794	return IRQ_HANDLED;
2795}
2796
2797/*
2798 * NAPI interrupt handler for legacy INTx interrupts for T3B-based cards.
2799 * Handles data events from SGE response queues as well as error and other
2800 * async events as they all use the same interrupt pin.  We use one SGE
2801 * response queue per port in this mode and protect all response queues with
2802 * queue 0's lock.
2803 */
2804static irqreturn_t t3b_intr_napi(int irq, void *cookie)
2805{
2806	u32 map;
2807	struct adapter *adap = cookie;
2808	struct sge_qset *qs0 = &adap->sge.qs[0];
2809	struct sge_rspq *q0 = &qs0->rspq;
2810
2811	t3_write_reg(adap, A_PL_CLI, 0);
2812	map = t3_read_reg(adap, A_SG_DATA_INTR);
2813
2814	if (unlikely(!map))	/* shared interrupt, most likely */
2815		return IRQ_NONE;
2816
2817	spin_lock(&q0->lock);
2818
2819	if (unlikely(map & F_ERRINTR))
2820		t3_slow_intr_handler(adap);
2821
2822	if (likely(map & 1))
2823		napi_schedule(&qs0->napi);
2824
2825	if (map & 2)
2826		napi_schedule(&adap->sge.qs[1].napi);
2827
2828	spin_unlock(&q0->lock);
2829	return IRQ_HANDLED;
2830}
2831
2832/**
2833 *	t3_intr_handler - select the top-level interrupt handler
2834 *	@adap: the adapter
2835 *	@polling: whether using NAPI to service response queues
2836 *
2837 *	Selects the top-level interrupt handler based on the type of interrupts
2838 *	(MSI-X, MSI, or legacy) and whether NAPI will be used to service the
2839 *	response queues.
2840 */
2841irq_handler_t t3_intr_handler(struct adapter *adap, int polling)
2842{
2843	if (adap->flags & USING_MSIX)
2844		return polling ? t3_sge_intr_msix_napi : t3_sge_intr_msix;
2845	if (adap->flags & USING_MSI)
2846		return polling ? t3_intr_msi_napi : t3_intr_msi;
2847	if (adap->params.rev > 0)
2848		return polling ? t3b_intr_napi : t3b_intr;
2849	return t3_intr;
2850}
2851
2852#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
2853		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
2854		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
2855		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
2856		    F_HIRCQPARITYERROR)
2857#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
2858#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
2859		      F_RSPQDISABLED)
2860
2861/**
2862 *	t3_sge_err_intr_handler - SGE async event interrupt handler
2863 *	@adapter: the adapter
2864 *
2865 *	Interrupt handler for SGE asynchronous (non-data) events.
2866 */
2867void t3_sge_err_intr_handler(struct adapter *adapter)
2868{
2869	unsigned int v, status = t3_read_reg(adapter, A_SG_INT_CAUSE) &
2870				 ~F_FLEMPTY;
2871
2872	if (status & SGE_PARERR)
2873		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
2874			 status & SGE_PARERR);
2875	if (status & SGE_FRAMINGERR)
2876		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
2877			 status & SGE_FRAMINGERR);
2878
2879	if (status & F_RSPQCREDITOVERFOW)
2880		CH_ALERT(adapter, "SGE response queue credit overflow\n");
2881
2882	if (status & F_RSPQDISABLED) {
2883		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
2884
2885		CH_ALERT(adapter,
2886			 "packet delivered to disabled response queue "
2887			 "(0x%x)\n", (v >> S_RSPQ0DISABLED) & 0xff);
2888	}
2889
2890	if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
2891		queue_work(cxgb3_wq, &adapter->db_drop_task);
2892
2893	if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
2894		queue_work(cxgb3_wq, &adapter->db_full_task);
2895
2896	if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
2897		queue_work(cxgb3_wq, &adapter->db_empty_task);
2898
2899	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
2901		t3_fatal_err(adapter);
2902}
2903
2904/**
2905 *	sge_timer_tx - perform periodic maintenance of an SGE qset
2906 *	@t: a timer list containing the SGE queue set to maintain
2907 *
2908 *	Runs periodically from a timer to perform maintenance of an SGE queue
2909 *	set.  It performs two tasks:
2910 *
2911 *	Cleans up any completed Tx descriptors that may still be pending.
2912 *	Normal descriptor cleanup happens when new packets are added to a Tx
2913 *	queue so this timer is relatively infrequent and does any cleanup only
2914 *	if the Tx queue has not seen any new packets in a while.  We make a
2915 *	best effort attempt to reclaim descriptors, in that we don't wait
2916 *	around if we cannot get a queue's lock (which most likely is because
2917 *	someone else is queueing new packets and so will also handle the clean
2918 *	up).  Since control queues use immediate data exclusively we don't
2919 *	bother cleaning them up here.
2920 *
2921 */
2922static void sge_timer_tx(struct timer_list *t)
2923{
2924	struct sge_qset *qs = from_timer(qs, t, tx_reclaim_timer);
2925	struct port_info *pi = netdev_priv(qs->netdev);
2926	struct adapter *adap = pi->adapter;
2927	unsigned int tbd[SGE_TXQ_PER_SET] = {0, 0};
2928	unsigned long next_period;
2929
2930	if (__netif_tx_trylock(qs->tx_q)) {
		tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH],
						    TX_RECLAIM_TIMER_CHUNK);
2933		__netif_tx_unlock(qs->tx_q);
2934	}
2935
2936	if (spin_trylock(&qs->txq[TXQ_OFLD].lock)) {
2937		tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD],
2938						     TX_RECLAIM_TIMER_CHUNK);
2939		spin_unlock(&qs->txq[TXQ_OFLD].lock);
2940	}
2941
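	/*
	 * Run the timer twice as often if a full chunk was reclaimed, as
	 * more completed descriptors are likely pending.
	 */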
	next_period = TX_RECLAIM_PERIOD >>
		      (max(tbd[TXQ_ETH], tbd[TXQ_OFLD]) /
		       TX_RECLAIM_TIMER_CHUNK);
2945	mod_timer(&qs->tx_reclaim_timer, jiffies + next_period);
2946}
2947
2948/**
2949 *	sge_timer_rx - perform periodic maintenance of an SGE qset
2950 *	@t: the timer list containing the SGE queue set to maintain
2951 *
2952 *	a) Replenishes Rx queues that have run out due to memory shortage.
2953 *	Normally new Rx buffers are added when existing ones are consumed but
2954 *	when out of memory a queue can become empty.  We try to add only a few
2955 *	buffers here, the queue will be replenished fully as these new buffers
2956 *	are used up if memory shortage has subsided.
2957 *
2958 *	b) Return coalesced response queue credits in case a response queue is
2959 *	starved.
2960 *
2961 */
2962static void sge_timer_rx(struct timer_list *t)
2963{
2964	spinlock_t *lock;
2965	struct sge_qset *qs = from_timer(qs, t, rx_reclaim_timer);
2966	struct port_info *pi = netdev_priv(qs->netdev);
2967	struct adapter *adap = pi->adapter;
2968	u32 status;
2969
2970	lock = adap->params.rev > 0 ?
2971	       &qs->rspq.lock : &adap->sge.qs[0].rspq.lock;
2972
2973	if (!spin_trylock_irq(lock))
2974		goto out;
2975
2976	if (napi_is_scheduled(&qs->napi))
2977		goto unlock;
2978
2979	if (adap->params.rev < 4) {
2980		status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS);
2981
2982		if (status & (1 << qs->rspq.cntxt_id)) {
2983			qs->rspq.starved++;
2984			if (qs->rspq.credits) {
2985				qs->rspq.credits--;
2986				refill_rspq(adap, &qs->rspq, 1);
2987				qs->rspq.restarted++;
2988				t3_write_reg(adap, A_SG_RSPQ_FL_STATUS,
2989					     1 << qs->rspq.cntxt_id);
2990			}
2991		}
2992	}
2993
2994	if (qs->fl[0].credits < qs->fl[0].size)
2995		__refill_fl(adap, &qs->fl[0]);
2996	if (qs->fl[1].credits < qs->fl[1].size)
2997		__refill_fl(adap, &qs->fl[1]);
2998
2999unlock:
3000	spin_unlock_irq(lock);
3001out:
3002	mod_timer(&qs->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
3003}
3004
3005/**
3006 *	t3_update_qset_coalesce - update coalescing settings for a queue set
3007 *	@qs: the SGE queue set
3008 *	@p: new queue set parameters
3009 *
3010 *	Update the coalescing settings for an SGE queue set.  Nothing is done
3011 *	if the queue set is not initialized yet.
3012 */
3013void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
3014{
	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U); /* can't be 0 */
3016	qs->rspq.polling = p->polling;
3017	qs->napi.poll = p->polling ? napi_rx_handler : ofld_poll;
3018}
3019
3020/**
3021 *	t3_sge_alloc_qset - initialize an SGE queue set
3022 *	@adapter: the adapter
3023 *	@id: the queue set id
3024 *	@nports: how many Ethernet ports will be using this queue set
3025 *	@irq_vec_idx: the IRQ vector index for response queue interrupts
3026 *	@p: configuration parameters for this queue set
3027 *	@ntxq: number of Tx queues for the queue set
3028 *	@dev: net device associated with this queue set
3029 *	@netdevq: net device TX queue associated with this queue set
3030 *
3031 *	Allocate resources and initialize an SGE queue set.  A queue set
3032 *	comprises a response queue, two Rx free-buffer queues, and up to 3
3033 *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
3034 *	queue, offload queue, and control queue.
3035 */
3036int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
3037		      int irq_vec_idx, const struct qset_params *p,
3038		      int ntxq, struct net_device *dev,
3039		      struct netdev_queue *netdevq)
3040{
3041	int i, avail, ret = -ENOMEM;
3042	struct sge_qset *q = &adapter->sge.qs[id];
3043
3044	init_qset_cntxt(q, id);
3045	timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0);
3046	timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0);
3047
3048	q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size,
3049				   sizeof(struct rx_desc),
3050				   sizeof(struct rx_sw_desc),
3051				   &q->fl[0].phys_addr, &q->fl[0].sdesc);
3052	if (!q->fl[0].desc)
3053		goto err;
3054
3055	q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size,
3056				   sizeof(struct rx_desc),
3057				   sizeof(struct rx_sw_desc),
3058				   &q->fl[1].phys_addr, &q->fl[1].sdesc);
3059	if (!q->fl[1].desc)
3060		goto err;
3061
3062	q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size,
3063				  sizeof(struct rsp_desc), 0,
3064				  &q->rspq.phys_addr, NULL);
3065	if (!q->rspq.desc)
3066		goto err;
3067
3068	for (i = 0; i < ntxq; ++i) {
3069		/*
3070		 * The control queue always uses immediate data so does not
3071		 * need to keep track of any sk_buffs.
3072		 */
3073		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
3074
3075		q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i],
3076					    sizeof(struct tx_desc), sz,
3077					    &q->txq[i].phys_addr,
3078					    &q->txq[i].sdesc);
3079		if (!q->txq[i].desc)
3080			goto err;
3081
3082		q->txq[i].gen = 1;
3083		q->txq[i].size = p->txq_size[i];
3084		spin_lock_init(&q->txq[i].lock);
3085		skb_queue_head_init(&q->txq[i].sendq);
3086	}
3087
3088	tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
3089	tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
3090
3091	q->fl[0].gen = q->fl[1].gen = 1;
3092	q->fl[0].size = p->fl_size;
3093	q->fl[1].size = p->jumbo_size;
3094
3095	q->rspq.gen = 1;
3096	q->rspq.size = p->rspq_size;
3097	spin_lock_init(&q->rspq.lock);
3098	skb_queue_head_init(&q->rspq.rx_queue);
3099
3100	q->txq[TXQ_ETH].stop_thres = nports *
3101	    flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
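	/* i.e., keep room for one worst-case fragmented packet per port */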
3102
3103#if FL0_PG_CHUNK_SIZE > 0
3104	q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
3105#else
3106	q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
3107#endif
3108#if FL1_PG_CHUNK_SIZE > 0
3109	q->fl[1].buf_size = FL1_PG_CHUNK_SIZE;
3110#else
3111	q->fl[1].buf_size = is_offload(adapter) ?
3112		(16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
3113		MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
3114#endif
3115
3116	q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
3117	q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
3118	q->fl[0].order = FL0_PG_ORDER;
3119	q->fl[1].order = FL1_PG_ORDER;
3120	q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
3121	q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
3122
3123	spin_lock_irq(&adapter->sge.reg_lock);
3124
3125	/* FL threshold comparison uses < */
3126	ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
3127				   q->rspq.phys_addr, q->rspq.size,
3128				   q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
3129	if (ret)
3130		goto err_unlock;
3131
3132	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
3133		ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
3134					  q->fl[i].phys_addr, q->fl[i].size,
3135					  q->fl[i].buf_size - SGE_PG_RSVD,
3136					  p->cong_thres, 1, 0);
3137		if (ret)
3138			goto err_unlock;
3139	}
3140
3141	ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
3142				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
3143				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
3144				 1, 0);
3145	if (ret)
3146		goto err_unlock;
3147
3148	if (ntxq > 1) {
3149		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id,
3150					 USE_GTS, SGE_CNTXT_OFLD, id,
3151					 q->txq[TXQ_OFLD].phys_addr,
3152					 q->txq[TXQ_OFLD].size, 0, 1, 0);
3153		if (ret)
3154			goto err_unlock;
3155	}
3156
3157	if (ntxq > 2) {
3158		ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0,
3159					 SGE_CNTXT_CTRL, id,
3160					 q->txq[TXQ_CTRL].phys_addr,
3161					 q->txq[TXQ_CTRL].size,
3162					 q->txq[TXQ_CTRL].token, 1, 0);
3163		if (ret)
3164			goto err_unlock;
3165	}
3166
3167	spin_unlock_irq(&adapter->sge.reg_lock);
3168
3169	q->adap = adapter;
3170	q->netdev = dev;
3171	q->tx_q = netdevq;
3172	t3_update_qset_coalesce(q, p);
3173
3174	avail = refill_fl(adapter, &q->fl[0], q->fl[0].size,
3175			  GFP_KERNEL | __GFP_COMP);
3176	if (!avail) {
3177		CH_ALERT(adapter, "free list queue 0 initialization failed\n");
3178		ret = -ENOMEM;
3179		goto err;
3180	}
3181	if (avail < q->fl[0].size)
3182		CH_WARN(adapter, "free list queue 0 enabled with %d credits\n",
3183			avail);
3184
3185	avail = refill_fl(adapter, &q->fl[1], q->fl[1].size,
3186			  GFP_KERNEL | __GFP_COMP);
3187	if (avail < q->fl[1].size)
3188		CH_WARN(adapter, "free list queue 1 enabled with %d credits\n",
3189			avail);
3190	refill_rspq(adapter, &q->rspq, q->rspq.size - 1);
3191
3192	t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
3193		     V_NEWTIMER(q->rspq.holdoff_tmr));
3194
3195	return 0;
3196
3197err_unlock:
3198	spin_unlock_irq(&adapter->sge.reg_lock);
3199err:
3200	t3_free_qset(adapter, q);
3201	return ret;
3202}
3203
3204/**
3205 *      t3_start_sge_timers - start SGE timer call backs
3206 *      @adap: the adapter
3207 *
3208 *      Starts each SGE queue set's timer call back
3209 */
3210void t3_start_sge_timers(struct adapter *adap)
3211{
3212	int i;
3213
3214	for (i = 0; i < SGE_QSETS; ++i) {
3215		struct sge_qset *q = &adap->sge.qs[i];
3216
3217		if (q->tx_reclaim_timer.function)
3218			mod_timer(&q->tx_reclaim_timer,
3219				  jiffies + TX_RECLAIM_PERIOD);
3220
3221		if (q->rx_reclaim_timer.function)
3222			mod_timer(&q->rx_reclaim_timer,
3223				  jiffies + RX_RECLAIM_PERIOD);
3224	}
3225}
3226
3227/**
3228 *	t3_stop_sge_timers - stop SGE timer call backs
3229 *	@adap: the adapter
3230 *
3231 *	Stops each SGE queue set's timer call back
3232 */
3233void t3_stop_sge_timers(struct adapter *adap)
3234{
3235	int i;
3236
3237	for (i = 0; i < SGE_QSETS; ++i) {
3238		struct sge_qset *q = &adap->sge.qs[i];
3239
3240		if (q->tx_reclaim_timer.function)
3241			del_timer_sync(&q->tx_reclaim_timer);
3242		if (q->rx_reclaim_timer.function)
3243			del_timer_sync(&q->rx_reclaim_timer);
3244	}
3245}
3246
3247/**
3248 *	t3_free_sge_resources - free SGE resources
3249 *	@adap: the adapter
3250 *
3251 *	Frees resources used by the SGE queue sets.
3252 */
3253void t3_free_sge_resources(struct adapter *adap)
3254{
3255	int i;
3256
3257	for (i = 0; i < SGE_QSETS; ++i)
3258		t3_free_qset(adap, &adap->sge.qs[i]);
3259}
3260
3261/**
3262 *	t3_sge_start - enable SGE
3263 *	@adap: the adapter
3264 *
3265 *	Enables the SGE for DMAs.  This is the last step in starting packet
3266 *	transfers.
3267 */
3268void t3_sge_start(struct adapter *adap)
3269{
3270	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
3271}
3272
3273/**
3274 *	t3_sge_stop_dma - Disable SGE DMA engine operation
3275 *	@adap: the adapter
3276 *
3277 *	Can be invoked from interrupt context e.g.  error handler.
3278 *
3279 *	Note that this function cannot disable the restart of tasklets as
3280 *	it cannot wait if called from interrupt context, however the
3281 *	tasklets will have no effect since the doorbells are disabled. The
3282 *	driver will call tg3_sge_stop() later from process context, at
3283 *	which time the tasklets will be stopped if they are still running.
3284 */
3285void t3_sge_stop_dma(struct adapter *adap)
3286{
3287	t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
3288}
3289
3290/**
3291 *	t3_sge_stop - disable SGE operation completly
3292 *	@adap: the adapter
3293 *
3294 *	Called from process context. Disables the DMA engine and any
3295 *	pending queue restart tasklets.
3296 */
3297void t3_sge_stop(struct adapter *adap)
3298{
3299	int i;
3300
3301	t3_sge_stop_dma(adap);
3302
3303	for (i = 0; i < SGE_QSETS; ++i) {
3304		struct sge_qset *qs = &adap->sge.qs[i];
3305
3306		tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
3307		tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
3308	}
3309}
3310
3311/**
3312 *	t3_sge_init - initialize SGE
3313 *	@adap: the adapter
3314 *	@p: the SGE parameters
3315 *
3316 *	Performs SGE initialization needed every time after a chip reset.
3317 *	We do not initialize any of the queue sets here, instead the driver
3318 *	top-level must request those individually.  We also do not enable DMA
3319 *	here, that should be done after the queues have been set up.
3320 */
3321void t3_sge_init(struct adapter *adap, struct sge_params *p)
3322{
3323	unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12);
3324
3325	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
3326	    F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
3327	    V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
3328	    V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
3329#if SGE_NUM_GENBITS == 1
3330	ctrl |= F_EGRGENCTRL;
3331#endif
3332	if (adap->params.rev > 0) {
3333		if (!(adap->flags & (USING_MSIX | USING_MSI)))
3334			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
3335	}
3336	t3_write_reg(adap, A_SG_CONTROL, ctrl);
3337	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
3338		     V_LORCQDRBTHRSH(512));
3339	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
3340	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
3341		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
3342	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
3343		     adap->params.rev < T3_REV_C ? 1000 : 500);
3344	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
3345	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
3346	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
3347	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
3348	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
3349}
3350
3351/**
3352 *	t3_sge_prep - one-time SGE initialization
3353 *	@adap: the associated adapter
3354 *	@p: SGE parameters
3355 *
3356 *	Performs one-time initialization of SGE SW state.  Includes determining
3357 *	defaults for the assorted SGE parameters, which admins can change until
3358 *	they are used to initialize the SGE.
3359 */
3360void t3_sge_prep(struct adapter *adap, struct sge_params *p)
3361{
3362	int i;
3363
3364	p->max_pkt_size = (16 * 1024) - sizeof(struct cpl_rx_data) -
3365	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3366
3367	for (i = 0; i < SGE_QSETS; ++i) {
3368		struct qset_params *q = p->qset + i;
3369
3370		q->polling = adap->params.rev > 0;
3371		q->coalesce_usecs = 5;
3372		q->rspq_size = 1024;
3373		q->fl_size = 1024;
		q->jumbo_size = 512;
3375		q->txq_size[TXQ_ETH] = 1024;
3376		q->txq_size[TXQ_OFLD] = 1024;
3377		q->txq_size[TXQ_CTRL] = 256;
3378		q->cong_thres = 0;
3379	}
3380
3381	spin_lock_init(&adap->sge.reg_lock);
3382}
3383