1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license.  When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 - 2019 Intel Corporation
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
20 * more details.
21 *
22 * The full GNU General Public License is included in this distribution in the
23 * file called COPYING.
24 *
25 * Contact Information:
26 *  Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
28 *
29 * BSD LICENSE
30 *
31 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 - 2019 Intel Corporation
35 * All rights reserved.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 *  * Redistributions of source code must retain the above copyright
42 *    notice, this list of conditions and the following disclaimer.
43 *  * Redistributions in binary form must reproduce the above copyright
44 *    notice, this list of conditions and the following disclaimer in
45 *    the documentation and/or other materials provided with the
46 *    distribution.
47 *  * Neither the name Intel Corporation nor the names of its
48 *    contributors may be used to endorse or promote products derived
49 *    from this software without specific prior written permission.
50 *
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
62 *
63 *****************************************************************************/
64#include <linux/sched.h>
65#include <linux/wait.h>
66#include <linux/gfp.h>
67
68#include "iwl-prph.h"
69#include "iwl-io.h"
70#include "internal.h"
71#include "iwl-op-mode.h"
72#include "iwl-context-info-gen3.h"
73
74/******************************************************************************
75 *
76 * RX path functions
77 *
78 ******************************************************************************/
79
80/*
81 * Rx theory of operation
82 *
83 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC.  These get
85 * used not only for Rx frames, but for any command response or notification
86 * from the NIC.  The driver and NIC manage the Rx buffers by means
87 * of indexes into the circular buffer.
88 *
89 * Rx Queue Indexes
90 * The host/firmware share two index registers for managing the Rx buffers.
91 *
92 * The READ index maps to the first position that the firmware may be writing
93 * to -- the driver can read up to (but not including) this position and get
94 * good data.
95 * The READ index is managed by the firmware once the card is enabled.
96 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot in which the firmware can place
 * a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ (see the illustrative helpers below this comment).
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1, wrapped).
105 *
106 * When the firmware places a packet in a buffer, it will advance the READ index
107 * and fire the RX interrupt.  The driver can then query the READ index and
108 * process as many packets as possible, moving the WRITE index forward as it
109 * resets the Rx queue buffers with new memory.
110 *
111 * The management in the driver is as follows:
112 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
113 *   When the interrupt handler is called, the request is processed.
114 *   The page is either stolen - transferred to the upper layer
115 *   or reused - added immediately to the iwl->rxq->rx_free list.
116 * + When the page is stolen - the driver updates the matching queue's used
117 *   count, detaches the RBD and transfers it to the queue used list.
118 *   When there are two used RBDs - they are transferred to the allocator empty
119 *   list. Work is then scheduled for the allocator to start allocating
120 *   eight buffers.
 *   When another 6 RBDs are used - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it keeps trying to claim
 *   them until they are ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of num_queues*(8-2) RBDs - the maximum
 *   number of RBDs missing per allocation request (a request is posted with 2
 *   empty RBDs; there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
132 * + A received packet is processed and handed to the kernel network stack,
133 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
134 * + If there are no allocated buffers in iwl->rxq->rx_free,
135 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
136 *   If there were enough free buffers and RX_STALLED is set it is cleared.
137 *
138 *
139 * Driver sequence:
140 *
141 * iwl_rxq_alloc()            Allocates rx_free
142 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
143 *                            iwl_pcie_rxq_restock.
144 *                            Used only during initialization.
145 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
146 *                            queue, updates firmware pointers, and updates
147 *                            the WRITE index.
148 * iwl_pcie_rx_allocator()     Background work for allocating pages.
149 *
150 * -- enable interrupts --
151 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
152 *                            READ INDEX, detaching the SKB from the pool.
153 *                            Moves the packet buffer from queue to rx_used.
154 *                            Posts and claims requests to the allocator.
155 *                            Calls iwl_pcie_rxq_restock to refill any empty
156 *                            slots.
157 *
158 * RBD life-cycle:
159 *
160 * Init:
161 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
162 *
163 * Regular Receive interrupt:
164 * Page Stolen:
165 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
166 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
167 * Page not Stolen:
168 * rxq.queue -> rxq.rx_free -> rxq.queue
169 * ...
170 *
171 */
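
/*
 * Illustrative helpers only - not used by the driver. They restate the
 * empty/full conditions from the comment above using the same power-of-2
 * wrap-around arithmetic as the queue code (a sketch, assuming the queue
 * size is a power of 2, as checked in iwl_rxq_space() below).
 */
static inline bool iwl_rxq_example_is_empty(u32 read, u32 write, u32 size)
{
	/* empty (no good data): WRITE == READ - 1, modulo the queue size */
	return write == ((read - 1) & (size - 1));
}

static inline bool iwl_rxq_example_is_full(u32 read, u32 write, u32 size)
{
	/* full: WRITE == READ */
	return write == read;
}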
172
173/*
174 * iwl_rxq_space - Return number of free slots available in queue.
175 */
176static int iwl_rxq_space(const struct iwl_rxq *rxq)
177{
178	/* Make sure rx queue size is a power of 2 */
179	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
180
181	/*
182	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
183	 * between empty and completely full queues.
184	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
185	 * defined for negative dividends.
186	 */
187	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
188}
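
/*
 * Worked example for the arithmetic above (illustration only): with
 * queue_size = 256, read = 10 and write = 250, the expression evaluates
 * to (10 - 250 - 1) & 255 = 15 free slots, the wrap-around being handled
 * purely by the mask.
 */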
189
190/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
192 */
193static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
194{
195	return cpu_to_le32((u32)(dma_addr >> 8));
196}
197
198/*
199 * iwl_pcie_rx_stop - stops the Rx DMA
200 */
201int iwl_pcie_rx_stop(struct iwl_trans *trans)
202{
203	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
204		/* TODO: remove this once fw does it */
205		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
206		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
207					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
208	} else if (trans->trans_cfg->mq_rx_supported) {
209		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
210		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
211					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
212	} else {
213		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
214		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
215					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
216					   1000);
217	}
218}
219
220/*
221 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
222 */
223static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
224				    struct iwl_rxq *rxq)
225{
226	u32 reg;
227
228	lockdep_assert_held(&rxq->lock);
229
230	/*
231	 * explicitly wake up the NIC if:
232	 * 1. shadow registers aren't enabled
233	 * 2. there is a chance that the NIC is asleep
234	 */
235	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
236	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
237		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
238
239		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
240			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
241				       reg);
242			iwl_set_bit(trans, CSR_GP_CNTRL,
243				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
244			rxq->need_update = true;
245			return;
246		}
247	}
248
249	rxq->write_actual = round_down(rxq->write, 8);
250	if (trans->trans_cfg->mq_rx_supported)
251		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
252			    rxq->write_actual);
253	else
254		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
255}
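
/*
 * Illustration only (hypothetical helper, not used by the driver): the
 * device-visible write pointer derived from the software write index,
 * mirroring the round_down(write, 8) above. For example, a software
 * index of 23 maps to a device pointer of 16; it advances to 24 only
 * once the software index reaches 24.
 */
static inline u32 iwl_pcie_example_device_wptr(u32 sw_write)
{
	return round_down(sw_write, 8);
}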
256
257static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
258{
259	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
260	int i;
261
262	for (i = 0; i < trans->num_rx_queues; i++) {
263		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
264
265		if (!rxq->need_update)
266			continue;
267		spin_lock(&rxq->lock);
268		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
269		rxq->need_update = false;
270		spin_unlock(&rxq->lock);
271	}
272}
273
274static void iwl_pcie_restock_bd(struct iwl_trans *trans,
275				struct iwl_rxq *rxq,
276				struct iwl_rx_mem_buffer *rxb)
277{
278	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
279		struct iwl_rx_transfer_desc *bd = rxq->bd;
280
281		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
282
283		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
284		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
285	} else {
286		__le64 *bd = rxq->bd;
287
288		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
289	}
290
291	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
292		     (u32)rxb->vid, rxq->id, rxq->write);
293}
294
295/*
296 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
297 */
298static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
299				  struct iwl_rxq *rxq)
300{
301	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
302	struct iwl_rx_mem_buffer *rxb;
303
304	/*
305	 * If the device isn't enabled - no need to try to add buffers...
306	 * This can happen when we stop the device and still have an interrupt
307	 * pending. We stop the APM before we sync the interrupts because we
308	 * have to (see comment there). On the other hand, since the APM is
309	 * stopped, we cannot access the HW (in particular not prph).
310	 * So don't try to restock if the APM has been already stopped.
311	 */
312	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
313		return;
314
315	spin_lock(&rxq->lock);
316	while (rxq->free_count) {
317		/* Get next free Rx buffer, remove from free list */
318		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
319				       list);
320		list_del(&rxb->list);
321		rxb->invalid = false;
322		/* some low bits are expected to be unset (depending on hw) */
323		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
324		/* Point to Rx buffer via next RBD in circular buffer */
325		iwl_pcie_restock_bd(trans, rxq, rxb);
326		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
327		rxq->free_count--;
328	}
329	spin_unlock(&rxq->lock);
330
331	/*
332	 * If we've added more space for the firmware to place data, tell it.
333	 * Increment device's write pointer in multiples of 8.
334	 */
335	if (rxq->write_actual != (rxq->write & ~0x7)) {
336		spin_lock(&rxq->lock);
337		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
338		spin_unlock(&rxq->lock);
339	}
340}
341
342/*
343 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
344 */
345static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
346				  struct iwl_rxq *rxq)
347{
348	struct iwl_rx_mem_buffer *rxb;
349
350	/*
	 * If the device isn't enabled - no need to try to add buffers...
352	 * This can happen when we stop the device and still have an interrupt
353	 * pending. We stop the APM before we sync the interrupts because we
354	 * have to (see comment there). On the other hand, since the APM is
355	 * stopped, we cannot access the HW (in particular not prph).
356	 * So don't try to restock if the APM has been already stopped.
357	 */
358	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
359		return;
360
361	spin_lock(&rxq->lock);
362	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
363		__le32 *bd = (__le32 *)rxq->bd;
364		/* The overwritten rxb must be a used one */
365		rxb = rxq->queue[rxq->write];
366		BUG_ON(rxb && rxb->page);
367
368		/* Get next free Rx buffer, remove from free list */
369		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
370				       list);
371		list_del(&rxb->list);
372		rxb->invalid = false;
373
374		/* Point to Rx buffer via next RBD in circular buffer */
375		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
376		rxq->queue[rxq->write] = rxb;
377		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
378		rxq->free_count--;
379	}
380	spin_unlock(&rxq->lock);
381
382	/* If we've added more space for the firmware to place data, tell it.
383	 * Increment device's write pointer in multiples of 8. */
384	if (rxq->write_actual != (rxq->write & ~0x7)) {
385		spin_lock(&rxq->lock);
386		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
387		spin_unlock(&rxq->lock);
388	}
389}
390
391/*
392 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
393 *
394 * If there are slots in the RX queue that need to be restocked,
395 * and we have free pre-allocated buffers, fill the ranks as much
396 * as we can, pulling from rx_free.
397 *
398 * This moves the 'write' index forward to catch up with 'processed', and
399 * also updates the memory address in the firmware to reference the new
400 * target buffer.
401 */
402static
403void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
404{
405	if (trans->trans_cfg->mq_rx_supported)
406		iwl_pcie_rxmq_restock(trans, rxq);
407	else
408		iwl_pcie_rxsq_restock(trans, rxq);
409}
410
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page
 *
 * Returns the allocated page and, via @offset, the position of the RB
 * within it (non-zero when an already allocated page is shared between
 * several RBs).
 */
415static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
416					   u32 *offset, gfp_t priority)
417{
418	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
419	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
420	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
421	struct page *page;
422	gfp_t gfp_mask = priority;
423
424	if (trans_pcie->rx_page_order > 0)
425		gfp_mask |= __GFP_COMP;
426
427	if (trans_pcie->alloc_page) {
428		spin_lock_bh(&trans_pcie->alloc_page_lock);
429		/* recheck */
430		if (trans_pcie->alloc_page) {
431			*offset = trans_pcie->alloc_page_used;
432			page = trans_pcie->alloc_page;
433			trans_pcie->alloc_page_used += rbsize;
434			if (trans_pcie->alloc_page_used >= allocsize)
435				trans_pcie->alloc_page = NULL;
436			else
437				get_page(page);
438			spin_unlock_bh(&trans_pcie->alloc_page_lock);
439			return page;
440		}
441		spin_unlock_bh(&trans_pcie->alloc_page_lock);
442	}
443
444	/* Alloc a new receive buffer */
445	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
446	if (!page) {
447		if (net_ratelimit())
448			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
449				       trans_pcie->rx_page_order);
450		/*
451		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
453		 */
454		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
455			IWL_CRIT(trans,
456				 "Failed to alloc_pages\n");
457		return NULL;
458	}
459
460	if (2 * rbsize <= allocsize) {
461		spin_lock_bh(&trans_pcie->alloc_page_lock);
462		if (!trans_pcie->alloc_page) {
463			get_page(page);
464			trans_pcie->alloc_page = page;
465			trans_pcie->alloc_page_used = rbsize;
466		}
467		spin_unlock_bh(&trans_pcie->alloc_page_lock);
468	}
469
470	*offset = 0;
471	return page;
472}
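
/*
 * Illustration only (hypothetical helper): with the partial-page sharing
 * above, one allocation of PAGE_SIZE << rx_page_order bytes can back
 * several receive buffers; each buffer holds its own page reference (via
 * get_page()), so freeing one of them only drops that reference.
 */
static inline unsigned int
iwl_pcie_example_rbs_per_alloc(unsigned int rbsize, unsigned int allocsize)
{
	/* e.g. 2K RBs in a 4K allocation -> 2 buffers share one page */
	return allocsize / rbsize;
}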
473
474/*
475 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
476 *
477 * A used RBD is an Rx buffer that has been given to the stack. To use it again
478 * a page must be allocated and the RBD must point to the page. This function
479 * doesn't change the HW pointer but handles the list of pages that is used by
480 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
481 * allocated buffers.
482 */
483void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
484			    struct iwl_rxq *rxq)
485{
486	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
487	struct iwl_rx_mem_buffer *rxb;
488	struct page *page;
489
490	while (1) {
491		unsigned int offset;
492
493		spin_lock(&rxq->lock);
494		if (list_empty(&rxq->rx_used)) {
495			spin_unlock(&rxq->lock);
496			return;
497		}
498		spin_unlock(&rxq->lock);
499
500		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
501		if (!page)
502			return;
503
504		spin_lock(&rxq->lock);
505
506		if (list_empty(&rxq->rx_used)) {
507			spin_unlock(&rxq->lock);
508			__free_pages(page, trans_pcie->rx_page_order);
509			return;
510		}
511		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
512				       list);
513		list_del(&rxb->list);
514		spin_unlock(&rxq->lock);
515
516		BUG_ON(rxb->page);
517		rxb->page = page;
518		rxb->offset = offset;
519		/* Get physical address of the RB */
520		rxb->page_dma =
521			dma_map_page(trans->dev, page, rxb->offset,
522				     trans_pcie->rx_buf_bytes,
523				     DMA_FROM_DEVICE);
524		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
525			rxb->page = NULL;
526			spin_lock(&rxq->lock);
527			list_add(&rxb->list, &rxq->rx_used);
528			spin_unlock(&rxq->lock);
529			__free_pages(page, trans_pcie->rx_page_order);
530			return;
531		}
532
533		spin_lock(&rxq->lock);
534
535		list_add_tail(&rxb->list, &rxq->rx_free);
536		rxq->free_count++;
537
538		spin_unlock(&rxq->lock);
539	}
540}
541
542void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
543{
544	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
545	int i;
546
547	if (!trans_pcie->rx_pool)
548		return;
549
550	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
551		if (!trans_pcie->rx_pool[i].page)
552			continue;
553		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
554			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
555		__free_pages(trans_pcie->rx_pool[i].page,
556			     trans_pcie->rx_page_order);
557		trans_pcie->rx_pool[i].page = NULL;
558	}
559}
560
561/*
562 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
563 *
 * Allocates 8 pages for each received request.
565 * Called as a scheduled work item.
566 */
567static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
568{
569	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
570	struct iwl_rb_allocator *rba = &trans_pcie->rba;
571	struct list_head local_empty;
572	int pending = atomic_read(&rba->req_pending);
573
574	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
575
576	/* If we were scheduled - there is at least one request */
577	spin_lock(&rba->lock);
578	/* swap out the rba->rbd_empty to a local list */
579	list_replace_init(&rba->rbd_empty, &local_empty);
580	spin_unlock(&rba->lock);
581
582	while (pending) {
583		int i;
584		LIST_HEAD(local_allocated);
585		gfp_t gfp_mask = GFP_KERNEL;
586
587		/* Do not post a warning if there are only a few requests */
588		if (pending < RX_PENDING_WATERMARK)
589			gfp_mask |= __GFP_NOWARN;
590
591		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
592			struct iwl_rx_mem_buffer *rxb;
593			struct page *page;
594
595			/* List should never be empty - each reused RBD is
596			 * returned to the list, and initial pool covers any
597			 * possible gap between the time the page is allocated
			 * and the time the RBD is added.
599			 */
600			BUG_ON(list_empty(&local_empty));
601			/* Get the first rxb from the rbd list */
602			rxb = list_first_entry(&local_empty,
603					       struct iwl_rx_mem_buffer, list);
604			BUG_ON(rxb->page);
605
606			/* Alloc a new receive buffer */
607			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
608						      gfp_mask);
609			if (!page)
610				continue;
611			rxb->page = page;
612
613			/* Get physical address of the RB */
614			rxb->page_dma = dma_map_page(trans->dev, page,
615						     rxb->offset,
616						     trans_pcie->rx_buf_bytes,
617						     DMA_FROM_DEVICE);
618			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
619				rxb->page = NULL;
620				__free_pages(page, trans_pcie->rx_page_order);
621				continue;
622			}
623
624			/* move the allocated entry to the out list */
625			list_move(&rxb->list, &local_allocated);
626			i++;
627		}
628
629		atomic_dec(&rba->req_pending);
630		pending--;
631
632		if (!pending) {
633			pending = atomic_read(&rba->req_pending);
634			if (pending)
635				IWL_DEBUG_TPT(trans,
636					      "Got more pending allocation requests = %d\n",
637					      pending);
638		}
639
640		spin_lock(&rba->lock);
641		/* add the allocated rbds to the allocator allocated list */
642		list_splice_tail(&local_allocated, &rba->rbd_allocated);
643		/* get more empty RBDs for current pending requests */
644		list_splice_tail_init(&rba->rbd_empty, &local_empty);
645		spin_unlock(&rba->lock);
646
647		atomic_inc(&rba->req_ready);
648
649	}
650
651	spin_lock(&rba->lock);
652	/* return unused rbds to the allocator empty list */
653	list_splice_tail(&local_empty, &rba->rbd_empty);
654	spin_unlock(&rba->lock);
655
656	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
657}
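
/*
 * Summary of the allocator hand-off above (added for clarity): a queue
 * posts a request by incrementing req_pending and donating its used RBDs
 * to rbd_empty (see iwl_pcie_rx_reuse_rbd()); this work item turns them
 * into RX_CLAIM_REQ_ALLOC page-backed RBDs on rbd_allocated and
 * increments req_ready; the queue later claims them in
 * iwl_pcie_rx_allocator_get().
 */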
658
659/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
664 * This function directly moves the allocated RBs to the queue's ownership
665 * and updates the relevant counters.
666 */
667static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
668				      struct iwl_rxq *rxq)
669{
670	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
671	struct iwl_rb_allocator *rba = &trans_pcie->rba;
672	int i;
673
674	lockdep_assert_held(&rxq->lock);
675
676	/*
677	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
678	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
679	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. there are ready requests and the function
682	 * hands one request to the caller.
683	 */
684	if (atomic_dec_if_positive(&rba->req_ready) < 0)
685		return;
686
687	spin_lock(&rba->lock);
688	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
689		/* Get next free Rx buffer, remove it from free list */
690		struct iwl_rx_mem_buffer *rxb =
691			list_first_entry(&rba->rbd_allocated,
692					 struct iwl_rx_mem_buffer, list);
693
694		list_move(&rxb->list, &rxq->rx_free);
695	}
696	spin_unlock(&rba->lock);
697
698	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
699	rxq->free_count += RX_CLAIM_REQ_ALLOC;
700}
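
/*
 * Illustration only (hypothetical helper): the claim pattern used above.
 * atomic_dec_if_positive() tests for a ready request and consumes it in
 * a single atomic step, so concurrent claimers cannot over-consume.
 */
static inline bool iwl_pcie_example_claim_ready_request(atomic_t *req_ready)
{
	return atomic_dec_if_positive(req_ready) >= 0;
}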
701
702void iwl_pcie_rx_allocator_work(struct work_struct *data)
703{
704	struct iwl_rb_allocator *rba_p =
705		container_of(data, struct iwl_rb_allocator, rx_alloc);
706	struct iwl_trans_pcie *trans_pcie =
707		container_of(rba_p, struct iwl_trans_pcie, rba);
708
709	iwl_pcie_rx_allocator(trans_pcie->trans);
710}
711
712static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
713{
714	struct iwl_rx_transfer_desc *rx_td;
715
716	if (use_rx_td)
717		return sizeof(*rx_td);
718	else
719		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
720			sizeof(__le32);
721}
722
723static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
724				  struct iwl_rxq *rxq)
725{
726	struct device *dev = trans->dev;
727	bool use_rx_td = (trans->trans_cfg->device_family >=
728			  IWL_DEVICE_FAMILY_AX210);
729	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
730
731	if (rxq->bd)
732		dma_free_coherent(trans->dev,
733				  free_size * rxq->queue_size,
734				  rxq->bd, rxq->bd_dma);
735	rxq->bd_dma = 0;
736	rxq->bd = NULL;
737
738	rxq->rb_stts_dma = 0;
739	rxq->rb_stts = NULL;
740
741	if (rxq->used_bd)
742		dma_free_coherent(trans->dev,
743				  (use_rx_td ? sizeof(*rxq->cd) :
744				   sizeof(__le32)) * rxq->queue_size,
745				  rxq->used_bd, rxq->used_bd_dma);
746	rxq->used_bd_dma = 0;
747	rxq->used_bd = NULL;
748
749	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
750		return;
751
752	if (rxq->tr_tail)
753		dma_free_coherent(dev, sizeof(__le16),
754				  rxq->tr_tail, rxq->tr_tail_dma);
755	rxq->tr_tail_dma = 0;
756	rxq->tr_tail = NULL;
757
758	if (rxq->cr_tail)
759		dma_free_coherent(dev, sizeof(__le16),
760				  rxq->cr_tail, rxq->cr_tail_dma);
761	rxq->cr_tail_dma = 0;
762	rxq->cr_tail = NULL;
763}
764
765static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
766				  struct iwl_rxq *rxq)
767{
768	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
769	struct device *dev = trans->dev;
770	int i;
771	int free_size;
772	bool use_rx_td = (trans->trans_cfg->device_family >=
773			  IWL_DEVICE_FAMILY_AX210);
774	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
775			      sizeof(struct iwl_rb_status);
776
777	spin_lock_init(&rxq->lock);
778	if (trans->trans_cfg->mq_rx_supported)
779		rxq->queue_size = trans->cfg->num_rbds;
780	else
781		rxq->queue_size = RX_QUEUE_SIZE;
782
783	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
784
785	/*
786	 * Allocate the circular buffer of Read Buffer Descriptors
787	 * (RBDs)
788	 */
789	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
790				     &rxq->bd_dma, GFP_KERNEL);
791	if (!rxq->bd)
792		goto err;
793
794	if (trans->trans_cfg->mq_rx_supported) {
795		rxq->used_bd = dma_alloc_coherent(dev,
796						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
797						  &rxq->used_bd_dma,
798						  GFP_KERNEL);
799		if (!rxq->used_bd)
800			goto err;
801	}
802
803	rxq->rb_stts = trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
804	rxq->rb_stts_dma =
805		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
806
807	if (!use_rx_td)
808		return 0;
809
810	/* Allocate the driver's pointer to TR tail */
811	rxq->tr_tail = dma_alloc_coherent(dev, sizeof(__le16),
812					  &rxq->tr_tail_dma, GFP_KERNEL);
813	if (!rxq->tr_tail)
814		goto err;
815
816	/* Allocate the driver's pointer to CR tail */
817	rxq->cr_tail = dma_alloc_coherent(dev, sizeof(__le16),
818					  &rxq->cr_tail_dma, GFP_KERNEL);
819	if (!rxq->cr_tail)
820		goto err;
821
822	return 0;
823
824err:
825	for (i = 0; i < trans->num_rx_queues; i++) {
826		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
827
828		iwl_pcie_free_rxq_dma(trans, rxq);
829	}
830
831	return -ENOMEM;
832}
833
834static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
835{
836	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
837	struct iwl_rb_allocator *rba = &trans_pcie->rba;
838	int i, ret;
839	size_t rb_stts_size = trans->trans_cfg->device_family >=
840				IWL_DEVICE_FAMILY_AX210 ?
841			      sizeof(__le16) : sizeof(struct iwl_rb_status);
842
843	if (WARN_ON(trans_pcie->rxq))
844		return -EINVAL;
845
846	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
847				  GFP_KERNEL);
848	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
849				      sizeof(trans_pcie->rx_pool[0]),
850				      GFP_KERNEL);
851	trans_pcie->global_table =
852		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
853			sizeof(trans_pcie->global_table[0]),
854			GFP_KERNEL);
855	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
856	    !trans_pcie->global_table) {
857		ret = -ENOMEM;
858		goto err;
859	}
860
861	spin_lock_init(&rba->lock);
862
863	/*
864	 * Allocate the driver's pointer to receive buffer status.
865	 * Allocate for all queues continuously (HW requirement).
866	 */
867	trans_pcie->base_rb_stts =
868			dma_alloc_coherent(trans->dev,
869					   rb_stts_size * trans->num_rx_queues,
870					   &trans_pcie->base_rb_stts_dma,
871					   GFP_KERNEL);
872	if (!trans_pcie->base_rb_stts) {
873		ret = -ENOMEM;
874		goto err;
875	}
876
877	for (i = 0; i < trans->num_rx_queues; i++) {
878		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
879
880		rxq->id = i;
881		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
882		if (ret)
883			goto err;
884	}
885	return 0;
886
887err:
888	if (trans_pcie->base_rb_stts) {
889		dma_free_coherent(trans->dev,
890				  rb_stts_size * trans->num_rx_queues,
891				  trans_pcie->base_rb_stts,
892				  trans_pcie->base_rb_stts_dma);
893		trans_pcie->base_rb_stts = NULL;
894		trans_pcie->base_rb_stts_dma = 0;
895	}
896	kfree(trans_pcie->rx_pool);
897	kfree(trans_pcie->global_table);
898	kfree(trans_pcie->rxq);
899
900	return ret;
901}
902
903static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
904{
905	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
906	u32 rb_size;
907	unsigned long flags;
908	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
909
910	switch (trans_pcie->rx_buf_size) {
911	case IWL_AMSDU_4K:
912		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
913		break;
914	case IWL_AMSDU_8K:
915		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
916		break;
917	case IWL_AMSDU_12K:
918		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
919		break;
920	default:
921		WARN_ON(1);
922		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
923	}
924
925	if (!iwl_trans_grab_nic_access(trans, &flags))
926		return;
927
928	/* Stop Rx DMA */
929	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
930	/* reset and flush pointers */
931	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
932	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
933	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
934
935	/* Reset driver's Rx queue write index */
936	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
937
938	/* Tell device where to find RBD circular buffer in DRAM */
939	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
940		    (u32)(rxq->bd_dma >> 8));
941
942	/* Tell device where in DRAM to update its Rx status */
943	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
944		    rxq->rb_stts_dma >> 4);
945
946	/* Enable Rx DMA
947	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
948	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to host
	 * Rx buffer size 4k, 8k or 12k
951	 * RB timeout 0x10
952	 * 256 RBDs
953	 */
954	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
955		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
956		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
957		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
958		    rb_size |
959		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
960		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
961
962	iwl_trans_release_nic_access(trans, &flags);
963
964	/* Set interrupt coalescing timer to default (2048 usecs) */
965	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
966
967	/* W/A for interrupt coalescing bug in 7260 and 3160 */
968	if (trans->cfg->host_interrupt_operation_mode)
969		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
970}
971
972static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
973{
974	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
975	u32 rb_size, enabled = 0;
976	unsigned long flags;
977	int i;
978
979	switch (trans_pcie->rx_buf_size) {
980	case IWL_AMSDU_2K:
981		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
982		break;
983	case IWL_AMSDU_4K:
984		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
985		break;
986	case IWL_AMSDU_8K:
987		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
988		break;
989	case IWL_AMSDU_12K:
990		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
991		break;
992	default:
993		WARN_ON(1);
994		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
995	}
996
997	if (!iwl_trans_grab_nic_access(trans, &flags))
998		return;
999
1000	/* Stop Rx DMA */
1001	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
1003	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
1004
1005	for (i = 0; i < trans->num_rx_queues; i++) {
1006		/* Tell device where to find RBD free table in DRAM */
1007		iwl_write_prph64_no_grab(trans,
1008					 RFH_Q_FRBDCB_BA_LSB(i),
1009					 trans_pcie->rxq[i].bd_dma);
1010		/* Tell device where to find RBD used table in DRAM */
1011		iwl_write_prph64_no_grab(trans,
1012					 RFH_Q_URBDCB_BA_LSB(i),
1013					 trans_pcie->rxq[i].used_bd_dma);
1014		/* Tell device where in DRAM to update its Rx status */
1015		iwl_write_prph64_no_grab(trans,
1016					 RFH_Q_URBD_STTS_WPTR_LSB(i),
1017					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset device index tables */
1019		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
1020		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
1021		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
1022
1023		enabled |= BIT(i) | BIT(i + 16);
1024	}
1025
1026	/*
1027	 * Enable Rx DMA
	 * Rx buffer size 4k, 8k or 12k
1029	 * Min RB size 4 or 8
1030	 * Drop frames that exceed RB size
1031	 * 512 RBDs
1032	 */
1033	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
1034			       RFH_DMA_EN_ENABLE_VAL | rb_size |
1035			       RFH_RXF_DMA_MIN_RB_4_8 |
1036			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
1037			       RFH_RXF_DMA_RBDCB_SIZE_512);
1038
1039	/*
1040	 * Activate DMA snooping.
1041	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
1042	 * Default queue is 0
1043	 */
1044	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
1045			       RFH_GEN_CFG_RFH_DMA_SNOOP |
1046			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
1047			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
1048			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
1049					       trans->trans_cfg->integrated ?
1050					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
1051					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
1052	/* Enable the relevant rx queues */
1053	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
1054
1055	iwl_trans_release_nic_access(trans, &flags);
1056
1057	/* Set interrupt coalescing timer to default (2048 usecs) */
1058	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1059}
1060
1061void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
1062{
1063	lockdep_assert_held(&rxq->lock);
1064
1065	INIT_LIST_HEAD(&rxq->rx_free);
1066	INIT_LIST_HEAD(&rxq->rx_used);
1067	rxq->free_count = 0;
1068	rxq->used_count = 0;
1069}
1070
1071int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
1072{
1073	WARN_ON(1);
1074	return 0;
1075}
1076
1077static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1078{
1079	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1080	struct iwl_rxq *def_rxq;
1081	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1082	int i, err, queue_size, allocator_pool_size, num_alloc;
1083
1084	if (!trans_pcie->rxq) {
1085		err = iwl_pcie_rx_alloc(trans);
1086		if (err)
1087			return err;
1088	}
1089	def_rxq = trans_pcie->rxq;
1090
1091	cancel_work_sync(&rba->rx_alloc);
1092
1093	spin_lock(&rba->lock);
1094	atomic_set(&rba->req_pending, 0);
1095	atomic_set(&rba->req_ready, 0);
1096	INIT_LIST_HEAD(&rba->rbd_allocated);
1097	INIT_LIST_HEAD(&rba->rbd_empty);
1098	spin_unlock(&rba->lock);
1099
1100	/* free all first - we overwrite everything here */
1101	iwl_pcie_free_rbs_pool(trans);
1102
1103	for (i = 0; i < RX_QUEUE_SIZE; i++)
1104		def_rxq->queue[i] = NULL;
1105
1106	for (i = 0; i < trans->num_rx_queues; i++) {
1107		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1108
1109		spin_lock(&rxq->lock);
1110		/*
1111		 * Set read write pointer to reflect that we have processed
1112		 * and used all buffers, but have not restocked the Rx queue
1113		 * with fresh buffers
1114		 */
1115		rxq->read = 0;
1116		rxq->write = 0;
1117		rxq->write_actual = 0;
1118		memset(rxq->rb_stts, 0,
1119		       (trans->trans_cfg->device_family >=
1120			IWL_DEVICE_FAMILY_AX210) ?
1121		       sizeof(__le16) : sizeof(struct iwl_rb_status));
1122
1123		iwl_pcie_rx_init_rxb_lists(rxq);
1124
1125		if (!rxq->napi.poll)
1126			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1127				       iwl_pcie_dummy_napi_poll, 64);
1128
1129		spin_unlock(&rxq->lock);
1130	}
1131
1132	/* move the pool to the default queue and allocator ownerships */
1133	queue_size = trans->trans_cfg->mq_rx_supported ?
1134			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1135	allocator_pool_size = trans->num_rx_queues *
1136		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1137	num_alloc = queue_size + allocator_pool_size;
1138
1139	for (i = 0; i < num_alloc; i++) {
1140		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1141
1142		if (i < allocator_pool_size)
1143			list_add(&rxb->list, &rba->rbd_empty);
1144		else
1145			list_add(&rxb->list, &def_rxq->rx_used);
1146		trans_pcie->global_table[i] = rxb;
1147		rxb->vid = (u16)(i + 1);
1148		rxb->invalid = true;
1149	}
1150
1151	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1152
1153	return 0;
1154}
1155
1156int iwl_pcie_rx_init(struct iwl_trans *trans)
1157{
1158	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1159	int ret = _iwl_pcie_rx_init(trans);
1160
1161	if (ret)
1162		return ret;
1163
1164	if (trans->trans_cfg->mq_rx_supported)
1165		iwl_pcie_rx_mq_hw_init(trans);
1166	else
1167		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1168
1169	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1170
1171	spin_lock(&trans_pcie->rxq->lock);
1172	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1173	spin_unlock(&trans_pcie->rxq->lock);
1174
1175	return 0;
1176}
1177
1178int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1179{
1180	/* Set interrupt coalescing timer to default (2048 usecs) */
1181	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1182
1183	/*
1184	 * We don't configure the RFH.
1185	 * Restock will be done at alive, after firmware configured the RFH.
1186	 */
1187	return _iwl_pcie_rx_init(trans);
1188}
1189
1190void iwl_pcie_rx_free(struct iwl_trans *trans)
1191{
1192	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1193	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1194	int i;
1195	size_t rb_stts_size = trans->trans_cfg->device_family >=
1196				IWL_DEVICE_FAMILY_AX210 ?
1197			      sizeof(__le16) : sizeof(struct iwl_rb_status);
1198
1199	/*
1200	 * if rxq is NULL, it means that nothing has been allocated,
1201	 * exit now
1202	 */
1203	if (!trans_pcie->rxq) {
1204		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1205		return;
1206	}
1207
1208	cancel_work_sync(&rba->rx_alloc);
1209
1210	iwl_pcie_free_rbs_pool(trans);
1211
1212	if (trans_pcie->base_rb_stts) {
1213		dma_free_coherent(trans->dev,
1214				  rb_stts_size * trans->num_rx_queues,
1215				  trans_pcie->base_rb_stts,
1216				  trans_pcie->base_rb_stts_dma);
1217		trans_pcie->base_rb_stts = NULL;
1218		trans_pcie->base_rb_stts_dma = 0;
1219	}
1220
1221	for (i = 0; i < trans->num_rx_queues; i++) {
1222		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1223
1224		iwl_pcie_free_rxq_dma(trans, rxq);
1225
1226		if (rxq->napi.poll)
1227			netif_napi_del(&rxq->napi);
1228	}
1229	kfree(trans_pcie->rx_pool);
1230	kfree(trans_pcie->global_table);
1231	kfree(trans_pcie->rxq);
1232
1233	if (trans_pcie->alloc_page)
1234		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1235}
1236
1237static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1238					  struct iwl_rb_allocator *rba)
1239{
1240	spin_lock(&rba->lock);
1241	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1242	spin_unlock(&rba->lock);
1243}
1244
1245/*
1246 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1247 *
1248 * Called when a RBD can be reused. The RBD is transferred to the allocator.
1249 * When there are 2 empty RBDs - a request for allocation is posted
1250 */
1251static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1252				  struct iwl_rx_mem_buffer *rxb,
1253				  struct iwl_rxq *rxq, bool emergency)
1254{
1255	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1256	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1257
1258	/* Move the RBD to the used list, will be moved to allocator in batches
	 * before claiming or posting a request */
1260	list_add_tail(&rxb->list, &rxq->rx_used);
1261
1262	if (unlikely(emergency))
1263		return;
1264
1265	/* Count the allocator owned RBDs */
1266	rxq->used_count++;
1267
	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request for the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC
	 * buffers earlier but still need to post another request.
	 */
1273	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator ownership.
		 * Allocator has another 6 from the pool for the request
		 * completion.
		 */
1276		iwl_pcie_rx_move_to_allocator(rxq, rba);
1277
1278		atomic_inc(&rba->req_pending);
1279		queue_work(rba->alloc_wq, &rba->rx_alloc);
1280	}
1281}
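
/*
 * Worked example for the trigger above (illustration only): with the
 * values described in the theory-of-operation comment (2 RBDs posted per
 * request, 8 claimed per allocation), the condition
 * used_count % RX_CLAIM_REQ_ALLOC == RX_POST_REQ_ALLOC holds at
 * used_count = 2, 10, 18, ..., so a new allocation request is posted for
 * every 8 reused RBDs even if an earlier claim has not happened yet.
 */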
1282
1283static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1284				struct iwl_rxq *rxq,
1285				struct iwl_rx_mem_buffer *rxb,
1286				bool emergency,
1287				int i)
1288{
1289	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1290	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1291	bool page_stolen = false;
1292	int max_len = trans_pcie->rx_buf_bytes;
1293	u32 offset = 0;
1294
1295	if (WARN_ON(!rxb))
1296		return;
1297
1298	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1299
1300	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1301		struct iwl_rx_packet *pkt;
1302		u16 sequence;
1303		bool reclaim;
1304		int index, cmd_index, len;
1305		struct iwl_rx_cmd_buffer rxcb = {
1306			._offset = rxb->offset + offset,
1307			._rx_page_order = trans_pcie->rx_page_order,
1308			._page = rxb->page,
1309			._page_stolen = false,
1310			.truesize = max_len,
1311		};
1312
1313		pkt = rxb_addr(&rxcb);
1314
1315		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1316			IWL_DEBUG_RX(trans,
1317				     "Q %d: RB end marker at offset %d\n",
1318				     rxq->id, offset);
1319			break;
1320		}
1321
1322		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1323			FH_RSCSR_RXQ_POS != rxq->id,
1324		     "frame on invalid queue - is on %d and indicates %d\n",
1325		     rxq->id,
1326		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1327			FH_RSCSR_RXQ_POS);
1328
1329		IWL_DEBUG_RX(trans,
1330			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1331			     rxq->id, offset,
1332			     iwl_get_cmd_string(trans,
1333						iwl_cmd_id(pkt->hdr.cmd,
1334							   pkt->hdr.group_id,
1335							   0)),
1336			     pkt->hdr.group_id, pkt->hdr.cmd,
1337			     le16_to_cpu(pkt->hdr.sequence));
1338
1339		len = iwl_rx_packet_len(pkt);
1340		len += sizeof(u32); /* account for status word */
1341		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1342		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1343
1344		/* Reclaim a command buffer only if this packet is a response
1345		 *   to a (driver-originated) command.
1346		 * If the packet (e.g. Rx frame) originated from uCode,
1347		 *   there is no command buffer to reclaim.
1348		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1349		 *   but apparently a few don't get set; catch them here. */
1350		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1351		if (reclaim && !pkt->hdr.group_id) {
1352			int i;
1353
1354			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1355				if (trans_pcie->no_reclaim_cmds[i] ==
1356							pkt->hdr.cmd) {
1357					reclaim = false;
1358					break;
1359				}
1360			}
1361		}
1362
1363		sequence = le16_to_cpu(pkt->hdr.sequence);
1364		index = SEQ_TO_INDEX(sequence);
1365		cmd_index = iwl_txq_get_cmd_index(txq, index);
1366
1367		if (rxq->id == trans_pcie->def_rx_queue)
1368			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1369				       &rxcb);
1370		else
1371			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1372					   &rxcb, rxq->id);
1373
1374		if (reclaim) {
1375			kfree_sensitive(txq->entries[cmd_index].free_buf);
1376			txq->entries[cmd_index].free_buf = NULL;
1377		}
1378
1379		/*
1380		 * After here, we should always check rxcb._page_stolen,
1381		 * if it is true then one of the handlers took the page.
1382		 */
1383
1384		if (reclaim) {
1385			/* Invoke any callbacks, transfer the buffer to caller,
1386			 * and fire off the (possibly) blocking
1387			 * iwl_trans_send_cmd()
1388			 * as we reclaim the driver command queue */
1389			if (!rxcb._page_stolen)
1390				iwl_pcie_hcmd_complete(trans, &rxcb);
1391			else
1392				IWL_WARN(trans, "Claim null rxb?\n");
1393		}
1394
1395		page_stolen |= rxcb._page_stolen;
1396		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1397			break;
1398		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1399	}
1400
1401	/* page was stolen from us -- free our reference */
1402	if (page_stolen) {
1403		__free_pages(rxb->page, trans_pcie->rx_page_order);
1404		rxb->page = NULL;
1405	}
1406
1407	/* Reuse the page if possible. For notification packets and
1408	 * SKBs that fail to Rx correctly, add them back into the
1409	 * rx_free list for reuse later. */
1410	if (rxb->page != NULL) {
1411		rxb->page_dma =
1412			dma_map_page(trans->dev, rxb->page, rxb->offset,
1413				     trans_pcie->rx_buf_bytes,
1414				     DMA_FROM_DEVICE);
1415		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1416			/*
1417			 * free the page(s) as well to not break
1418			 * the invariant that the items on the used
1419			 * list have no page(s)
1420			 */
1421			__free_pages(rxb->page, trans_pcie->rx_page_order);
1422			rxb->page = NULL;
1423			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1424		} else {
1425			list_add_tail(&rxb->list, &rxq->rx_free);
1426			rxq->free_count++;
1427		}
1428	} else
1429		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1430}
1431
1432static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1433						  struct iwl_rxq *rxq, int i,
1434						  bool *join)
1435{
1436	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1437	struct iwl_rx_mem_buffer *rxb;
1438	u16 vid;
1439
1440	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1441
1442	if (!trans->trans_cfg->mq_rx_supported) {
1443		rxb = rxq->queue[i];
1444		rxq->queue[i] = NULL;
1445		return rxb;
1446	}
1447
1448	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1449		vid = le16_to_cpu(rxq->cd[i].rbid);
1450		*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1451	} else {
1452		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
1453	}
1454
1455	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1456		goto out_err;
1457
1458	rxb = trans_pcie->global_table[vid - 1];
1459	if (rxb->invalid)
1460		goto out_err;
1461
1462	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1463
1464	rxb->invalid = true;
1465
1466	return rxb;
1467
1468out_err:
1469	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1470	iwl_force_nmi(trans);
1471	return NULL;
1472}
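
/*
 * Illustration only (hypothetical helper): looking up an RBD from the
 * virtual buffer ID reported by multi-queue hardware, as done above.
 * VIDs are 1-based, so vid - 1 indexes global_table.
 */
static inline struct iwl_rx_mem_buffer *
iwl_pcie_example_vid_to_rxb(struct iwl_trans_pcie *trans_pcie, u16 vid)
{
	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
		return NULL;
	return trans_pcie->global_table[vid - 1];
}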
1473
1474/*
1475 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1476 */
1477static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
1478{
1479	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1480	struct napi_struct *napi;
1481	struct iwl_rxq *rxq;
1482	u32 r, i, count = 0;
1483	bool emergency = false;
1484
1485	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1486		return;
1487
1488	rxq = &trans_pcie->rxq[queue];
1489
1490restart:
1491	spin_lock(&rxq->lock);
1492	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1493	 * buffer that the driver may process (last buffer filled by ucode). */
1494	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1495	i = rxq->read;
1496
1497	/* W/A 9000 device step A0 wrap-around bug */
1498	r &= (rxq->queue_size - 1);
1499
1500	/* Rx interrupt, but nothing sent from uCode */
1501	if (i == r)
1502		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1503
1504	while (i != r) {
1505		struct iwl_rb_allocator *rba = &trans_pcie->rba;
1506		struct iwl_rx_mem_buffer *rxb;
1507		/* number of RBDs still waiting for page allocation */
1508		u32 rb_pending_alloc =
1509			atomic_read(&trans_pcie->rba.req_pending) *
1510			RX_CLAIM_REQ_ALLOC;
1511		bool join = false;
1512
1513		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1514			     !emergency)) {
1515			iwl_pcie_rx_move_to_allocator(rxq, rba);
1516			emergency = true;
1517			IWL_DEBUG_TPT(trans,
1518				      "RX path is in emergency. Pending allocations %d\n",
1519				      rb_pending_alloc);
1520		}
1521
1522		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1523
1524		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1525		if (!rxb)
1526			goto out;
1527
1528		if (unlikely(join || rxq->next_rb_is_fragment)) {
1529			rxq->next_rb_is_fragment = join;
1530			/*
1531			 * We can only get a multi-RB in the following cases:
1532			 *  - firmware issue, sending a too big notification
1533			 *  - sniffer mode with a large A-MSDU
1534			 *  - large MTU frames (>2k)
1535			 * since the multi-RB functionality is limited to newer
1536			 * hardware that cannot put multiple entries into a
1537			 * single RB.
1538			 *
1539			 * Right now, the higher layers aren't set up to deal
1540			 * with that, so discard all of these.
1541			 */
1542			list_add_tail(&rxb->list, &rxq->rx_free);
1543			rxq->free_count++;
1544		} else {
1545			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1546		}
1547
1548		i = (i + 1) & (rxq->queue_size - 1);
1549
1550		/*
1551		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1552		 * try to claim the pre-allocated buffers from the allocator.
1553		 * If not ready - will try to reclaim next time.
1554		 * There is no need to reschedule work - allocator exits only
1555		 * on success
1556		 */
1557		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1558			iwl_pcie_rx_allocator_get(trans, rxq);
1559
1560		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1561			/* Add the remaining empty RBDs for allocator use */
1562			iwl_pcie_rx_move_to_allocator(rxq, rba);
1563		} else if (emergency) {
1564			count++;
1565			if (count == 8) {
1566				count = 0;
1567				if (rb_pending_alloc < rxq->queue_size / 3) {
1568					IWL_DEBUG_TPT(trans,
1569						      "RX path exited emergency. Pending allocations %d\n",
1570						      rb_pending_alloc);
1571					emergency = false;
1572				}
1573
1574				rxq->read = i;
1575				spin_unlock(&rxq->lock);
1576				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1577				iwl_pcie_rxq_restock(trans, rxq);
1578				goto restart;
1579			}
1580		}
1581	}
1582out:
1583	/* Backtrack one entry */
1584	rxq->read = i;
1585	/* update cr tail with the rxq read pointer */
1586	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1587		*rxq->cr_tail = cpu_to_le16(r);
1588	spin_unlock(&rxq->lock);
1589
	/*
	 * Handle the case where, in emergency, there are some unallocated
	 * RBDs. Those RBDs are in the used list, but are not tracked by the
	 * queue's used_count, which only counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed
	 * back by the queue.
	 * By allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
1602	if (unlikely(emergency && count))
1603		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1604
1605	napi = &rxq->napi;
1606	if (napi->poll) {
1607		napi_gro_flush(napi, false);
1608
1609		if (napi->rx_count) {
1610			netif_receive_skb_list(&napi->rx_list);
1611			INIT_LIST_HEAD(&napi->rx_list);
1612			napi->rx_count = 0;
1613		}
1614	}
1615
1616	iwl_pcie_rxq_restock(trans, rxq);
1617}
1618
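/*
 * Note: entry->entry holds this entry's index within the msix_entries[]
 * array, so "entry - queue" below recovers the start of the array, from
 * which container_of() yields the owning iwl_trans_pcie.
 */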
1619static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1620{
1621	u8 queue = entry->entry;
1622	struct msix_entry *entries = entry - queue;
1623
1624	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1625}
1626
1627/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1629 * This interrupt handler should be used with RSS queue only.
1630 */
1631irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1632{
1633	struct msix_entry *entry = dev_id;
1634	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1635	struct iwl_trans *trans = trans_pcie->trans;
1636
1637	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1638
1639	if (WARN_ON(entry->entry >= trans->num_rx_queues))
1640		return IRQ_NONE;
1641
1642	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1643
1644	local_bh_disable();
1645	iwl_pcie_rx_handle(trans, entry->entry);
1646	local_bh_enable();
1647
1648	iwl_pcie_clear_irq(trans, entry);
1649
1650	lock_map_release(&trans->sync_cmd_lockdep_map);
1651
1652	return IRQ_HANDLED;
1653}
1654
1655/*
1656 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1657 */
1658static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1659{
1660	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1661	int i;
1662
1663	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1664	if (trans->cfg->internal_wimax_coex &&
1665	    !trans->cfg->apmg_not_supported &&
1666	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1667			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1668	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1669			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1670		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1671		iwl_op_mode_wimax_active(trans->op_mode);
1672		wake_up(&trans_pcie->wait_command_queue);
1673		return;
1674	}
1675
1676	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1677		if (!trans->txqs.txq[i])
1678			continue;
1679		del_timer(&trans->txqs.txq[i]->stuck_timer);
1680	}
1681
1682	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1683	 * before we wake up the command caller, to ensure a proper cleanup. */
1684	iwl_trans_fw_error(trans);
1685
1686	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1687	wake_up(&trans_pcie->wait_command_queue);
1688}
1689
1690static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1691{
1692	u32 inta;
1693
1694	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1695
1696	trace_iwlwifi_dev_irq(trans->dev);
1697
1698	/* Discover which interrupts are active/pending */
1699	inta = iwl_read32(trans, CSR_INT);
1700
1701	/* the thread will service interrupts and re-enable them */
1702	return inta;
1703}
1704
1705/* a device (PCI-E) page is 4096 bytes long */
1706#define ICT_SHIFT	12
1707#define ICT_SIZE	(1 << ICT_SHIFT)
1708#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
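/*
 * With a 4096-byte table of 4-byte entries, ICT_COUNT is 1024, a power of
 * two, so the read index can wrap cheaply with a bitwise AND of
 * (ICT_COUNT - 1).
 */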
1709
1710/* Interrupt cause lookup via the ICT table. With ICT enabled, the driver
1711 * stops reading the INTA register for the device's interrupt causes, since
1712 * reading that register is expensive. Instead, the device writes the causes
1713 * into the ICT table in DRAM, increments its index and then fires the
1714 * interrupt. The driver ORs together all ICT entries from the current index
1715 * up to the first entry containing 0; the result is the set of interrupts
1716 * to service. The entries are then cleared back to 0 and the index advanced.
1717 */
1718static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1719{
1720	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1721	u32 inta;
1722	u32 val = 0;
1723	u32 read;
1724
1725	trace_iwlwifi_dev_irq(trans->dev);
1726
1727	/* Ignore the interrupt if there's nothing in the NIC to service.
1728	 * This may be due to an IRQ shared with another device,
1729	 * or to sporadic interrupts thrown by our NIC. */
1730	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1731	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1732	if (!read)
1733		return 0;
1734
1735	/*
1736	 * Collect all entries up to the first 0, starting from ict_index;
1737	 * note we already read at ict_index.
1738	 */
1739	do {
1740		val |= read;
1741		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1742				trans_pcie->ict_index, read);
1743		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1744		trans_pcie->ict_index =
1745			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1746
1747		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1748		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1749					   read);
1750	} while (read);
1751
1752	/* We should not get this value, just ignore it. */
1753	if (val == 0xffffffff)
1754		val = 0;
1755
1756	/*
1757	 * This is a workaround for a HW bug: the bug may cause the Rx bit
1758	 * (bit 15, before it is shifted to bit 31) to clear when interrupt
1759	 * coalescing is used. Fortunately, bits 18 and 19 stay set when this
1760	 * happens, so we use them to decide on the real state of the Rx bit.
1761	 * In other words, treat bit 15 as set if bit 18 or bit 19 is set.
1762	 */
1763	if (val & 0xC0000)
1764		val |= 0x8000;
1765
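	/*
	 * Rebuild the INTA-style value: each ICT entry packs the relevant
	 * causes into 16 bits, with the low byte mapping to CSR_INT bits
	 * 0-7 and the second byte to bits 24-31, hence the shift by 16.
	 */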
1766	inta = (0xff & val) | ((0xff00 & val) << 16);
1767	return inta;
1768}
1769
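/*
 * iwl_pcie_handle_rfkill_irq - handle an RF-kill interrupt
 * Latch the HW rfkill state into the STATUS_RFKILL_* bits, report a state
 * change to the op_mode if needed, and wake any waiter of a synchronous
 * host command, since such a command cannot complete under rfkill.
 */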
1770void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1771{
1772	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1773	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1774	bool hw_rfkill, prev, report;
1775
1776	mutex_lock(&trans_pcie->mutex);
1777	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1778	hw_rfkill = iwl_is_rfkill_set(trans);
1779	if (hw_rfkill) {
1780		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1781		set_bit(STATUS_RFKILL_HW, &trans->status);
1782	}
1783	if (trans_pcie->opmode_down)
1784		report = hw_rfkill;
1785	else
1786		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1787
1788	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1789		 hw_rfkill ? "disable radio" : "enable radio");
1790
1791	isr_stats->rfkill++;
1792
1793	if (prev != report)
1794		iwl_trans_pcie_rf_kill(trans, report);
1795	mutex_unlock(&trans_pcie->mutex);
1796
1797	if (hw_rfkill) {
1798		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1799				       &trans->status))
1800			IWL_DEBUG_RF_KILL(trans,
1801					  "Rfkill while SYNC HCMD in flight\n");
1802		wake_up(&trans_pcie->wait_command_queue);
1803	} else {
1804		clear_bit(STATUS_RFKILL_HW, &trans->status);
1805		if (trans_pcie->opmode_down)
1806			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1807	}
1808}
1809
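/*
 * iwl_pcie_irq_handler - threaded handler for the legacy (non-MSI-X) interrupt
 * Read the pending causes (from the ICT table when enabled, otherwise from
 * CSR_INT), ack them, service each cause in turn and finally re-enable the
 * appropriate set of interrupts.
 */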
1810irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1811{
1812	struct iwl_trans *trans = dev_id;
1813	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1814	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1815	u32 inta = 0;
1816	u32 handled = 0;
1817
1818	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1819
1820	spin_lock(&trans_pcie->irq_lock);
1821
1822	/* If the DRAM interrupt (ICT) table isn't in use yet,
1823	 * fall back to the legacy INTA register.
1824	 */
1825	if (likely(trans_pcie->use_ict))
1826		inta = iwl_pcie_int_cause_ict(trans);
1827	else
1828		inta = iwl_pcie_int_cause_non_ict(trans);
1829
1830	if (iwl_have_debug_level(IWL_DL_ISR)) {
1831		IWL_DEBUG_ISR(trans,
1832			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1833			      inta, trans_pcie->inta_mask,
1834			      iwl_read32(trans, CSR_INT_MASK),
1835			      iwl_read32(trans, CSR_FH_INT_STATUS));
1836		if (inta & (~trans_pcie->inta_mask))
1837			IWL_DEBUG_ISR(trans,
1838				      "We got a masked interrupt (0x%08x)\n",
1839				      inta & (~trans_pcie->inta_mask));
1840	}
1841
1842	inta &= trans_pcie->inta_mask;
1843
1844	/*
1845	 * Ignore the interrupt if there's nothing in the NIC to service.
1846	 * This may be due to an IRQ shared with another device,
1847	 * or to sporadic interrupts thrown by our NIC.
1848	 */
1849	if (unlikely(!inta)) {
1850		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1851		/*
1852		 * Re-enable interrupts here since we don't
1853		 * have anything to service
1854		 */
1855		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1856			_iwl_enable_interrupts(trans);
1857		spin_unlock(&trans_pcie->irq_lock);
1858		lock_map_release(&trans->sync_cmd_lockdep_map);
1859		return IRQ_NONE;
1860	}
1861
1862	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1863		/*
1864		 * Hardware disappeared. It might have
1865		 * already raised an interrupt.
1866		 */
1867		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1868		spin_unlock(&trans_pcie->irq_lock);
1869		goto out;
1870	}
1871
1872	/* Ack/clear/reset pending uCode interrupts.
1873	 * Note: some bits in CSR_INT are an "OR" of bits in CSR_FH_INT_STATUS.
1874	 */
1875	/* There is a hardware bug in the interrupt mask function: some
1876	 * interrupts (e.g. CSR_INT_BIT_SCD) can still be generated even if
1877	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
1878	 * ICT interrupt handling mechanism has another bug that might cause
1879	 * these unmasked interrupts to go undetected. We work around both
1880	 * hardware bugs here by ACKing all possible interrupts, so that
1881	 * interrupt coalescing can still be achieved.
1882	 */
1883	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1884
1885	if (iwl_have_debug_level(IWL_DL_ISR))
1886		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1887			      inta, iwl_read32(trans, CSR_INT_MASK));
1888
1889	spin_unlock(&trans_pcie->irq_lock);
1890
1891	/* Now service all interrupt bits discovered above. */
1892	if (inta & CSR_INT_BIT_HW_ERR) {
1893		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1894
1895		/* Tell the device to stop sending interrupts */
1896		iwl_disable_interrupts(trans);
1897
1898		isr_stats->hw++;
1899		iwl_pcie_irq_handle_error(trans);
1900
1901		handled |= CSR_INT_BIT_HW_ERR;
1902
1903		goto out;
1904	}
1905
1906	/* NIC fires this, but we don't use it, redundant with WAKEUP */
1907	if (inta & CSR_INT_BIT_SCD) {
1908		IWL_DEBUG_ISR(trans,
1909			      "Scheduler finished transmitting the frame(s).\n");
1910		isr_stats->sch++;
1911	}
1912
1913	/* Alive notification via Rx interrupt will do the real work */
1914	if (inta & CSR_INT_BIT_ALIVE) {
1915		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1916		isr_stats->alive++;
1917		if (trans->trans_cfg->gen2) {
1918			/*
1919			 * We can restock, since firmware configured
1920			 * the RFH
1921			 */
1922			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1923		}
1924
1925		handled |= CSR_INT_BIT_ALIVE;
1926	}
1927
1928	/* Safely ignore these bits for debug checks below */
1929	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1930
1931	/* HW RF KILL switch toggled */
1932	if (inta & CSR_INT_BIT_RF_KILL) {
1933		iwl_pcie_handle_rfkill_irq(trans);
1934		handled |= CSR_INT_BIT_RF_KILL;
1935	}
1936
1937	/* Chip got too hot and stopped itself */
1938	if (inta & CSR_INT_BIT_CT_KILL) {
1939		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1940		isr_stats->ctkill++;
1941		handled |= CSR_INT_BIT_CT_KILL;
1942	}
1943
1944	/* Error detected by uCode */
1945	if (inta & CSR_INT_BIT_SW_ERR) {
1946		IWL_ERR(trans,
1947			"Microcode SW error detected. Restarting 0x%X.\n", inta);
1948		isr_stats->sw++;
1949		iwl_pcie_irq_handle_error(trans);
1950		handled |= CSR_INT_BIT_SW_ERR;
1951	}
1952
1953	/* uCode wakes up after power-down sleep */
1954	if (inta & CSR_INT_BIT_WAKEUP) {
1955		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1956		iwl_pcie_rxq_check_wrptr(trans);
1957		iwl_pcie_txq_check_wrptrs(trans);
1958
1959		isr_stats->wakeup++;
1960
1961		handled |= CSR_INT_BIT_WAKEUP;
1962	}
1963
1964	/* All uCode command responses, including Tx command responses,
1965	 * Rx "responses" (frame-received notification), and other
1966	 * notifications from uCode come through here. */
1967	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1968		    CSR_INT_BIT_RX_PERIODIC)) {
1969		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1970		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1971			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1972			iwl_write32(trans, CSR_FH_INT_STATUS,
1973					CSR_FH_INT_RX_MASK);
1974		}
1975		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1976			handled |= CSR_INT_BIT_RX_PERIODIC;
1977			iwl_write32(trans,
1978				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1979		}
1980		/* Sending an RX interrupt requires several steps to be done
1981		 * in the device:
1982		 * 1- write the interrupt to the current index in the ICT table.
1983		 * 2- DMA the RX frame.
1984		 * 3- update the RX shared data to indicate the last write index.
1985		 * 4- send the interrupt.
1986		 * This can lead to an RX race: the driver may receive the RX
1987		 * interrupt before the shared data reflects the changes; the
1988		 * periodic interrupt will detect any such dangling Rx activity.
1989		 */
1990
1991		/* Disable periodic interrupt; we use it as just a one-shot. */
1992		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1993			    CSR_INT_PERIODIC_DIS);
1994
1995		/*
1996		 * Re-arm the 8 msec periodic interrupt only if we received a
1997		 * real RX interrupt (rather than just the periodic one), to
1998		 * catch any dangling Rx interrupt.  If it was just the periodic
1999		 * interrupt, there was no dangling Rx activity and no need
2000		 * to extend the periodic interrupt; one-shot is enough.
2001		 */
2002		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
2003			iwl_write8(trans, CSR_INT_PERIODIC_REG,
2004				   CSR_INT_PERIODIC_ENA);
2005
2006		isr_stats->rx++;
2007
2008		local_bh_disable();
2009		iwl_pcie_rx_handle(trans, 0);
2010		local_bh_enable();
2011	}
2012
2013	/* This "Tx" DMA channel is used only for loading uCode */
2014	if (inta & CSR_INT_BIT_FH_TX) {
2015		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2016		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2017		isr_stats->tx++;
2018		handled |= CSR_INT_BIT_FH_TX;
2019		/* Wake up uCode load routine, now that load is complete */
2020		trans_pcie->ucode_write_complete = true;
2021		wake_up(&trans_pcie->ucode_write_waitq);
2022	}
2023
2024	if (inta & ~handled) {
2025		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2026		isr_stats->unhandled++;
2027	}
2028
2029	if (inta & ~(trans_pcie->inta_mask)) {
2030		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2031			 inta & ~trans_pcie->inta_mask);
2032	}
2033
2034	spin_lock(&trans_pcie->irq_lock);
2035	/* Re-enable all interrupts only if they were disabled by the irq */
2036	if (test_bit(STATUS_INT_ENABLED, &trans->status))
2037		_iwl_enable_interrupts(trans);
2038	/* we are loading the firmware, enable FH_TX interrupt only */
2039	else if (handled & CSR_INT_BIT_FH_TX)
2040		iwl_enable_fw_load_int(trans);
2041	/* Re-enable RF_KILL if it occurred */
2042	else if (handled & CSR_INT_BIT_RF_KILL)
2043		iwl_enable_rfkill_int(trans);
2044	/* Re-enable the ALIVE / Rx interrupt if it occurred */
2045	else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
2046		iwl_enable_fw_load_int_ctx_info(trans);
2047	spin_unlock(&trans_pcie->irq_lock);
2048
2049out:
2050	lock_map_release(&trans->sync_cmd_lockdep_map);
2051	return IRQ_HANDLED;
2052}
2053
2054/******************************************************************************
2055 *
2056 * ICT functions
2057 *
2058 ******************************************************************************/
2059
2060/* Free dram table */
2061void iwl_pcie_free_ict(struct iwl_trans *trans)
2062{
2063	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2064
2065	if (trans_pcie->ict_tbl) {
2066		dma_free_coherent(trans->dev, ICT_SIZE,
2067				  trans_pcie->ict_tbl,
2068				  trans_pcie->ict_tbl_dma);
2069		trans_pcie->ict_tbl = NULL;
2070		trans_pcie->ict_tbl_dma = 0;
2071	}
2072}
2073
2074/*
2075 * Allocate the shared DRAM table: an aligned memory
2076 * block of ICT_SIZE bytes.
2077 * Also reset all data related to the ICT table interrupt.
2078 */
2079int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2080{
2081	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2082
2083	trans_pcie->ict_tbl =
2084		dma_alloc_coherent(trans->dev, ICT_SIZE,
2085				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2086	if (!trans_pcie->ict_tbl)
2087		return -ENOMEM;
2088
2089	/* just an API sanity check ... it is guaranteed to be aligned */
2090	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2091		iwl_pcie_free_ict(trans);
2092		return -EINVAL;
2093	}
2094
2095	return 0;
2096}
2097
2098/* The device is going up: inform it that the ICT interrupt table will be
2099 * used, and switch the driver over to ICT interrupt handling.
2100 */
2101void iwl_pcie_reset_ict(struct iwl_trans *trans)
2102{
2103	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2104	u32 val;
2105
2106	if (!trans_pcie->ict_tbl)
2107		return;
2108
2109	spin_lock(&trans_pcie->irq_lock);
2110	_iwl_disable_interrupts(trans);
2111
2112	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2113
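	/*
	 * The table's DMA address is programmed in ICT_SIZE (4 KB) units,
	 * i.e. shifted right by ICT_SHIFT; the allocation was checked to be
	 * ICT_SIZE-aligned, so no address bits are lost here.
	 */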
2114	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2115
2116	val |= CSR_DRAM_INT_TBL_ENABLE |
2117	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
2118	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
2119
2120	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2121
2122	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2123	trans_pcie->use_ict = true;
2124	trans_pcie->ict_index = 0;
2125	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2126	_iwl_enable_interrupts(trans);
2127	spin_unlock(&trans_pcie->irq_lock);
2128}
2129
2130/* The device is going down: disable ICT interrupt usage */
2131void iwl_pcie_disable_ict(struct iwl_trans *trans)
2132{
2133	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2134
2135	spin_lock(&trans_pcie->irq_lock);
2136	trans_pcie->use_ict = false;
2137	spin_unlock(&trans_pcie->irq_lock);
2138}
2139
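/*
 * iwl_pcie_isr - hard-IRQ handler for the legacy interrupt line
 * Mask further interrupts (without clearing the pending causes) and defer
 * all real work to the threaded handler, iwl_pcie_irq_handler().
 */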
2140irqreturn_t iwl_pcie_isr(int irq, void *data)
2141{
2142	struct iwl_trans *trans = data;
2143
2144	if (!trans)
2145		return IRQ_NONE;
2146
2147	/* Disable (but don't clear!) interrupts here to avoid
2148	 * back-to-back ISRs and sporadic interrupts from our NIC.
2149	 * If there is something to service, the threaded handler re-enables
2150	 * interrupts; if not, it re-enables them before returning.
2151	 */
2152	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2153
2154	return IRQ_WAKE_THREAD;
2155}
2156
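/*
 * iwl_pcie_msix_isr - hard-IRQ handler for MSI-X vectors
 * Nothing to do in hard-IRQ context: the cause registers are read and
 * cleared by the threaded handler, so just wake it.
 */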
2157irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2158{
2159	return IRQ_WAKE_THREAD;
2160}
2161
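/*
 * iwl_pcie_irq_msix_handler - threaded handler for a (possibly shared) MSI-X vector
 * Read and clear both the FH and HW interrupt-cause registers, then service
 * the RX queues and each HW cause that was reported.
 */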
2162irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2163{
2164	struct msix_entry *entry = dev_id;
2165	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2166	struct iwl_trans *trans = trans_pcie->trans;
2167	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2168	u32 inta_fh, inta_hw;
2169
2170	lock_map_acquire(&trans->sync_cmd_lockdep_map);
2171
2172	spin_lock(&trans_pcie->irq_lock);
2173	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2174	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2175	/*
2176	 * Clear the cause registers to avoid handling the same cause twice.
2177	 */
2178	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
2179	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2180	spin_unlock(&trans_pcie->irq_lock);
2181
2182	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2183
2184	if (unlikely(!(inta_fh | inta_hw))) {
2185		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2186		lock_map_release(&trans->sync_cmd_lockdep_map);
2187		return IRQ_NONE;
2188	}
2189
2190	if (iwl_have_debug_level(IWL_DL_ISR)) {
2191		IWL_DEBUG_ISR(trans,
2192			      "ISR inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2193			      inta_fh, trans_pcie->fh_mask,
2194			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2195		if (inta_fh & ~trans_pcie->fh_mask)
2196			IWL_DEBUG_ISR(trans,
2197				      "We got a masked interrupt (0x%08x)\n",
2198				      inta_fh & ~trans_pcie->fh_mask);
2199	}
2200
2201	inta_fh &= trans_pcie->fh_mask;
2202
2203	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2204	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2205		local_bh_disable();
2206		iwl_pcie_rx_handle(trans, 0);
2207		local_bh_enable();
2208	}
2209
2210	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2211	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2212		local_bh_disable();
2213		iwl_pcie_rx_handle(trans, 1);
2214		local_bh_enable();
2215	}
2216
2217	/* This "Tx" DMA channel is used only for loading uCode */
2218	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2219		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2220		isr_stats->tx++;
2221		/*
2222		 * Wake up uCode load routine,
2223		 * now that load is complete
2224		 */
2225		trans_pcie->ucode_write_complete = true;
2226		wake_up(&trans_pcie->ucode_write_waitq);
2227	}
2228
2229	/* Error detected by uCode */
2230	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
2231	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
2232		IWL_ERR(trans,
2233			"Microcode SW error detected. Restarting 0x%X.\n",
2234			inta_fh);
2235		isr_stats->sw++;
2236		iwl_pcie_irq_handle_error(trans);
2237	}
2238
2239	/* After checking FH register check HW register */
2240	if (iwl_have_debug_level(IWL_DL_ISR)) {
2241		IWL_DEBUG_ISR(trans,
2242			      "ISR inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2243			      inta_hw, trans_pcie->hw_mask,
2244			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2245		if (inta_hw & ~trans_pcie->hw_mask)
2246			IWL_DEBUG_ISR(trans,
2247				      "We got a masked interrupt 0x%08x\n",
2248				      inta_hw & ~trans_pcie->hw_mask);
2249	}
2250
2251	inta_hw &= trans_pcie->hw_mask;
2252
2253	/* Alive notification via Rx interrupt will do the real work */
2254	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2255		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2256		isr_stats->alive++;
2257		if (trans->trans_cfg->gen2) {
2258			/* We can restock, since firmware configured the RFH */
2259			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2260		}
2261	}
2262
2263	/*
2264	 * In some rare cases when the HW is in a bad state, we may
2265	 * get this interrupt too early, when prph_info is still NULL.
2266	 * So make sure that it's not NULL to prevent crashing.
2267	 */
2268	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2269		u32 sleep_notif =
2270			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2271		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2272		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2273			IWL_DEBUG_ISR(trans,
2274				      "Sx interrupt: sleep notification = 0x%x\n",
2275				      sleep_notif);
2276			trans_pcie->sx_complete = true;
2277			wake_up(&trans_pcie->sx_waitq);
2278		} else {
2279			/* uCode wakes up after power-down sleep */
2280			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2281			iwl_pcie_rxq_check_wrptr(trans);
2282			iwl_pcie_txq_check_wrptrs(trans);
2283
2284			isr_stats->wakeup++;
2285		}
2286	}
2287
2288	if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
2289		/* Reflect IML transfer status */
2290		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
2291
2292		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
2293		if (res == IWL_IMAGE_RESP_FAIL) {
2294			isr_stats->sw++;
2295			iwl_pcie_irq_handle_error(trans);
2296		}
2297	}
2298
2299	/* Chip got too hot and stopped itself */
2300	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2301		IWL_ERR(trans, "Microcode CT kill error detected.\n");
2302		isr_stats->ctkill++;
2303	}
2304
2305	/* HW RF KILL switch toggled */
2306	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2307		iwl_pcie_handle_rfkill_irq(trans);
2308
2309	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2310		IWL_ERR(trans,
2311			"Hardware error detected. Restarting.\n");
2312
2313		isr_stats->hw++;
2314		trans->dbg.hw_error = true;
2315		iwl_pcie_irq_handle_error(trans);
2316	}
2317
2318	iwl_pcie_clear_irq(trans, entry);
2319
2320	lock_map_release(&trans->sync_cmd_lockdep_map);
2321
2322	return IRQ_HANDLED;
2323}
2324