1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2/*
3 * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7#include <linux/sched.h>
8#include <linux/wait.h>
9#include <linux/gfp.h>
10
11#include "iwl-prph.h"
12#include "iwl-io.h"
13#include "internal.h"
14#include "iwl-op-mode.h"
15#include "iwl-context-info-gen3.h"
16
17/******************************************************************************
18 *
19 * RX path functions
20 *
21 ******************************************************************************/
22
/*
 * Rx theory of operation
 *
 * The driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to a Receive Buffer to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ index to the first
 * position, and WRITE to the last (READ - 1, wrapped).
 *
 * When the firmware places a packet in a buffer, it advances the READ index
 * and fires the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator's
 *   empty list.  Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another six used RBDs - they are transferred to the
 *   allocator's empty list and the driver tries to claim the pre-allocated
 *   buffers and add them to iwl->rxq->rx_free.  If it fails - it keeps
 *   claiming them until they are ready.
 *   When there are 8+ buffers in the free list - either from allocation or
 *   from 8 reused unstolen pages - restock is called to update the FW and
 *   indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation,
 *   the allocator has an initial pool of num_queues * (8 - 2) RBDs - the
 *   maximum number of RBDs that can be missing per allocation request (a
 *   request is posted with 2 empty RBDs, and there is no guarantee when the
 *   other 6 RBDs will be supplied).
 *   The queues supply the recycling of the remaining RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver's 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set, it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into the Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detaches iwl_rx_mem_buffers from the pool up to
 *                            the READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from the queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
115
116/*
117 * iwl_rxq_space - Return number of free slots available in queue.
118 */
119static int iwl_rxq_space(const struct iwl_rxq *rxq)
120{
121	/* Make sure rx queue size is a power of 2 */
122	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
123
124	/*
125	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
126	 * between empty and completely full queues.
127	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
128	 * defined for negative dividends.
129	 */
130	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
131}
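
/*
 * Illustrative example (not part of the driver): because queue_size is a
 * power of two, the computation above wraps correctly even when read has
 * already wrapped around and is smaller than write. Assuming
 * queue_size = 256:
 *
 *	read = 10,  write = 200: (10 - 200 - 1) & 255 = 65 free slots
 *	read = 200, write = 10:  (200 - 10 - 1) & 255 = 189 free slots
 *	read = write:            (0 - 1) & 255 = 255, i.e. the queue is empty
 */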
132
/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
136static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
137{
138	return cpu_to_le32((u32)(dma_addr >> 8));
139}
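
/*
 * Illustrative example (not part of the driver): the returned value keeps
 * bits [39:8] of the DMA address, so the receive buffer is assumed to be
 * at least 256-byte aligned. A (hypothetical) dma_addr of 0x123456700
 * would be stored as cpu_to_le32(0x01234567).
 */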
140
141/*
142 * iwl_pcie_rx_stop - stops the Rx DMA
143 */
144int iwl_pcie_rx_stop(struct iwl_trans *trans)
145{
146	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
147		/* TODO: remove this once fw does it */
148		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
149		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
150					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
151	} else if (trans->trans_cfg->mq_rx_supported) {
152		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
153		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
154					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
155	} else {
156		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
157		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
158					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
159					   1000);
160	}
161}
162
163/*
164 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
165 */
166static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
167				    struct iwl_rxq *rxq)
168{
169	u32 reg;
170
171	lockdep_assert_held(&rxq->lock);
172
173	/*
174	 * explicitly wake up the NIC if:
175	 * 1. shadow registers aren't enabled
176	 * 2. there is a chance that the NIC is asleep
177	 */
178	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
179	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
180		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
181
182		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
183			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
184				       reg);
185			iwl_set_bit(trans, CSR_GP_CNTRL,
186				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
187			rxq->need_update = true;
188			return;
189		}
190	}
191
192	rxq->write_actual = round_down(rxq->write, 8);
193	if (!trans->trans_cfg->mq_rx_supported)
194		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
195	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
196		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
197			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
198	else
199		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
200			    rxq->write_actual);
201}
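
/*
 * Illustrative example (not part of the driver): the device is only told
 * about the write pointer in multiples of 8, so with rxq->write = 27 the
 * value written to the hardware is round_down(27, 8) = 24; the remaining
 * three RBDs are only advertised once the pointer crosses the next
 * multiple of 8.
 */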
202
203static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
204{
205	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
206	int i;
207
208	for (i = 0; i < trans->num_rx_queues; i++) {
209		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
210
211		if (!rxq->need_update)
212			continue;
213		spin_lock_bh(&rxq->lock);
214		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
215		rxq->need_update = false;
216		spin_unlock_bh(&rxq->lock);
217	}
218}
219
220static void iwl_pcie_restock_bd(struct iwl_trans *trans,
221				struct iwl_rxq *rxq,
222				struct iwl_rx_mem_buffer *rxb)
223{
224	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
225		struct iwl_rx_transfer_desc *bd = rxq->bd;
226
227		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
228
229		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
230		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
231	} else {
232		__le64 *bd = rxq->bd;
233
234		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
235	}
236
237	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
238		     (u32)rxb->vid, rxq->id, rxq->write);
239}
240
241/*
242 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
243 */
244static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
245				  struct iwl_rxq *rxq)
246{
247	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
248	struct iwl_rx_mem_buffer *rxb;
249
250	/*
251	 * If the device isn't enabled - no need to try to add buffers...
252	 * This can happen when we stop the device and still have an interrupt
253	 * pending. We stop the APM before we sync the interrupts because we
254	 * have to (see comment there). On the other hand, since the APM is
255	 * stopped, we cannot access the HW (in particular not prph).
256	 * So don't try to restock if the APM has been already stopped.
257	 */
258	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
259		return;
260
261	spin_lock_bh(&rxq->lock);
262	while (rxq->free_count) {
263		/* Get next free Rx buffer, remove from free list */
264		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
265				       list);
266		list_del(&rxb->list);
267		rxb->invalid = false;
268		/* some low bits are expected to be unset (depending on hw) */
269		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
270		/* Point to Rx buffer via next RBD in circular buffer */
271		iwl_pcie_restock_bd(trans, rxq, rxb);
272		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
273		rxq->free_count--;
274	}
275	spin_unlock_bh(&rxq->lock);
276
277	/*
278	 * If we've added more space for the firmware to place data, tell it.
279	 * Increment device's write pointer in multiples of 8.
280	 */
281	if (rxq->write_actual != (rxq->write & ~0x7)) {
282		spin_lock_bh(&rxq->lock);
283		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
284		spin_unlock_bh(&rxq->lock);
285	}
286}
287
288/*
289 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
290 */
291static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
292				  struct iwl_rxq *rxq)
293{
294	struct iwl_rx_mem_buffer *rxb;
295
	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
304	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
305		return;
306
307	spin_lock_bh(&rxq->lock);
308	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
309		__le32 *bd = (__le32 *)rxq->bd;
310		/* The overwritten rxb must be a used one */
311		rxb = rxq->queue[rxq->write];
312		BUG_ON(rxb && rxb->page);
313
314		/* Get next free Rx buffer, remove from free list */
315		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
316				       list);
317		list_del(&rxb->list);
318		rxb->invalid = false;
319
320		/* Point to Rx buffer via next RBD in circular buffer */
321		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
322		rxq->queue[rxq->write] = rxb;
323		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
324		rxq->free_count--;
325	}
326	spin_unlock_bh(&rxq->lock);
327
328	/* If we've added more space for the firmware to place data, tell it.
329	 * Increment device's write pointer in multiples of 8. */
330	if (rxq->write_actual != (rxq->write & ~0x7)) {
331		spin_lock_bh(&rxq->lock);
332		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
333		spin_unlock_bh(&rxq->lock);
334	}
335}
336
337/*
338 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
339 *
340 * If there are slots in the RX queue that need to be restocked,
341 * and we have free pre-allocated buffers, fill the ranks as much
342 * as we can, pulling from rx_free.
343 *
344 * This moves the 'write' index forward to catch up with 'processed', and
345 * also updates the memory address in the firmware to reference the new
346 * target buffer.
347 */
348static
349void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
350{
351	if (trans->trans_cfg->mq_rx_supported)
352		iwl_pcie_rxmq_restock(trans, rxq);
353	else
354		iwl_pcie_rxsq_restock(trans, rxq);
355}
356
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
361static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
362					   u32 *offset, gfp_t priority)
363{
364	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
365	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
366	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
367	struct page *page;
368	gfp_t gfp_mask = priority;
369
370	if (trans_pcie->rx_page_order > 0)
371		gfp_mask |= __GFP_COMP;
372
373	if (trans_pcie->alloc_page) {
374		spin_lock_bh(&trans_pcie->alloc_page_lock);
375		/* recheck */
376		if (trans_pcie->alloc_page) {
377			*offset = trans_pcie->alloc_page_used;
378			page = trans_pcie->alloc_page;
379			trans_pcie->alloc_page_used += rbsize;
380			if (trans_pcie->alloc_page_used >= allocsize)
381				trans_pcie->alloc_page = NULL;
382			else
383				get_page(page);
384			spin_unlock_bh(&trans_pcie->alloc_page_lock);
385			return page;
386		}
387		spin_unlock_bh(&trans_pcie->alloc_page_lock);
388	}
389
390	/* Alloc a new receive buffer */
391	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
392	if (!page) {
393		if (net_ratelimit())
394			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
395				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
400		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
401			IWL_CRIT(trans,
402				 "Failed to alloc_pages\n");
403		return NULL;
404	}
405
406	if (2 * rbsize <= allocsize) {
407		spin_lock_bh(&trans_pcie->alloc_page_lock);
408		if (!trans_pcie->alloc_page) {
409			get_page(page);
410			trans_pcie->alloc_page = page;
411			trans_pcie->alloc_page_used = rbsize;
412		}
413		spin_unlock_bh(&trans_pcie->alloc_page_lock);
414	}
415
416	*offset = 0;
417	return page;
418}
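
/*
 * Illustrative example (not part of the driver): with 2K receive buffers
 * and 4K pages (i.e. 2 * rbsize <= allocsize), a single page backs two RBs.
 * The first caller allocates the page, returns offset 0, and caches the
 * page with an extra reference (get_page()) and alloc_page_used = 2048.
 * The second caller returns offset 2048 and, since the page is then fully
 * used, takes over the cached reference, so each RB ends up owning exactly
 * one page reference.
 */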
419
420/*
421 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
422 *
423 * A used RBD is an Rx buffer that has been given to the stack. To use it again
424 * a page must be allocated and the RBD must point to the page. This function
425 * doesn't change the HW pointer but handles the list of pages that is used by
426 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
427 * allocated buffers.
428 */
429void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
430			    struct iwl_rxq *rxq)
431{
432	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
433	struct iwl_rx_mem_buffer *rxb;
434	struct page *page;
435
436	while (1) {
437		unsigned int offset;
438
439		spin_lock_bh(&rxq->lock);
440		if (list_empty(&rxq->rx_used)) {
441			spin_unlock_bh(&rxq->lock);
442			return;
443		}
444		spin_unlock_bh(&rxq->lock);
445
446		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
447		if (!page)
448			return;
449
450		spin_lock_bh(&rxq->lock);
451
452		if (list_empty(&rxq->rx_used)) {
453			spin_unlock_bh(&rxq->lock);
454			__free_pages(page, trans_pcie->rx_page_order);
455			return;
456		}
457		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
458				       list);
459		list_del(&rxb->list);
460		spin_unlock_bh(&rxq->lock);
461
462		BUG_ON(rxb->page);
463		rxb->page = page;
464		rxb->offset = offset;
465		/* Get physical address of the RB */
466		rxb->page_dma =
467			dma_map_page(trans->dev, page, rxb->offset,
468				     trans_pcie->rx_buf_bytes,
469				     DMA_FROM_DEVICE);
470		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
471			rxb->page = NULL;
472			spin_lock_bh(&rxq->lock);
473			list_add(&rxb->list, &rxq->rx_used);
474			spin_unlock_bh(&rxq->lock);
475			__free_pages(page, trans_pcie->rx_page_order);
476			return;
477		}
478
479		spin_lock_bh(&rxq->lock);
480
481		list_add_tail(&rxb->list, &rxq->rx_free);
482		rxq->free_count++;
483
484		spin_unlock_bh(&rxq->lock);
485	}
486}
487
488void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
489{
490	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
491	int i;
492
493	if (!trans_pcie->rx_pool)
494		return;
495
496	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
497		if (!trans_pcie->rx_pool[i].page)
498			continue;
499		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
500			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
501		__free_pages(trans_pcie->rx_pool[i].page,
502			     trans_pcie->rx_page_order);
503		trans_pcie->rx_pool[i].page = NULL;
504	}
505}
506
/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
513static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
514{
515	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
516	struct iwl_rb_allocator *rba = &trans_pcie->rba;
517	struct list_head local_empty;
518	int pending = atomic_read(&rba->req_pending);
519
520	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
521
522	/* If we were scheduled - there is at least one request */
523	spin_lock_bh(&rba->lock);
524	/* swap out the rba->rbd_empty to a local list */
525	list_replace_init(&rba->rbd_empty, &local_empty);
526	spin_unlock_bh(&rba->lock);
527
528	while (pending) {
529		int i;
530		LIST_HEAD(local_allocated);
531		gfp_t gfp_mask = GFP_KERNEL;
532
533		/* Do not post a warning if there are only a few requests */
534		if (pending < RX_PENDING_WATERMARK)
535			gfp_mask |= __GFP_NOWARN;
536
537		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
538			struct iwl_rx_mem_buffer *rxb;
539			struct page *page;
540
			/* The list should never be empty - each reused RBD is
			 * returned to the list, and the initial pool covers
			 * any possible gap between the time the page is
			 * allocated and the time the RBD is added.
			 */
546			BUG_ON(list_empty(&local_empty));
547			/* Get the first rxb from the rbd list */
548			rxb = list_first_entry(&local_empty,
549					       struct iwl_rx_mem_buffer, list);
550			BUG_ON(rxb->page);
551
552			/* Alloc a new receive buffer */
553			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
554						      gfp_mask);
555			if (!page)
556				continue;
557			rxb->page = page;
558
559			/* Get physical address of the RB */
560			rxb->page_dma = dma_map_page(trans->dev, page,
561						     rxb->offset,
562						     trans_pcie->rx_buf_bytes,
563						     DMA_FROM_DEVICE);
564			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
565				rxb->page = NULL;
566				__free_pages(page, trans_pcie->rx_page_order);
567				continue;
568			}
569
570			/* move the allocated entry to the out list */
571			list_move(&rxb->list, &local_allocated);
572			i++;
573		}
574
575		atomic_dec(&rba->req_pending);
576		pending--;
577
578		if (!pending) {
579			pending = atomic_read(&rba->req_pending);
580			if (pending)
581				IWL_DEBUG_TPT(trans,
582					      "Got more pending allocation requests = %d\n",
583					      pending);
584		}
585
586		spin_lock_bh(&rba->lock);
587		/* add the allocated rbds to the allocator allocated list */
588		list_splice_tail(&local_allocated, &rba->rbd_allocated);
589		/* get more empty RBDs for current pending requests */
590		list_splice_tail_init(&rba->rbd_empty, &local_empty);
591		spin_unlock_bh(&rba->lock);
592
593		atomic_inc(&rba->req_ready);
594
595	}
596
597	spin_lock_bh(&rba->lock);
598	/* return unused rbds to the allocator empty list */
599	list_splice_tail(&local_empty, &rba->rbd_empty);
600	spin_unlock_bh(&rba->lock);
601
602	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
603}
604
/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
613static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
614				      struct iwl_rxq *rxq)
615{
616	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
617	struct iwl_rb_allocator *rba = &trans_pcie->rba;
618	int i;
619
620	lockdep_assert_held(&rxq->lock);
621
	/*
	 * atomic_dec_if_positive returns req_ready - 1 in any scenario.
	 * If req_ready is 0, atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. there are ready requests and the function
	 * hands one request to the caller.
	 */
630	if (atomic_dec_if_positive(&rba->req_ready) < 0)
631		return;
632
633	spin_lock(&rba->lock);
634	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
635		/* Get next free Rx buffer, remove it from free list */
636		struct iwl_rx_mem_buffer *rxb =
637			list_first_entry(&rba->rbd_allocated,
638					 struct iwl_rx_mem_buffer, list);
639
640		list_move(&rxb->list, &rxq->rx_free);
641	}
642	spin_unlock(&rba->lock);
643
644	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
645	rxq->free_count += RX_CLAIM_REQ_ALLOC;
646}
647
648void iwl_pcie_rx_allocator_work(struct work_struct *data)
649{
650	struct iwl_rb_allocator *rba_p =
651		container_of(data, struct iwl_rb_allocator, rx_alloc);
652	struct iwl_trans_pcie *trans_pcie =
653		container_of(rba_p, struct iwl_trans_pcie, rba);
654
655	iwl_pcie_rx_allocator(trans_pcie->trans);
656}
657
658static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
659{
660	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
661		return sizeof(struct iwl_rx_transfer_desc);
662
663	return trans->trans_cfg->mq_rx_supported ?
664			sizeof(__le64) : sizeof(__le32);
665}
666
667static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
668{
669	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
670		return sizeof(struct iwl_rx_completion_desc_bz);
671
672	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
673		return sizeof(struct iwl_rx_completion_desc);
674
675	return sizeof(__le32);
676}
677
678static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
679				  struct iwl_rxq *rxq)
680{
681	int free_size = iwl_pcie_free_bd_size(trans);
682
683	if (rxq->bd)
684		dma_free_coherent(trans->dev,
685				  free_size * rxq->queue_size,
686				  rxq->bd, rxq->bd_dma);
687	rxq->bd_dma = 0;
688	rxq->bd = NULL;
689
690	rxq->rb_stts_dma = 0;
691	rxq->rb_stts = NULL;
692
693	if (rxq->used_bd)
694		dma_free_coherent(trans->dev,
695				  iwl_pcie_used_bd_size(trans) *
696					rxq->queue_size,
697				  rxq->used_bd, rxq->used_bd_dma);
698	rxq->used_bd_dma = 0;
699	rxq->used_bd = NULL;
700}
701
702static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
703{
704	bool use_rx_td = (trans->trans_cfg->device_family >=
705			  IWL_DEVICE_FAMILY_AX210);
706
707	if (use_rx_td)
708		return sizeof(__le16);
709
710	return sizeof(struct iwl_rb_status);
711}
712
713static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
714				  struct iwl_rxq *rxq)
715{
716	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
717	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
718	struct device *dev = trans->dev;
719	int i;
720	int free_size;
721
722	spin_lock_init(&rxq->lock);
723	if (trans->trans_cfg->mq_rx_supported)
724		rxq->queue_size = trans->cfg->num_rbds;
725	else
726		rxq->queue_size = RX_QUEUE_SIZE;
727
728	free_size = iwl_pcie_free_bd_size(trans);
729
730	/*
731	 * Allocate the circular buffer of Read Buffer Descriptors
732	 * (RBDs)
733	 */
734	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
735				     &rxq->bd_dma, GFP_KERNEL);
736	if (!rxq->bd)
737		goto err;
738
739	if (trans->trans_cfg->mq_rx_supported) {
740		rxq->used_bd = dma_alloc_coherent(dev,
741						  iwl_pcie_used_bd_size(trans) *
742							rxq->queue_size,
743						  &rxq->used_bd_dma,
744						  GFP_KERNEL);
745		if (!rxq->used_bd)
746			goto err;
747	}
748
749	rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
750	rxq->rb_stts_dma =
751		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
752
753	return 0;
754
755err:
756	for (i = 0; i < trans->num_rx_queues; i++) {
757		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
758
759		iwl_pcie_free_rxq_dma(trans, rxq);
760	}
761
762	return -ENOMEM;
763}
764
765static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
766{
767	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
768	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
769	struct iwl_rb_allocator *rba = &trans_pcie->rba;
770	int i, ret;
771
772	if (WARN_ON(trans_pcie->rxq))
773		return -EINVAL;
774
775	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
776				  GFP_KERNEL);
777	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
778				      sizeof(trans_pcie->rx_pool[0]),
779				      GFP_KERNEL);
780	trans_pcie->global_table =
781		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
782			sizeof(trans_pcie->global_table[0]),
783			GFP_KERNEL);
784	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
785	    !trans_pcie->global_table) {
786		ret = -ENOMEM;
787		goto err;
788	}
789
790	spin_lock_init(&rba->lock);
791
	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues contiguously (HW requirement).
	 */
796	trans_pcie->base_rb_stts =
797			dma_alloc_coherent(trans->dev,
798					   rb_stts_size * trans->num_rx_queues,
799					   &trans_pcie->base_rb_stts_dma,
800					   GFP_KERNEL);
801	if (!trans_pcie->base_rb_stts) {
802		ret = -ENOMEM;
803		goto err;
804	}
805
806	for (i = 0; i < trans->num_rx_queues; i++) {
807		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
808
809		rxq->id = i;
810		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
811		if (ret)
812			goto err;
813	}
814	return 0;
815
816err:
817	if (trans_pcie->base_rb_stts) {
818		dma_free_coherent(trans->dev,
819				  rb_stts_size * trans->num_rx_queues,
820				  trans_pcie->base_rb_stts,
821				  trans_pcie->base_rb_stts_dma);
822		trans_pcie->base_rb_stts = NULL;
823		trans_pcie->base_rb_stts_dma = 0;
824	}
825	kfree(trans_pcie->rx_pool);
826	trans_pcie->rx_pool = NULL;
827	kfree(trans_pcie->global_table);
828	trans_pcie->global_table = NULL;
829	kfree(trans_pcie->rxq);
830	trans_pcie->rxq = NULL;
831
832	return ret;
833}
834
835static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
836{
837	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
838	u32 rb_size;
839	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
840
841	switch (trans_pcie->rx_buf_size) {
842	case IWL_AMSDU_4K:
843		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
844		break;
845	case IWL_AMSDU_8K:
846		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
847		break;
848	case IWL_AMSDU_12K:
849		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
850		break;
851	default:
852		WARN_ON(1);
853		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
854	}
855
856	if (!iwl_trans_grab_nic_access(trans))
857		return;
858
859	/* Stop Rx DMA */
860	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
861	/* reset and flush pointers */
862	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
863	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
864	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
865
866	/* Reset driver's Rx queue write index */
867	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
868
869	/* Tell device where to find RBD circular buffer in DRAM */
870	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
871		    (u32)(rxq->bd_dma >> 8));
872
873	/* Tell device where in DRAM to update its Rx status */
874	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
875		    rxq->rb_stts_dma >> 4);
876
	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of an HW bug in
	 *      the credit mechanism of the 5000 HW RX FIFO
	 * Direct rx interrupts to the host
	 * Rx buffer size 4k, 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
885	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
886		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
887		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
888		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
889		    rb_size |
890		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
891		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
892
893	iwl_trans_release_nic_access(trans);
894
895	/* Set interrupt coalescing timer to default (2048 usecs) */
896	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
897
898	/* W/A for interrupt coalescing bug in 7260 and 3160 */
899	if (trans->cfg->host_interrupt_operation_mode)
900		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
901}
902
903static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
904{
905	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
906	u32 rb_size, enabled = 0;
907	int i;
908
909	switch (trans_pcie->rx_buf_size) {
910	case IWL_AMSDU_2K:
911		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
912		break;
913	case IWL_AMSDU_4K:
914		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
915		break;
916	case IWL_AMSDU_8K:
917		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
918		break;
919	case IWL_AMSDU_12K:
920		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
921		break;
922	default:
923		WARN_ON(1);
924		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
925	}
926
927	if (!iwl_trans_grab_nic_access(trans))
928		return;
929
930	/* Stop Rx DMA */
931	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
	/* disable free and used rx queue operation */
933	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
934
935	for (i = 0; i < trans->num_rx_queues; i++) {
936		/* Tell device where to find RBD free table in DRAM */
937		iwl_write_prph64_no_grab(trans,
938					 RFH_Q_FRBDCB_BA_LSB(i),
939					 trans_pcie->rxq[i].bd_dma);
940		/* Tell device where to find RBD used table in DRAM */
941		iwl_write_prph64_no_grab(trans,
942					 RFH_Q_URBDCB_BA_LSB(i),
943					 trans_pcie->rxq[i].used_bd_dma);
944		/* Tell device where in DRAM to update its Rx status */
945		iwl_write_prph64_no_grab(trans,
946					 RFH_Q_URBD_STTS_WPTR_LSB(i),
947					 trans_pcie->rxq[i].rb_stts_dma);
		/* Reset the device's index tables */
949		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
950		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
951		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
952
953		enabled |= BIT(i) | BIT(i + 16);
954	}
955
956	/*
957	 * Enable Rx DMA
958	 * Rx buffer size 4 or 8k or 12k
959	 * Min RB size 4 or 8
960	 * Drop frames that exceed RB size
961	 * 512 RBDs
962	 */
963	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
964			       RFH_DMA_EN_ENABLE_VAL | rb_size |
965			       RFH_RXF_DMA_MIN_RB_4_8 |
966			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
967			       RFH_RXF_DMA_RBDCB_SIZE_512);
968
969	/*
970	 * Activate DMA snooping.
971	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
972	 * Default queue is 0
973	 */
974	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
975			       RFH_GEN_CFG_RFH_DMA_SNOOP |
976			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
977			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
978			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
979					       trans->trans_cfg->integrated ?
980					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
981					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
982	/* Enable the relevant rx queues */
983	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
984
985	iwl_trans_release_nic_access(trans);
986
987	/* Set interrupt coalescing timer to default (2048 usecs) */
988	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
989}
990
991void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
992{
993	lockdep_assert_held(&rxq->lock);
994
995	INIT_LIST_HEAD(&rxq->rx_free);
996	INIT_LIST_HEAD(&rxq->rx_used);
997	rxq->free_count = 0;
998	rxq->used_count = 0;
999}
1000
1001static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
1002
1003static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
1004{
1005	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1006	struct iwl_trans_pcie *trans_pcie;
1007	struct iwl_trans *trans;
1008	int ret;
1009
1010	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1011	trans = trans_pcie->trans;
1012
1013	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1014
1015	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
1016		      rxq->id, ret, budget);
1017
1018	if (ret < budget) {
1019		spin_lock(&trans_pcie->irq_lock);
1020		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1021			_iwl_enable_interrupts(trans);
1022		spin_unlock(&trans_pcie->irq_lock);
1023
1024		napi_complete_done(&rxq->napi, ret);
1025	}
1026
1027	return ret;
1028}
1029
1030static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
1031{
1032	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1033	struct iwl_trans_pcie *trans_pcie;
1034	struct iwl_trans *trans;
1035	int ret;
1036
1037	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1038	trans = trans_pcie->trans;
1039
1040	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1041	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1042		      budget);
1043
1044	if (ret < budget) {
1045		int irq_line = rxq->id;
1046
1047		/* FIRST_RSS is shared with line 0 */
1048		if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
1049		    rxq->id == 1)
1050			irq_line = 0;
1051
1052		spin_lock(&trans_pcie->irq_lock);
1053		iwl_pcie_clear_irq(trans, irq_line);
1054		spin_unlock(&trans_pcie->irq_lock);
1055
1056		napi_complete_done(&rxq->napi, ret);
1057	}
1058
1059	return ret;
1060}
1061
1062void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
1063{
1064	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1065	int i;
1066
1067	if (unlikely(!trans_pcie->rxq))
1068		return;
1069
1070	for (i = 0; i < trans->num_rx_queues; i++) {
1071		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1072
1073		if (rxq && rxq->napi.poll)
1074			napi_synchronize(&rxq->napi);
1075	}
1076}
1077
1078static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1079{
1080	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1081	struct iwl_rxq *def_rxq;
1082	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1083	int i, err, queue_size, allocator_pool_size, num_alloc;
1084
1085	if (!trans_pcie->rxq) {
1086		err = iwl_pcie_rx_alloc(trans);
1087		if (err)
1088			return err;
1089	}
1090	def_rxq = trans_pcie->rxq;
1091
1092	cancel_work_sync(&rba->rx_alloc);
1093
1094	spin_lock_bh(&rba->lock);
1095	atomic_set(&rba->req_pending, 0);
1096	atomic_set(&rba->req_ready, 0);
1097	INIT_LIST_HEAD(&rba->rbd_allocated);
1098	INIT_LIST_HEAD(&rba->rbd_empty);
1099	spin_unlock_bh(&rba->lock);
1100
1101	/* free all first - we overwrite everything here */
1102	iwl_pcie_free_rbs_pool(trans);
1103
1104	for (i = 0; i < RX_QUEUE_SIZE; i++)
1105		def_rxq->queue[i] = NULL;
1106
1107	for (i = 0; i < trans->num_rx_queues; i++) {
1108		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1109
1110		spin_lock_bh(&rxq->lock);
1111		/*
1112		 * Set read write pointer to reflect that we have processed
1113		 * and used all buffers, but have not restocked the Rx queue
1114		 * with fresh buffers
1115		 */
1116		rxq->read = 0;
1117		rxq->write = 0;
1118		rxq->write_actual = 0;
1119		memset(rxq->rb_stts, 0,
1120		       (trans->trans_cfg->device_family >=
1121			IWL_DEVICE_FAMILY_AX210) ?
1122		       sizeof(__le16) : sizeof(struct iwl_rb_status));
1123
1124		iwl_pcie_rx_init_rxb_lists(rxq);
1125
1126		spin_unlock_bh(&rxq->lock);
1127
1128		if (!rxq->napi.poll) {
1129			int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
1130
1131			if (trans_pcie->msix_enabled)
1132				poll = iwl_pcie_napi_poll_msix;
1133
1134			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1135				       poll);
1136			napi_enable(&rxq->napi);
1137		}
1138
1139	}
1140
1141	/* move the pool to the default queue and allocator ownerships */
1142	queue_size = trans->trans_cfg->mq_rx_supported ?
1143			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1144	allocator_pool_size = trans->num_rx_queues *
1145		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1146	num_alloc = queue_size + allocator_pool_size;
1147
1148	for (i = 0; i < num_alloc; i++) {
1149		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1150
1151		if (i < allocator_pool_size)
1152			list_add(&rxb->list, &rba->rbd_empty);
1153		else
1154			list_add(&rxb->list, &def_rxq->rx_used);
1155		trans_pcie->global_table[i] = rxb;
1156		rxb->vid = (u16)(i + 1);
1157		rxb->invalid = true;
1158	}
1159
1160	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1161
1162	return 0;
1163}
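
/*
 * Illustrative example (not part of the driver): on non-MQ hardware with a
 * single Rx queue and the values assumed in the theory of operation above
 * (RX_QUEUE_SIZE = 256, RX_CLAIM_REQ_ALLOC = 8, RX_POST_REQ_ALLOC = 2),
 * the loop above splits the pool as allocator_pool_size = 1 * (8 - 2) = 6
 * RBDs for the allocator and 256 RBDs for the default queue, i.e.
 * num_alloc = 262.
 */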
1164
1165int iwl_pcie_rx_init(struct iwl_trans *trans)
1166{
1167	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1168	int ret = _iwl_pcie_rx_init(trans);
1169
1170	if (ret)
1171		return ret;
1172
1173	if (trans->trans_cfg->mq_rx_supported)
1174		iwl_pcie_rx_mq_hw_init(trans);
1175	else
1176		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1177
1178	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1179
1180	spin_lock_bh(&trans_pcie->rxq->lock);
1181	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1182	spin_unlock_bh(&trans_pcie->rxq->lock);
1183
1184	return 0;
1185}
1186
1187int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1188{
1189	/* Set interrupt coalescing timer to default (2048 usecs) */
1190	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1191
	/*
	 * We don't configure the RFH.
	 * Restock will be done at alive, after the firmware has configured
	 * the RFH.
	 */
1196	return _iwl_pcie_rx_init(trans);
1197}
1198
1199void iwl_pcie_rx_free(struct iwl_trans *trans)
1200{
1201	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1202	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
1203	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1204	int i;
1205
1206	/*
1207	 * if rxq is NULL, it means that nothing has been allocated,
1208	 * exit now
1209	 */
1210	if (!trans_pcie->rxq) {
1211		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1212		return;
1213	}
1214
1215	cancel_work_sync(&rba->rx_alloc);
1216
1217	iwl_pcie_free_rbs_pool(trans);
1218
1219	if (trans_pcie->base_rb_stts) {
1220		dma_free_coherent(trans->dev,
1221				  rb_stts_size * trans->num_rx_queues,
1222				  trans_pcie->base_rb_stts,
1223				  trans_pcie->base_rb_stts_dma);
1224		trans_pcie->base_rb_stts = NULL;
1225		trans_pcie->base_rb_stts_dma = 0;
1226	}
1227
1228	for (i = 0; i < trans->num_rx_queues; i++) {
1229		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1230
1231		iwl_pcie_free_rxq_dma(trans, rxq);
1232
1233		if (rxq->napi.poll) {
1234			napi_disable(&rxq->napi);
1235			netif_napi_del(&rxq->napi);
1236		}
1237	}
1238	kfree(trans_pcie->rx_pool);
1239	kfree(trans_pcie->global_table);
1240	kfree(trans_pcie->rxq);
1241
1242	if (trans_pcie->alloc_page)
1243		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1244}
1245
1246static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1247					  struct iwl_rb_allocator *rba)
1248{
1249	spin_lock(&rba->lock);
1250	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1251	spin_unlock(&rba->lock);
1252}
1253
1254/*
1255 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1256 *
1257 * Called when a RBD can be reused. The RBD is transferred to the allocator.
1258 * When there are 2 empty RBDs - a request for allocation is posted
1259 */
1260static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1261				  struct iwl_rx_mem_buffer *rxb,
1262				  struct iwl_rxq *rxq, bool emergency)
1263{
1264	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1265	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1266
	/* Move the RBD to the used list; it will be moved to the allocator
	 * in batches before claiming or posting a request.
	 */
1269	list_add_tail(&rxb->list, &rxq->rx_used);
1270
1271	if (unlikely(emergency))
1272		return;
1273
1274	/* Count the allocator owned RBDs */
1275	rxq->used_count++;
1276
	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
	 * issue a request to the allocator. Modulo RX_CLAIM_REQ_ALLOC is
	 * used to handle the case where we failed to claim
	 * RX_CLAIM_REQ_ALLOC buffers but still need to post another request.
	 */
1282	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the 2 RBDs to the allocator's ownership.
		 * The allocator has another 6 from the pool for the request
		 * completion.
		 */
1285		iwl_pcie_rx_move_to_allocator(rxq, rba);
1286
1287		atomic_inc(&rba->req_pending);
1288		queue_work(rba->alloc_wq, &rba->rx_alloc);
1289	}
1290}
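
/*
 * Illustrative example (not part of the driver): assuming
 * RX_POST_REQ_ALLOC = 2 and RX_CLAIM_REQ_ALLOC = 8 (the values used in the
 * theory of operation above), an allocation request is posted when
 * used_count reaches 2, 10, 18, ... (used_count % 8 == 2), while the queue
 * tries to claim the 8 pre-allocated buffers back once used_count reaches
 * 8 (see iwl_pcie_rx_handle()).
 */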
1291
1292static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1293				struct iwl_rxq *rxq,
1294				struct iwl_rx_mem_buffer *rxb,
1295				bool emergency,
1296				int i)
1297{
1298	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1299	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1300	bool page_stolen = false;
1301	int max_len = trans_pcie->rx_buf_bytes;
1302	u32 offset = 0;
1303
1304	if (WARN_ON(!rxb))
1305		return;
1306
1307	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1308
1309	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1310		struct iwl_rx_packet *pkt;
1311		bool reclaim;
1312		int len;
1313		struct iwl_rx_cmd_buffer rxcb = {
1314			._offset = rxb->offset + offset,
1315			._rx_page_order = trans_pcie->rx_page_order,
1316			._page = rxb->page,
1317			._page_stolen = false,
1318			.truesize = max_len,
1319		};
1320
1321		pkt = rxb_addr(&rxcb);
1322
1323		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1324			IWL_DEBUG_RX(trans,
1325				     "Q %d: RB end marker at offset %d\n",
1326				     rxq->id, offset);
1327			break;
1328		}
1329
1330		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1331			FH_RSCSR_RXQ_POS != rxq->id,
1332		     "frame on invalid queue - is on %d and indicates %d\n",
1333		     rxq->id,
1334		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1335			FH_RSCSR_RXQ_POS);
1336
1337		IWL_DEBUG_RX(trans,
1338			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1339			     rxq->id, offset,
1340			     iwl_get_cmd_string(trans,
1341						WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
1342			     pkt->hdr.group_id, pkt->hdr.cmd,
1343			     le16_to_cpu(pkt->hdr.sequence));
1344
1345		len = iwl_rx_packet_len(pkt);
1346		len += sizeof(u32); /* account for status word */
1347
1348		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1349
1350		/* check that what the device tells us made sense */
1351		if (len < sizeof(*pkt) || offset > max_len)
1352			break;
1353
1354		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1355		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1356
1357		/* Reclaim a command buffer only if this packet is a response
1358		 *   to a (driver-originated) command.
1359		 * If the packet (e.g. Rx frame) originated from uCode,
1360		 *   there is no command buffer to reclaim.
1361		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1362		 *   but apparently a few don't get set; catch them here. */
1363		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1364		if (reclaim && !pkt->hdr.group_id) {
1365			int i;
1366
1367			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1368				if (trans_pcie->no_reclaim_cmds[i] ==
1369							pkt->hdr.cmd) {
1370					reclaim = false;
1371					break;
1372				}
1373			}
1374		}
1375
1376		if (rxq->id == IWL_DEFAULT_RX_QUEUE)
1377			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1378				       &rxcb);
1379		else
1380			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1381					   &rxcb, rxq->id);
1382
1383		/*
1384		 * After here, we should always check rxcb._page_stolen,
1385		 * if it is true then one of the handlers took the page.
1386		 */
1387
1388		if (reclaim && txq) {
1389			u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1390			int index = SEQ_TO_INDEX(sequence);
1391			int cmd_index = iwl_txq_get_cmd_index(txq, index);
1392
1393			kfree_sensitive(txq->entries[cmd_index].free_buf);
1394			txq->entries[cmd_index].free_buf = NULL;
1395
1396			/* Invoke any callbacks, transfer the buffer to caller,
1397			 * and fire off the (possibly) blocking
1398			 * iwl_trans_send_cmd()
1399			 * as we reclaim the driver command queue */
1400			if (!rxcb._page_stolen)
1401				iwl_pcie_hcmd_complete(trans, &rxcb);
1402			else
1403				IWL_WARN(trans, "Claim null rxb?\n");
1404		}
1405
1406		page_stolen |= rxcb._page_stolen;
1407		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1408			break;
1409	}
1410
1411	/* page was stolen from us -- free our reference */
1412	if (page_stolen) {
1413		__free_pages(rxb->page, trans_pcie->rx_page_order);
1414		rxb->page = NULL;
1415	}
1416
1417	/* Reuse the page if possible. For notification packets and
1418	 * SKBs that fail to Rx correctly, add them back into the
1419	 * rx_free list for reuse later. */
1420	if (rxb->page != NULL) {
1421		rxb->page_dma =
1422			dma_map_page(trans->dev, rxb->page, rxb->offset,
1423				     trans_pcie->rx_buf_bytes,
1424				     DMA_FROM_DEVICE);
1425		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1426			/*
1427			 * free the page(s) as well to not break
1428			 * the invariant that the items on the used
1429			 * list have no page(s)
1430			 */
1431			__free_pages(rxb->page, trans_pcie->rx_page_order);
1432			rxb->page = NULL;
1433			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1434		} else {
1435			list_add_tail(&rxb->list, &rxq->rx_free);
1436			rxq->free_count++;
1437		}
1438	} else
1439		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1440}
1441
1442static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1443						  struct iwl_rxq *rxq, int i,
1444						  bool *join)
1445{
1446	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1447	struct iwl_rx_mem_buffer *rxb;
1448	u16 vid;
1449
1450	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1451	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
1452
1453	if (!trans->trans_cfg->mq_rx_supported) {
1454		rxb = rxq->queue[i];
1455		rxq->queue[i] = NULL;
1456		return rxb;
1457	}
1458
1459	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1460		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
1461
1462		vid = le16_to_cpu(cd[i].rbid);
1463		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1464	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1465		struct iwl_rx_completion_desc *cd = rxq->used_bd;
1466
1467		vid = le16_to_cpu(cd[i].rbid);
1468		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1469	} else {
1470		__le32 *cd = rxq->used_bd;
1471
1472		vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
1473	}
1474
1475	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1476		goto out_err;
1477
1478	rxb = trans_pcie->global_table[vid - 1];
1479	if (rxb->invalid)
1480		goto out_err;
1481
1482	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1483
1484	rxb->invalid = true;
1485
1486	return rxb;
1487
1488out_err:
1489	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1490	iwl_force_nmi(trans);
1491	return NULL;
1492}
1493
1494/*
1495 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1496 */
1497static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1498{
1499	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1500	struct iwl_rxq *rxq;
1501	u32 r, i, count = 0, handled = 0;
1502	bool emergency = false;
1503
1504	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1505		return budget;
1506
1507	rxq = &trans_pcie->rxq[queue];
1508
1509restart:
1510	spin_lock(&rxq->lock);
1511	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1512	 * buffer that the driver may process (last buffer filled by ucode). */
1513	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1514	i = rxq->read;
1515
1516	/* W/A 9000 device step A0 wrap-around bug */
1517	r &= (rxq->queue_size - 1);
1518
1519	/* Rx interrupt, but nothing sent from uCode */
1520	if (i == r)
1521		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1522
1523	while (i != r && ++handled < budget) {
1524		struct iwl_rb_allocator *rba = &trans_pcie->rba;
1525		struct iwl_rx_mem_buffer *rxb;
1526		/* number of RBDs still waiting for page allocation */
1527		u32 rb_pending_alloc =
1528			atomic_read(&trans_pcie->rba.req_pending) *
1529			RX_CLAIM_REQ_ALLOC;
1530		bool join = false;
1531
1532		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1533			     !emergency)) {
1534			iwl_pcie_rx_move_to_allocator(rxq, rba);
1535			emergency = true;
1536			IWL_DEBUG_TPT(trans,
1537				      "RX path is in emergency. Pending allocations %d\n",
1538				      rb_pending_alloc);
1539		}
1540
1541		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1542
1543		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1544		if (!rxb)
1545			goto out;
1546
1547		if (unlikely(join || rxq->next_rb_is_fragment)) {
1548			rxq->next_rb_is_fragment = join;
1549			/*
1550			 * We can only get a multi-RB in the following cases:
1551			 *  - firmware issue, sending a too big notification
1552			 *  - sniffer mode with a large A-MSDU
1553			 *  - large MTU frames (>2k)
1554			 * since the multi-RB functionality is limited to newer
1555			 * hardware that cannot put multiple entries into a
1556			 * single RB.
1557			 *
1558			 * Right now, the higher layers aren't set up to deal
1559			 * with that, so discard all of these.
1560			 */
1561			list_add_tail(&rxb->list, &rxq->rx_free);
1562			rxq->free_count++;
1563		} else {
1564			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1565		}
1566
1567		i = (i + 1) & (rxq->queue_size - 1);
1568
1569		/*
1570		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1571		 * try to claim the pre-allocated buffers from the allocator.
1572		 * If not ready - will try to reclaim next time.
1573		 * There is no need to reschedule work - allocator exits only
1574		 * on success
1575		 */
1576		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1577			iwl_pcie_rx_allocator_get(trans, rxq);
1578
1579		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1580			/* Add the remaining empty RBDs for allocator use */
1581			iwl_pcie_rx_move_to_allocator(rxq, rba);
1582		} else if (emergency) {
1583			count++;
1584			if (count == 8) {
1585				count = 0;
1586				if (rb_pending_alloc < rxq->queue_size / 3) {
1587					IWL_DEBUG_TPT(trans,
1588						      "RX path exited emergency. Pending allocations %d\n",
1589						      rb_pending_alloc);
1590					emergency = false;
1591				}
1592
1593				rxq->read = i;
1594				spin_unlock(&rxq->lock);
1595				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1596				iwl_pcie_rxq_restock(trans, rxq);
1597				goto restart;
1598			}
1599		}
1600	}
1601out:
1602	/* Backtrack one entry */
1603	rxq->read = i;
1604	spin_unlock(&rxq->lock);
1605
	/*
	 * Handle a case where, in emergency, there are some unallocated RBDs.
	 * Those RBDs are in the used list, but are not tracked by the queue's
	 * used_count, which only counts allocator-owned RBDs.
	 * Unallocated emergency RBDs must be allocated on exit, otherwise,
	 * when this function is called again it may not be in emergency mode
	 * and they will be handed to the allocator with no tracking in the
	 * RBD allocator counters, which will lead to them never being claimed
	 * back by the queue.
	 * By allocating them here, they are now in the queue's free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
1618	if (unlikely(emergency && count))
1619		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1620
1621	iwl_pcie_rxq_restock(trans, rxq);
1622
1623	return handled;
1624}
1625
1626static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1627{
1628	u8 queue = entry->entry;
1629	struct msix_entry *entries = entry - queue;
1630
1631	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1632}
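
/*
 * Illustrative note (not part of the driver): 'entry' points at
 * msix_entries[entry->entry] inside struct iwl_trans_pcie, so subtracting
 * the queue number recovers &msix_entries[0], from which container_of()
 * yields the enclosing iwl_trans_pcie.
 */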
1633
/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queues only.
 */
1638irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1639{
1640	struct msix_entry *entry = dev_id;
1641	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1642	struct iwl_trans *trans = trans_pcie->trans;
1643	struct iwl_rxq *rxq;
1644
1645	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1646
1647	if (WARN_ON(entry->entry >= trans->num_rx_queues))
1648		return IRQ_NONE;
1649
1650	if (!trans_pcie->rxq) {
1651		if (net_ratelimit())
1652			IWL_ERR(trans,
1653				"[%d] Got MSI-X interrupt before we have Rx queues\n",
1654				entry->entry);
1655		return IRQ_NONE;
1656	}
1657
1658	rxq = &trans_pcie->rxq[entry->entry];
1659	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1660	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1661
1662	local_bh_disable();
1663	if (napi_schedule_prep(&rxq->napi))
1664		__napi_schedule(&rxq->napi);
1665	else
1666		iwl_pcie_clear_irq(trans, entry->entry);
1667	local_bh_enable();
1668
1669	lock_map_release(&trans->sync_cmd_lockdep_map);
1670
1671	return IRQ_HANDLED;
1672}
1673
1674/*
1675 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1676 */
1677static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1678{
1679	int i;
1680
1681	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1682	if (trans->cfg->internal_wimax_coex &&
1683	    !trans->cfg->apmg_not_supported &&
1684	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1685			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1686	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1687			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1688		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1689		iwl_op_mode_wimax_active(trans->op_mode);
1690		wake_up(&trans->wait_command_queue);
1691		return;
1692	}
1693
1694	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1695		if (!trans->txqs.txq[i])
1696			continue;
1697		del_timer(&trans->txqs.txq[i]->stuck_timer);
1698	}
1699
1700	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1701	 * before we wake up the command caller, to ensure a proper cleanup. */
1702	iwl_trans_fw_error(trans, false);
1703
1704	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1705	wake_up(&trans->wait_command_queue);
1706}
1707
1708static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1709{
1710	u32 inta;
1711
1712	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1713
1714	trace_iwlwifi_dev_irq(trans->dev);
1715
1716	/* Discover which interrupts are active/pending */
1717	inta = iwl_read32(trans, CSR_INT);
1718
1719	/* the thread will service interrupts and re-enable them */
1720	return inta;
1721}
1722
1723/* a device (PCI-E) page is 4096 bytes long */
1724#define ICT_SHIFT	12
1725#define ICT_SIZE	(1 << ICT_SHIFT)
1726#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
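
/*
 * Illustrative note (not part of the driver): with 4-byte entries this
 * yields ICT_COUNT = 4096 / 4 = 1024 table entries per device page.
 */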
1727
/* Interrupt handler using the ICT table. With this mechanism the driver
 * stops using the INTA register to read the device's interrupts, since
 * reading that register is expensive. Instead, the device writes its
 * interrupts into an ICT table in DRAM, increments the index and fires an
 * interrupt to the driver. The driver then ORs all ICT table entries from
 * the current index up to the first entry with a 0 value; the result is
 * the interrupt cause we need to service. Finally, the driver sets the
 * entries back to 0 and updates the index.
 */
1736static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1737{
1738	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1739	u32 inta;
1740	u32 val = 0;
1741	u32 read;
1742
1743	trace_iwlwifi_dev_irq(trans->dev);
1744
1745	/* Ignore interrupt if there's nothing in NIC to service.
1746	 * This may be due to IRQ shared with another device,
1747	 * or due to sporadic interrupts thrown from our NIC. */
1748	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1749	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1750	if (!read)
1751		return 0;
1752
1753	/*
1754	 * Collect all entries up to the first 0, starting from ict_index;
1755	 * note we already read at ict_index.
1756	 */
1757	do {
1758		val |= read;
1759		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1760				trans_pcie->ict_index, read);
1761		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1762		trans_pcie->ict_index =
1763			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1764
1765		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1766		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1767					   read);
1768	} while (read);
1769
1770	/* We should not get this value, just ignore it. */
1771	if (val == 0xffffffff)
1772		val = 0;
1773
	/*
	 * This is a w/a for a h/w bug. The h/w bug may cause the Rx bit
	 * (bit 15 before shifting it to 31) to clear when using interrupt
	 * coalescing. Fortunately, bits 18 and 19 stay set when this happens,
	 * so we use them to decide on the real state of the Rx bit.
	 * In other words, bit 15 is set if bit 18 or bit 19 are set.
	 */
1781	if (val & 0xC0000)
1782		val |= 0x8000;
1783
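	/*
	 * Expand the compressed ICT value back into CSR_INT layout: the low
	 * byte maps to CSR_INT bits 0-7 and the high byte to bits 24-31.
	 */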
1784	inta = (0xff & val) | ((0xff00 & val) << 16);
1785	return inta;
1786}
1787
1788void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
1789{
1790	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1791	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1792	bool hw_rfkill, prev, report;
1793
1794	mutex_lock(&trans_pcie->mutex);
1795	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1796	hw_rfkill = iwl_is_rfkill_set(trans);
1797	if (hw_rfkill) {
1798		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1799		set_bit(STATUS_RFKILL_HW, &trans->status);
1800	}
1801	if (trans_pcie->opmode_down)
1802		report = hw_rfkill;
1803	else
1804		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1805
1806	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1807		 hw_rfkill ? "disable radio" : "enable radio");
1808
1809	isr_stats->rfkill++;
1810
1811	if (prev != report)
1812		iwl_trans_pcie_rf_kill(trans, report, from_irq);
1813	mutex_unlock(&trans_pcie->mutex);
1814
1815	if (hw_rfkill) {
1816		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1817				       &trans->status))
1818			IWL_DEBUG_RF_KILL(trans,
1819					  "Rfkill while SYNC HCMD in flight\n");
1820		wake_up(&trans->wait_command_queue);
1821	} else {
1822		clear_bit(STATUS_RFKILL_HW, &trans->status);
1823		if (trans_pcie->opmode_down)
1824			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1825	}
1826}
1827
1828irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1829{
1830	struct iwl_trans *trans = dev_id;
1831	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1832	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1833	u32 inta = 0;
1834	u32 handled = 0;
1835	bool polling = false;
1836
1837	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1838
1839	spin_lock_bh(&trans_pcie->irq_lock);
1840
	/* If the DRAM interrupt (ICT) table is in use, read the causes from
	 * it; otherwise fall back to the legacy INTA register.
	 */
1844	if (likely(trans_pcie->use_ict))
1845		inta = iwl_pcie_int_cause_ict(trans);
1846	else
1847		inta = iwl_pcie_int_cause_non_ict(trans);
1848
1849	if (iwl_have_debug_level(IWL_DL_ISR)) {
1850		IWL_DEBUG_ISR(trans,
1851			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1852			      inta, trans_pcie->inta_mask,
1853			      iwl_read32(trans, CSR_INT_MASK),
1854			      iwl_read32(trans, CSR_FH_INT_STATUS));
1855		if (inta & (~trans_pcie->inta_mask))
1856			IWL_DEBUG_ISR(trans,
1857				      "We got a masked interrupt (0x%08x)\n",
1858				      inta & (~trans_pcie->inta_mask));
1859	}
1860
1861	inta &= trans_pcie->inta_mask;
1862
1863	/*
1864	 * Ignore interrupt if there's nothing in NIC to service.
1865	 * This may be due to IRQ shared with another device,
1866	 * or due to sporadic interrupts thrown from our NIC.
1867	 */
1868	if (unlikely(!inta)) {
1869		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1870		/*
1871		 * Re-enable interrupts here since we don't
1872		 * have anything to service
1873		 */
1874		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1875			_iwl_enable_interrupts(trans);
1876		spin_unlock_bh(&trans_pcie->irq_lock);
1877		lock_map_release(&trans->sync_cmd_lockdep_map);
1878		return IRQ_NONE;
1879	}
1880
1881	if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
1882		/*
1883		 * Hardware disappeared. It might have
1884		 * already raised an interrupt.
1885		 */
1886		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1887		spin_unlock_bh(&trans_pcie->irq_lock);
1888		goto out;
1889	}
1890
	/* Ack/clear/reset pending uCode interrupts.
	 * Note: some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS;
	 * the FH causes are acked separately via CSR_FH_INT_STATUS below.
	 */
	/* There is a hardware bug in the interrupt mask function: some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts to fail to be detected. We work around
	 * the hardware bugs here by ACKing all the possible interrupts so
	 * that interrupt coalescing can still be achieved.
	 */
1902	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1903
1904	if (iwl_have_debug_level(IWL_DL_ISR))
1905		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1906			      inta, iwl_read32(trans, CSR_INT_MASK));
1907
1908	spin_unlock_bh(&trans_pcie->irq_lock);
1909
1910	/* Now service all interrupt bits discovered above. */
1911	if (inta & CSR_INT_BIT_HW_ERR) {
1912		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1913
1914		/* Tell the device to stop sending interrupts */
1915		iwl_disable_interrupts(trans);
1916
1917		isr_stats->hw++;
1918		iwl_pcie_irq_handle_error(trans);
1919
1920		handled |= CSR_INT_BIT_HW_ERR;
1921
1922		goto out;
1923	}
1924
1925	/* NIC fires this, but we don't use it, redundant with WAKEUP */
1926	if (inta & CSR_INT_BIT_SCD) {
		IWL_DEBUG_ISR(trans,
			      "Scheduler finished transmitting the frame(s).\n");
1929		isr_stats->sch++;
1930	}
1931
1932	/* Alive notification via Rx interrupt will do the real work */
1933	if (inta & CSR_INT_BIT_ALIVE) {
1934		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1935		isr_stats->alive++;
1936		if (trans->trans_cfg->gen2) {
1937			/*
1938			 * We can restock, since firmware configured
1939			 * the RFH
1940			 */
1941			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1942		}
1943
1944		handled |= CSR_INT_BIT_ALIVE;
1945	}
1946
1947	/* Safely ignore these bits for debug checks below */
1948	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1949
1950	/* HW RF KILL switch toggled */
1951	if (inta & CSR_INT_BIT_RF_KILL) {
1952		iwl_pcie_handle_rfkill_irq(trans, true);
1953		handled |= CSR_INT_BIT_RF_KILL;
1954	}
1955
1956	/* Chip got too hot and stopped itself */
1957	if (inta & CSR_INT_BIT_CT_KILL) {
1958		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1959		isr_stats->ctkill++;
1960		handled |= CSR_INT_BIT_CT_KILL;
1961	}
1962
1963	/* Error detected by uCode */
1964	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans,
			"Microcode SW error detected. Restarting 0x%X.\n", inta);
1967		isr_stats->sw++;
1968		iwl_pcie_irq_handle_error(trans);
1969		handled |= CSR_INT_BIT_SW_ERR;
1970	}
1971
1972	/* uCode wakes up after power-down sleep */
1973	if (inta & CSR_INT_BIT_WAKEUP) {
1974		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1975		iwl_pcie_rxq_check_wrptr(trans);
1976		iwl_pcie_txq_check_wrptrs(trans);
1977
1978		isr_stats->wakeup++;
1979
1980		handled |= CSR_INT_BIT_WAKEUP;
1981	}
1982
	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here.
	 */
1986	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1987		    CSR_INT_BIT_RX_PERIODIC)) {
1988		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1989		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1990			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1991			iwl_write32(trans, CSR_FH_INT_STATUS,
1992					CSR_FH_INT_RX_MASK);
1993		}
1994		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1995			handled |= CSR_INT_BIT_RX_PERIODIC;
1996			iwl_write32(trans,
1997				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1998		}
		/* Sending an RX interrupt requires many steps to be done in
		 * the device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to an RX race: the driver may receive the
		 * RX interrupt before the shared data reflects the change;
		 * the periodic interrupt will detect any such dangling Rx
		 * activity.
		 */
2009
2010		/* Disable periodic interrupt; we use it as just a one-shot. */
2011		iwl_write8(trans, CSR_INT_PERIODIC_REG,
2012			    CSR_INT_PERIODIC_DIS);
2013
		/*
		 * Enable the periodic interrupt in 8 msec only if we received
		 * a real RX interrupt (instead of just the periodic one), to
		 * catch any dangling Rx interrupt. If it was just the periodic
		 * interrupt, there was no dangling Rx activity and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
2021		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
2022			iwl_write8(trans, CSR_INT_PERIODIC_REG,
2023				   CSR_INT_PERIODIC_ENA);
2024
2025		isr_stats->rx++;
2026
2027		local_bh_disable();
2028		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2029			polling = true;
2030			__napi_schedule(&trans_pcie->rxq[0].napi);
2031		}
2032		local_bh_enable();
2033	}
2034
2035	/* This "Tx" DMA channel is used only for loading uCode */
2036	if (inta & CSR_INT_BIT_FH_TX) {
2037		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2038		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2039		isr_stats->tx++;
2040		handled |= CSR_INT_BIT_FH_TX;
2041		/* Wake up uCode load routine, now that load is complete */
2042		trans_pcie->ucode_write_complete = true;
2043		wake_up(&trans_pcie->ucode_write_waitq);
2044		/* Wake up IMR write routine, now that write to SRAM is complete */
2045		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2046			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2047			wake_up(&trans_pcie->ucode_write_waitq);
2048		}
2049	}
2050
2051	if (inta & ~handled) {
2052		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2053		isr_stats->unhandled++;
2054	}
2055
2056	if (inta & ~(trans_pcie->inta_mask)) {
2057		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2058			 inta & ~trans_pcie->inta_mask);
2059	}
2060
2061	if (!polling) {
2062		spin_lock_bh(&trans_pcie->irq_lock);
		/* Re-enable all interrupts only if they were disabled by this irq handler */
2064		if (test_bit(STATUS_INT_ENABLED, &trans->status))
2065			_iwl_enable_interrupts(trans);
2066		/* we are loading the firmware, enable FH_TX interrupt only */
2067		else if (handled & CSR_INT_BIT_FH_TX)
2068			iwl_enable_fw_load_int(trans);
2069		/* Re-enable RF_KILL if it occurred */
2070		else if (handled & CSR_INT_BIT_RF_KILL)
2071			iwl_enable_rfkill_int(trans);
2072		/* Re-enable the ALIVE / Rx interrupt if it occurred */
2073		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
2074			iwl_enable_fw_load_int_ctx_info(trans);
2075		spin_unlock_bh(&trans_pcie->irq_lock);
2076	}
2077
2078out:
2079	lock_map_release(&trans->sync_cmd_lockdep_map);
2080	return IRQ_HANDLED;
2081}
2082
2083/******************************************************************************
2084 *
2085 * ICT functions
2086 *
2087 ******************************************************************************/
2088
2089/* Free dram table */
2090void iwl_pcie_free_ict(struct iwl_trans *trans)
2091{
2092	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2093
2094	if (trans_pcie->ict_tbl) {
2095		dma_free_coherent(trans->dev, ICT_SIZE,
2096				  trans_pcie->ict_tbl,
2097				  trans_pcie->ict_tbl_dma);
2098		trans_pcie->ict_tbl = NULL;
2099		trans_pcie->ict_tbl_dma = 0;
2100	}
2101}
2102
/*
 * Allocate the DRAM shared ICT table as an aligned memory block of
 * ICT_SIZE bytes; the ICT state itself is reset in iwl_pcie_reset_ict().
 */
2108int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2109{
2110	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2111
2112	trans_pcie->ict_tbl =
2113		dma_alloc_coherent(trans->dev, ICT_SIZE,
2114				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2115	if (!trans_pcie->ict_tbl)
2116		return -ENOMEM;
2117
2118	/* just an API sanity check ... it is guaranteed to be aligned */
2119	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2120		iwl_pcie_free_ict(trans);
2121		return -EINVAL;
2122	}
2123
2124	return 0;
2125}
2126
/* The device is going up: inform it that we are using the ICT interrupt
 * table, and tell the driver to start using ICT interrupts.
 */
2130void iwl_pcie_reset_ict(struct iwl_trans *trans)
2131{
2132	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2133	u32 val;
2134
2135	if (!trans_pcie->ict_tbl)
2136		return;
2137
2138	spin_lock_bh(&trans_pcie->irq_lock);
2139	_iwl_disable_interrupts(trans);
2140
2141	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2142
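	/*
	 * The ICT table is 4 KiB aligned, so only the page-number part of
	 * its DMA address is programmed into the register.
	 */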
2143	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2144
2145	val |= CSR_DRAM_INT_TBL_ENABLE |
2146	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
2147	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
2148
	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);
2150
2151	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2152	trans_pcie->use_ict = true;
2153	trans_pcie->ict_index = 0;
2154	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2155	_iwl_enable_interrupts(trans);
2156	spin_unlock_bh(&trans_pcie->irq_lock);
2157}
2158
/* The device is going down: disable ICT interrupt usage */
2160void iwl_pcie_disable_ict(struct iwl_trans *trans)
2161{
2162	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2163
2164	spin_lock_bh(&trans_pcie->irq_lock);
2165	trans_pcie->use_ict = false;
2166	spin_unlock_bh(&trans_pcie->irq_lock);
2167}
2168
2169irqreturn_t iwl_pcie_isr(int irq, void *data)
2170{
2171	struct iwl_trans *trans = data;
2172
2173	if (!trans)
2174		return IRQ_NONE;
2175
2176	/* Disable (but don't clear!) interrupts here to avoid
2177	 * back-to-back ISRs and sporadic interrupts from our NIC.
2178	 * If we have something to service, the tasklet will re-enable ints.
2179	 * If we *don't* have something, we'll re-enable before leaving here.
2180	 */
2181	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2182
2183	return IRQ_WAKE_THREAD;
2184}
2185
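/*
 * With MSI-X, each vector is dedicated to this device, so there is nothing
 * to mask or check in hard-IRQ context; just wake the threaded handler,
 * which reads and clears the MSI-X cause registers.
 */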
2186irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2187{
2188	return IRQ_WAKE_THREAD;
2189}
2190
2191irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2192{
2193	struct msix_entry *entry = dev_id;
2194	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2195	struct iwl_trans *trans = trans_pcie->trans;
2196	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2197	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
2198	u32 inta_fh, inta_hw;
2199	bool polling = false;
2200	bool sw_err;
2201
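	/*
	 * When this vector is shared with the default or first-RSS RX queue,
	 * their data-queue causes must be cleared and handled here as well;
	 * otherwise the dedicated RX vectors take care of them.
	 */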
2202	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2203		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
2204
2205	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2206		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
2207
2208	lock_map_acquire(&trans->sync_cmd_lockdep_map);
2209
2210	spin_lock_bh(&trans_pcie->irq_lock);
2211	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2212	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
	/*
	 * Clear the causes registers to avoid handling the same cause twice.
	 */
2216	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
2217	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2218	spin_unlock_bh(&trans_pcie->irq_lock);
2219
2220	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2221
2222	if (unlikely(!(inta_fh | inta_hw))) {
2223		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2224		lock_map_release(&trans->sync_cmd_lockdep_map);
2225		return IRQ_NONE;
2226	}
2227
2228	if (iwl_have_debug_level(IWL_DL_ISR)) {
2229		IWL_DEBUG_ISR(trans,
2230			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2231			      entry->entry, inta_fh, trans_pcie->fh_mask,
2232			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2233		if (inta_fh & ~trans_pcie->fh_mask)
2234			IWL_DEBUG_ISR(trans,
2235				      "We got a masked interrupt (0x%08x)\n",
2236				      inta_fh & ~trans_pcie->fh_mask);
2237	}
2238
2239	inta_fh &= trans_pcie->fh_mask;
2240
2241	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2242	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2243		local_bh_disable();
2244		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2245			polling = true;
2246			__napi_schedule(&trans_pcie->rxq[0].napi);
2247		}
2248		local_bh_enable();
2249	}
2250
2251	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2252	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2253		local_bh_disable();
2254		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2255			polling = true;
2256			__napi_schedule(&trans_pcie->rxq[1].napi);
2257		}
2258		local_bh_enable();
2259	}
2260
2261	/* This "Tx" DMA channel is used only for loading uCode */
2262	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
2263	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2264		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
2265		isr_stats->tx++;
2266
2267		/* Wake up IMR routine once write to SRAM is complete */
2268		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2269			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2270			wake_up(&trans_pcie->ucode_write_waitq);
2271		}
2272	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2273		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2274		isr_stats->tx++;
2275		/*
2276		 * Wake up uCode load routine,
2277		 * now that load is complete
2278		 */
2279		trans_pcie->ucode_write_complete = true;
2280		wake_up(&trans_pcie->ucode_write_waitq);
2281
2282		/* Wake up IMR routine once write to SRAM is complete */
2283		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2284			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2285			wake_up(&trans_pcie->ucode_write_waitq);
2286		}
2287	}
2288
2289	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2290		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
2291	else
2292		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
2293
2294	/* Error detected by uCode */
2295	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
2296		IWL_ERR(trans,
2297			"Microcode SW error detected. Restarting 0x%X.\n",
2298			inta_fh);
2299		isr_stats->sw++;
		/* during the FW reset flow, report errors from there */
2301		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2302			trans_pcie->imr_status = IMR_D2S_ERROR;
2303			wake_up(&trans_pcie->imr_waitq);
2304		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2305			trans_pcie->fw_reset_state = FW_RESET_ERROR;
2306			wake_up(&trans_pcie->fw_reset_waitq);
2307		} else {
2308			iwl_pcie_irq_handle_error(trans);
2309		}
2310	}
2311
2312	/* After checking FH register check HW register */
2313	if (iwl_have_debug_level(IWL_DL_ISR)) {
2314		IWL_DEBUG_ISR(trans,
2315			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2316			      entry->entry, inta_hw, trans_pcie->hw_mask,
2317			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2318		if (inta_hw & ~trans_pcie->hw_mask)
2319			IWL_DEBUG_ISR(trans,
2320				      "We got a masked interrupt 0x%08x\n",
2321				      inta_hw & ~trans_pcie->hw_mask);
2322	}
2323
2324	inta_hw &= trans_pcie->hw_mask;
2325
2326	/* Alive notification via Rx interrupt will do the real work */
2327	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2328		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2329		isr_stats->alive++;
2330		if (trans->trans_cfg->gen2) {
2331			/* We can restock, since firmware configured the RFH */
2332			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2333		}
2334	}
2335
2336	/*
2337	 * In some rare cases when the HW is in a bad state, we may
2338	 * get this interrupt too early, when prph_info is still NULL.
2339	 * So make sure that it's not NULL to prevent crashing.
2340	 */
2341	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2342		u32 sleep_notif =
2343			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2344		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2345		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2346			IWL_DEBUG_ISR(trans,
2347				      "Sx interrupt: sleep notification = 0x%x\n",
2348				      sleep_notif);
2349			trans_pcie->sx_complete = true;
2350			wake_up(&trans_pcie->sx_waitq);
2351		} else {
2352			/* uCode wakes up after power-down sleep */
2353			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2354			iwl_pcie_rxq_check_wrptr(trans);
2355			iwl_pcie_txq_check_wrptrs(trans);
2356
2357			isr_stats->wakeup++;
2358		}
2359	}
2360
2361	/* Chip got too hot and stopped itself */
2362	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2363		IWL_ERR(trans, "Microcode CT kill error detected.\n");
2364		isr_stats->ctkill++;
2365	}
2366
2367	/* HW RF KILL switch toggled */
2368	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2369		iwl_pcie_handle_rfkill_irq(trans, true);
2370
2371	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2372		IWL_ERR(trans,
2373			"Hardware error detected. Restarting.\n");
2374
2375		isr_stats->hw++;
2376		trans->dbg.hw_error = true;
2377		iwl_pcie_irq_handle_error(trans);
2378	}
2379
2380	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
2381		IWL_DEBUG_ISR(trans, "Reset flow completed\n");
2382		trans_pcie->fw_reset_state = FW_RESET_OK;
2383		wake_up(&trans_pcie->fw_reset_waitq);
2384	}
2385
2386	if (!polling)
2387		iwl_pcie_clear_irq(trans, entry->entry);
2388
2389	lock_map_release(&trans->sync_cmd_lockdep_map);
2390
2391	return IRQ_HANDLED;
2392}
2393