162306a36Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
262306a36Sopenharmony_ci/*
362306a36Sopenharmony_ci * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
462306a36Sopenharmony_ci * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
562306a36Sopenharmony_ci * Copyright (C) 2016-2017 Intel Deutschland GmbH
662306a36Sopenharmony_ci */
762306a36Sopenharmony_ci#include <linux/sched.h>
862306a36Sopenharmony_ci#include <linux/wait.h>
962306a36Sopenharmony_ci#include <linux/gfp.h>
1062306a36Sopenharmony_ci
1162306a36Sopenharmony_ci#include "iwl-prph.h"
1262306a36Sopenharmony_ci#include "iwl-io.h"
1362306a36Sopenharmony_ci#include "internal.h"
1462306a36Sopenharmony_ci#include "iwl-op-mode.h"
1562306a36Sopenharmony_ci#include "iwl-context-info-gen3.h"
1662306a36Sopenharmony_ci
1762306a36Sopenharmony_ci/******************************************************************************
1862306a36Sopenharmony_ci *
1962306a36Sopenharmony_ci * RX path functions
2062306a36Sopenharmony_ci *
2162306a36Sopenharmony_ci ******************************************************************************/
2262306a36Sopenharmony_ci
2362306a36Sopenharmony_ci/*
2462306a36Sopenharmony_ci * Rx theory of operation
2562306a36Sopenharmony_ci *
2662306a36Sopenharmony_ci * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
2762306a36Sopenharmony_ci * each of which point to Receive Buffers to be filled by the NIC.  These get
2862306a36Sopenharmony_ci * used not only for Rx frames, but for any command response or notification
2962306a36Sopenharmony_ci * from the NIC.  The driver and NIC manage the Rx buffers by means
3062306a36Sopenharmony_ci * of indexes into the circular buffer.
3162306a36Sopenharmony_ci *
3262306a36Sopenharmony_ci * Rx Queue Indexes
3362306a36Sopenharmony_ci * The host/firmware share two index registers for managing the Rx buffers.
3462306a36Sopenharmony_ci *
3562306a36Sopenharmony_ci * The READ index maps to the first position that the firmware may be writing
3662306a36Sopenharmony_ci * to -- the driver can read up to (but not including) this position and get
3762306a36Sopenharmony_ci * good data.
3862306a36Sopenharmony_ci * The READ index is managed by the firmware once the card is enabled.
3962306a36Sopenharmony_ci *
4062306a36Sopenharmony_ci * The WRITE index maps to the last position the driver has read from -- the
4162306a36Sopenharmony_ci * position preceding WRITE is the last slot the firmware can place a packet.
4262306a36Sopenharmony_ci *
4362306a36Sopenharmony_ci * The queue is empty (no good data) if WRITE = READ - 1, and is full if
4462306a36Sopenharmony_ci * WRITE = READ.
4562306a36Sopenharmony_ci *
4662306a36Sopenharmony_ci * During initialization, the host sets up the READ queue position to the first
4762306a36Sopenharmony_ci * INDEX position, and WRITE to the last (READ - 1 wrapped)
4862306a36Sopenharmony_ci *
4962306a36Sopenharmony_ci * When the firmware places a packet in a buffer, it will advance the READ index
5062306a36Sopenharmony_ci * and fire the RX interrupt.  The driver can then query the READ index and
5162306a36Sopenharmony_ci * process as many packets as possible, moving the WRITE index forward as it
5262306a36Sopenharmony_ci * resets the Rx queue buffers with new memory.
5362306a36Sopenharmony_ci *
5462306a36Sopenharmony_ci * The management in the driver is as follows:
5562306a36Sopenharmony_ci * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
5662306a36Sopenharmony_ci *   When the interrupt handler is called, the request is processed.
5762306a36Sopenharmony_ci *   The page is either stolen - transferred to the upper layer
5862306a36Sopenharmony_ci *   or reused - added immediately to the iwl->rxq->rx_free list.
5962306a36Sopenharmony_ci * + When the page is stolen - the driver updates the matching queue's used
6062306a36Sopenharmony_ci *   count, detaches the RBD and transfers it to the queue used list.
6162306a36Sopenharmony_ci *   When there are two used RBDs - they are transferred to the allocator empty
6262306a36Sopenharmony_ci *   list. Work is then scheduled for the allocator to start allocating
6362306a36Sopenharmony_ci *   eight buffers.
6462306a36Sopenharmony_ci *   When there are another 6 used RBDs - they are transferred to the allocator
6562306a36Sopenharmony_ci *   empty list and the driver tries to claim the pre-allocated buffers and
6662306a36Sopenharmony_ci *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
6762306a36Sopenharmony_ci *   until ready.
6862306a36Sopenharmony_ci *   When there are 8+ buffers in the free list - either from allocation or from
6962306a36Sopenharmony_ci *   8 reused unstolen pages - restock is called to update the FW and indexes.
7062306a36Sopenharmony_ci * + In order to make sure the allocator always has RBDs to use for allocation
7162306a36Sopenharmony_ci *   the allocator has initial pool in the size of num_queues*(8-2) - the
7262306a36Sopenharmony_ci *   maximum missing RBDs per allocation request (request posted with 2
7362306a36Sopenharmony_ci *    empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
7562306a36Sopenharmony_ci * + A received packet is processed and handed to the kernel network stack,
7662306a36Sopenharmony_ci *   detached from the iwl->rxq.  The driver 'processed' index is updated.
7762306a36Sopenharmony_ci * + If there are no allocated buffers in iwl->rxq->rx_free,
7862306a36Sopenharmony_ci *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
7962306a36Sopenharmony_ci *   If there were enough free buffers and RX_STALLED is set it is cleared.
8062306a36Sopenharmony_ci *
8162306a36Sopenharmony_ci *
8262306a36Sopenharmony_ci * Driver sequence:
8362306a36Sopenharmony_ci *
8462306a36Sopenharmony_ci * iwl_rxq_alloc()            Allocates rx_free
8562306a36Sopenharmony_ci * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
8662306a36Sopenharmony_ci *                            iwl_pcie_rxq_restock.
8762306a36Sopenharmony_ci *                            Used only during initialization.
8862306a36Sopenharmony_ci * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
8962306a36Sopenharmony_ci *                            queue, updates firmware pointers, and updates
9062306a36Sopenharmony_ci *                            the WRITE index.
9162306a36Sopenharmony_ci * iwl_pcie_rx_allocator()     Background work for allocating pages.
9262306a36Sopenharmony_ci *
9362306a36Sopenharmony_ci * -- enable interrupts --
9462306a36Sopenharmony_ci * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
9562306a36Sopenharmony_ci *                            READ INDEX, detaching the SKB from the pool.
9662306a36Sopenharmony_ci *                            Moves the packet buffer from queue to rx_used.
9762306a36Sopenharmony_ci *                            Posts and claims requests to the allocator.
9862306a36Sopenharmony_ci *                            Calls iwl_pcie_rxq_restock to refill any empty
9962306a36Sopenharmony_ci *                            slots.
10062306a36Sopenharmony_ci *
10162306a36Sopenharmony_ci * RBD life-cycle:
10262306a36Sopenharmony_ci *
10362306a36Sopenharmony_ci * Init:
10462306a36Sopenharmony_ci * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
10562306a36Sopenharmony_ci *
10662306a36Sopenharmony_ci * Regular Receive interrupt:
10762306a36Sopenharmony_ci * Page Stolen:
10862306a36Sopenharmony_ci * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
10962306a36Sopenharmony_ci * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
11062306a36Sopenharmony_ci * Page not Stolen:
11162306a36Sopenharmony_ci * rxq.queue -> rxq.rx_free -> rxq.queue
11262306a36Sopenharmony_ci * ...
11362306a36Sopenharmony_ci *
11462306a36Sopenharmony_ci */
11562306a36Sopenharmony_ci
/*
 * iwl_rxq_space - Return number of free slots available in queue.
 *
 * One slot is always kept unused so that a completely full queue can be
 * distinguished from an empty one (see the "Rx theory of operation"
 * comment at the top of this file).
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}
13262306a36Sopenharmony_ci
/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 *
 * The device consumes RB addresses shifted right by 8 bits: the low 8
 * address bits are dropped here, so the buffer is presumably required to
 * be 256-byte aligned by the hardware.
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}
14062306a36Sopenharmony_ci
/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 *
 * Disables the RX DMA engine via the register set that matches the
 * device generation, then polls (1000 usec budget) for the engine to
 * report idle. Returns the result of the respective poll helper.
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->trans_cfg->mq_rx_supported) {
		/* multi-queue RX hardware (RFH registers) */
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		/* legacy single-queue RX (FH registers) */
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}
16262306a36Sopenharmony_ci
/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 *
 * Writes rxq->write (rounded down to a multiple of 8) to the write
 * pointer register appropriate for the device generation. If the NIC
 * may be asleep, only requests MAC access and defers the actual write
 * by setting rxq->need_update (flushed later by
 * iwl_pcie_rxq_check_wrptr()).
 *
 * Caller must hold rxq->lock.
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			/* defer the register write until the NIC is awake */
			rxq->need_update = true;
			return;
		}
	}

	/* device consumes the write pointer in multiples of 8 */
	rxq->write_actual = round_down(rxq->write, 8);
	if (!trans->trans_cfg->mq_rx_supported)
		/* legacy single-queue devices */
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		/* BZ and later: queue id is encoded into the value */
		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
	else
		/* multi-queue devices before BZ: per-queue register */
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
}
20262306a36Sopenharmony_ci
20362306a36Sopenharmony_cistatic void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
20462306a36Sopenharmony_ci{
20562306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
20662306a36Sopenharmony_ci	int i;
20762306a36Sopenharmony_ci
20862306a36Sopenharmony_ci	for (i = 0; i < trans->num_rx_queues; i++) {
20962306a36Sopenharmony_ci		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
21062306a36Sopenharmony_ci
21162306a36Sopenharmony_ci		if (!rxq->need_update)
21262306a36Sopenharmony_ci			continue;
21362306a36Sopenharmony_ci		spin_lock_bh(&rxq->lock);
21462306a36Sopenharmony_ci		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
21562306a36Sopenharmony_ci		rxq->need_update = false;
21662306a36Sopenharmony_ci		spin_unlock_bh(&rxq->lock);
21762306a36Sopenharmony_ci	}
21862306a36Sopenharmony_ci}
21962306a36Sopenharmony_ci
22062306a36Sopenharmony_cistatic void iwl_pcie_restock_bd(struct iwl_trans *trans,
22162306a36Sopenharmony_ci				struct iwl_rxq *rxq,
22262306a36Sopenharmony_ci				struct iwl_rx_mem_buffer *rxb)
22362306a36Sopenharmony_ci{
22462306a36Sopenharmony_ci	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
22562306a36Sopenharmony_ci		struct iwl_rx_transfer_desc *bd = rxq->bd;
22662306a36Sopenharmony_ci
22762306a36Sopenharmony_ci		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
22862306a36Sopenharmony_ci
22962306a36Sopenharmony_ci		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
23062306a36Sopenharmony_ci		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
23162306a36Sopenharmony_ci	} else {
23262306a36Sopenharmony_ci		__le64 *bd = rxq->bd;
23362306a36Sopenharmony_ci
23462306a36Sopenharmony_ci		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
23562306a36Sopenharmony_ci	}
23662306a36Sopenharmony_ci
23762306a36Sopenharmony_ci	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
23862306a36Sopenharmony_ci		     (u32)rxb->vid, rxq->id, rxq->write);
23962306a36Sopenharmony_ci}
24062306a36Sopenharmony_ci
/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 *
 * Hands every buffer on rxq->rx_free back to the hardware by writing
 * its DMA address / virtual RB id into the free-BD circular buffer,
 * then updates the device write pointer (consumed in multiples of 8).
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		/* queue_size is a power of 2, so the mask wraps the index */
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}
28762306a36Sopenharmony_ci
/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 *
 * Moves buffers from rxq->rx_free into the legacy RBD circular buffer
 * as long as there is queue space, then updates the device write
 * pointer (consumed in multiples of 8).
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}
33662306a36Sopenharmony_ci
33762306a36Sopenharmony_ci/*
33862306a36Sopenharmony_ci * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
33962306a36Sopenharmony_ci *
34062306a36Sopenharmony_ci * If there are slots in the RX queue that need to be restocked,
34162306a36Sopenharmony_ci * and we have free pre-allocated buffers, fill the ranks as much
34262306a36Sopenharmony_ci * as we can, pulling from rx_free.
34362306a36Sopenharmony_ci *
34462306a36Sopenharmony_ci * This moves the 'write' index forward to catch up with 'processed', and
34562306a36Sopenharmony_ci * also updates the memory address in the firmware to reference the new
34662306a36Sopenharmony_ci * target buffer.
34762306a36Sopenharmony_ci */
34862306a36Sopenharmony_cistatic
34962306a36Sopenharmony_civoid iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
35062306a36Sopenharmony_ci{
35162306a36Sopenharmony_ci	if (trans->trans_cfg->mq_rx_supported)
35262306a36Sopenharmony_ci		iwl_pcie_rxmq_restock(trans, rxq);
35362306a36Sopenharmony_ci	else
35462306a36Sopenharmony_ci		iwl_pcie_rxsq_restock(trans, rxq);
35562306a36Sopenharmony_ci}
35662306a36Sopenharmony_ci
/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 *
 * When one allocated page can hold at least two receive buffers, the
 * partially-used page is cached in trans_pcie->alloc_page and later
 * calls carve further RBs out of it; each handed-out RB takes its own
 * page reference via get_page(). *offset receives the byte offset of
 * the RB within the returned page. Returns NULL on allocation failure.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   u32 *offset, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
	struct page *page;
	gfp_t gfp_mask = priority;

	/* higher-order allocations are requested as compound pages */
	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	/* try to carve the next RB out of the cached, partially-used page */
	if (trans_pcie->alloc_page) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		/* recheck under the lock */
		if (trans_pcie->alloc_page) {
			*offset = trans_pcie->alloc_page_used;
			page = trans_pcie->alloc_page;
			trans_pcie->alloc_page_used += rbsize;
			if (trans_pcie->alloc_page_used >= allocsize)
				trans_pcie->alloc_page = NULL; /* exhausted */
			else
				get_page(page); /* one ref per handed-out RB */
			spin_unlock_bh(&trans_pcie->alloc_page_lock);
			return page;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}

	/* cache the remainder if the page can hold at least one more RB */
	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}
41962306a36Sopenharmony_ci
/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		/* stop once there are no more used RBDs to service */
		spin_lock_bh(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			return;
		}
		spin_unlock_bh(&rxq->lock);

		/* allocate outside the lock */
		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock_bh(&rxq->lock);

		/* the list may have drained while the lock was dropped */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_bh(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/* mapping failed: return the RBD to rx_used and bail */
			rxb->page = NULL;
			spin_lock_bh(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock_bh(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_bh(&rxq->lock);
	}
}
48762306a36Sopenharmony_ci
48862306a36Sopenharmony_civoid iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
48962306a36Sopenharmony_ci{
49062306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
49162306a36Sopenharmony_ci	int i;
49262306a36Sopenharmony_ci
49362306a36Sopenharmony_ci	if (!trans_pcie->rx_pool)
49462306a36Sopenharmony_ci		return;
49562306a36Sopenharmony_ci
49662306a36Sopenharmony_ci	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
49762306a36Sopenharmony_ci		if (!trans_pcie->rx_pool[i].page)
49862306a36Sopenharmony_ci			continue;
49962306a36Sopenharmony_ci		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
50062306a36Sopenharmony_ci			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
50162306a36Sopenharmony_ci		__free_pages(trans_pcie->rx_pool[i].page,
50262306a36Sopenharmony_ci			     trans_pcie->rx_page_order);
50362306a36Sopenharmony_ci		trans_pcie->rx_pool[i].page = NULL;
50462306a36Sopenharmony_ci	}
50562306a36Sopenharmony_ci}
50662306a36Sopenharmony_ci
50762306a36Sopenharmony_ci/*
50862306a36Sopenharmony_ci * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
50962306a36Sopenharmony_ci *
51062306a36Sopenharmony_ci * Allocates for each received request 8 pages
51162306a36Sopenharmony_ci * Called as a scheduled work item.
51262306a36Sopenharmony_ci */
51362306a36Sopenharmony_cistatic void iwl_pcie_rx_allocator(struct iwl_trans *trans)
51462306a36Sopenharmony_ci{
51562306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
51662306a36Sopenharmony_ci	struct iwl_rb_allocator *rba = &trans_pcie->rba;
51762306a36Sopenharmony_ci	struct list_head local_empty;
51862306a36Sopenharmony_ci	int pending = atomic_read(&rba->req_pending);
51962306a36Sopenharmony_ci
52062306a36Sopenharmony_ci	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);
52162306a36Sopenharmony_ci
52262306a36Sopenharmony_ci	/* If we were scheduled - there is at least one request */
52362306a36Sopenharmony_ci	spin_lock_bh(&rba->lock);
52462306a36Sopenharmony_ci	/* swap out the rba->rbd_empty to a local list */
52562306a36Sopenharmony_ci	list_replace_init(&rba->rbd_empty, &local_empty);
52662306a36Sopenharmony_ci	spin_unlock_bh(&rba->lock);
52762306a36Sopenharmony_ci
52862306a36Sopenharmony_ci	while (pending) {
52962306a36Sopenharmony_ci		int i;
53062306a36Sopenharmony_ci		LIST_HEAD(local_allocated);
53162306a36Sopenharmony_ci		gfp_t gfp_mask = GFP_KERNEL;
53262306a36Sopenharmony_ci
53362306a36Sopenharmony_ci		/* Do not post a warning if there are only a few requests */
53462306a36Sopenharmony_ci		if (pending < RX_PENDING_WATERMARK)
53562306a36Sopenharmony_ci			gfp_mask |= __GFP_NOWARN;
53662306a36Sopenharmony_ci
53762306a36Sopenharmony_ci		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
53862306a36Sopenharmony_ci			struct iwl_rx_mem_buffer *rxb;
53962306a36Sopenharmony_ci			struct page *page;
54062306a36Sopenharmony_ci
54162306a36Sopenharmony_ci			/* List should never be empty - each reused RBD is
54262306a36Sopenharmony_ci			 * returned to the list, and initial pool covers any
54362306a36Sopenharmony_ci			 * possible gap between the time the page is allocated
54462306a36Sopenharmony_ci			 * to the time the RBD is added.
54562306a36Sopenharmony_ci			 */
54662306a36Sopenharmony_ci			BUG_ON(list_empty(&local_empty));
54762306a36Sopenharmony_ci			/* Get the first rxb from the rbd list */
54862306a36Sopenharmony_ci			rxb = list_first_entry(&local_empty,
54962306a36Sopenharmony_ci					       struct iwl_rx_mem_buffer, list);
55062306a36Sopenharmony_ci			BUG_ON(rxb->page);
55162306a36Sopenharmony_ci
55262306a36Sopenharmony_ci			/* Alloc a new receive buffer */
55362306a36Sopenharmony_ci			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
55462306a36Sopenharmony_ci						      gfp_mask);
55562306a36Sopenharmony_ci			if (!page)
55662306a36Sopenharmony_ci				continue;
55762306a36Sopenharmony_ci			rxb->page = page;
55862306a36Sopenharmony_ci
55962306a36Sopenharmony_ci			/* Get physical address of the RB */
56062306a36Sopenharmony_ci			rxb->page_dma = dma_map_page(trans->dev, page,
56162306a36Sopenharmony_ci						     rxb->offset,
56262306a36Sopenharmony_ci						     trans_pcie->rx_buf_bytes,
56362306a36Sopenharmony_ci						     DMA_FROM_DEVICE);
56462306a36Sopenharmony_ci			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
56562306a36Sopenharmony_ci				rxb->page = NULL;
56662306a36Sopenharmony_ci				__free_pages(page, trans_pcie->rx_page_order);
56762306a36Sopenharmony_ci				continue;
56862306a36Sopenharmony_ci			}
56962306a36Sopenharmony_ci
57062306a36Sopenharmony_ci			/* move the allocated entry to the out list */
57162306a36Sopenharmony_ci			list_move(&rxb->list, &local_allocated);
57262306a36Sopenharmony_ci			i++;
57362306a36Sopenharmony_ci		}
57462306a36Sopenharmony_ci
57562306a36Sopenharmony_ci		atomic_dec(&rba->req_pending);
57662306a36Sopenharmony_ci		pending--;
57762306a36Sopenharmony_ci
57862306a36Sopenharmony_ci		if (!pending) {
57962306a36Sopenharmony_ci			pending = atomic_read(&rba->req_pending);
58062306a36Sopenharmony_ci			if (pending)
58162306a36Sopenharmony_ci				IWL_DEBUG_TPT(trans,
58262306a36Sopenharmony_ci					      "Got more pending allocation requests = %d\n",
58362306a36Sopenharmony_ci					      pending);
58462306a36Sopenharmony_ci		}
58562306a36Sopenharmony_ci
58662306a36Sopenharmony_ci		spin_lock_bh(&rba->lock);
58762306a36Sopenharmony_ci		/* add the allocated rbds to the allocator allocated list */
58862306a36Sopenharmony_ci		list_splice_tail(&local_allocated, &rba->rbd_allocated);
58962306a36Sopenharmony_ci		/* get more empty RBDs for current pending requests */
59062306a36Sopenharmony_ci		list_splice_tail_init(&rba->rbd_empty, &local_empty);
59162306a36Sopenharmony_ci		spin_unlock_bh(&rba->lock);
59262306a36Sopenharmony_ci
59362306a36Sopenharmony_ci		atomic_inc(&rba->req_ready);
59462306a36Sopenharmony_ci
59562306a36Sopenharmony_ci	}
59662306a36Sopenharmony_ci
59762306a36Sopenharmony_ci	spin_lock_bh(&rba->lock);
59862306a36Sopenharmony_ci	/* return unused rbds to the allocator empty list */
59962306a36Sopenharmony_ci	list_splice_tail(&local_empty, &rba->rbd_empty);
60062306a36Sopenharmony_ci	spin_unlock_bh(&rba->lock);
60162306a36Sopenharmony_ci
60262306a36Sopenharmony_ci	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
60362306a36Sopenharmony_ci}
60462306a36Sopenharmony_ci
60562306a36Sopenharmony_ci/*
60662306a36Sopenharmony_ci * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
60762306a36Sopenharmony_ci.*
60862306a36Sopenharmony_ci.* Called by queue when the queue posted allocation request and
60962306a36Sopenharmony_ci * has freed 8 RBDs in order to restock itself.
61062306a36Sopenharmony_ci * This function directly moves the allocated RBs to the queue's ownership
61162306a36Sopenharmony_ci * and updates the relevant counters.
61262306a36Sopenharmony_ci */
61362306a36Sopenharmony_cistatic void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
61462306a36Sopenharmony_ci				      struct iwl_rxq *rxq)
61562306a36Sopenharmony_ci{
61662306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
61762306a36Sopenharmony_ci	struct iwl_rb_allocator *rba = &trans_pcie->rba;
61862306a36Sopenharmony_ci	int i;
61962306a36Sopenharmony_ci
62062306a36Sopenharmony_ci	lockdep_assert_held(&rxq->lock);
62162306a36Sopenharmony_ci
62262306a36Sopenharmony_ci	/*
62362306a36Sopenharmony_ci	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
62462306a36Sopenharmony_ci	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
62562306a36Sopenharmony_ci	 * function will return early, as there are no ready requests.
62662306a36Sopenharmony_ci	 * atomic_dec_if_positive will perofrm the *actual* decrement only if
62762306a36Sopenharmony_ci	 * req_ready > 0, i.e. - there are ready requests and the function
62862306a36Sopenharmony_ci	 * hands one request to the caller.
62962306a36Sopenharmony_ci	 */
63062306a36Sopenharmony_ci	if (atomic_dec_if_positive(&rba->req_ready) < 0)
63162306a36Sopenharmony_ci		return;
63262306a36Sopenharmony_ci
63362306a36Sopenharmony_ci	spin_lock(&rba->lock);
63462306a36Sopenharmony_ci	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
63562306a36Sopenharmony_ci		/* Get next free Rx buffer, remove it from free list */
63662306a36Sopenharmony_ci		struct iwl_rx_mem_buffer *rxb =
63762306a36Sopenharmony_ci			list_first_entry(&rba->rbd_allocated,
63862306a36Sopenharmony_ci					 struct iwl_rx_mem_buffer, list);
63962306a36Sopenharmony_ci
64062306a36Sopenharmony_ci		list_move(&rxb->list, &rxq->rx_free);
64162306a36Sopenharmony_ci	}
64262306a36Sopenharmony_ci	spin_unlock(&rba->lock);
64362306a36Sopenharmony_ci
64462306a36Sopenharmony_ci	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
64562306a36Sopenharmony_ci	rxq->free_count += RX_CLAIM_REQ_ALLOC;
64662306a36Sopenharmony_ci}
64762306a36Sopenharmony_ci
64862306a36Sopenharmony_civoid iwl_pcie_rx_allocator_work(struct work_struct *data)
64962306a36Sopenharmony_ci{
65062306a36Sopenharmony_ci	struct iwl_rb_allocator *rba_p =
65162306a36Sopenharmony_ci		container_of(data, struct iwl_rb_allocator, rx_alloc);
65262306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie =
65362306a36Sopenharmony_ci		container_of(rba_p, struct iwl_trans_pcie, rba);
65462306a36Sopenharmony_ci
65562306a36Sopenharmony_ci	iwl_pcie_rx_allocator(trans_pcie->trans);
65662306a36Sopenharmony_ci}
65762306a36Sopenharmony_ci
65862306a36Sopenharmony_cistatic int iwl_pcie_free_bd_size(struct iwl_trans *trans)
65962306a36Sopenharmony_ci{
66062306a36Sopenharmony_ci	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
66162306a36Sopenharmony_ci		return sizeof(struct iwl_rx_transfer_desc);
66262306a36Sopenharmony_ci
66362306a36Sopenharmony_ci	return trans->trans_cfg->mq_rx_supported ?
66462306a36Sopenharmony_ci			sizeof(__le64) : sizeof(__le32);
66562306a36Sopenharmony_ci}
66662306a36Sopenharmony_ci
66762306a36Sopenharmony_cistatic int iwl_pcie_used_bd_size(struct iwl_trans *trans)
66862306a36Sopenharmony_ci{
66962306a36Sopenharmony_ci	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
67062306a36Sopenharmony_ci		return sizeof(struct iwl_rx_completion_desc_bz);
67162306a36Sopenharmony_ci
67262306a36Sopenharmony_ci	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
67362306a36Sopenharmony_ci		return sizeof(struct iwl_rx_completion_desc);
67462306a36Sopenharmony_ci
67562306a36Sopenharmony_ci	return sizeof(__le32);
67662306a36Sopenharmony_ci}
67762306a36Sopenharmony_ci
67862306a36Sopenharmony_cistatic void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
67962306a36Sopenharmony_ci				  struct iwl_rxq *rxq)
68062306a36Sopenharmony_ci{
68162306a36Sopenharmony_ci	int free_size = iwl_pcie_free_bd_size(trans);
68262306a36Sopenharmony_ci
68362306a36Sopenharmony_ci	if (rxq->bd)
68462306a36Sopenharmony_ci		dma_free_coherent(trans->dev,
68562306a36Sopenharmony_ci				  free_size * rxq->queue_size,
68662306a36Sopenharmony_ci				  rxq->bd, rxq->bd_dma);
68762306a36Sopenharmony_ci	rxq->bd_dma = 0;
68862306a36Sopenharmony_ci	rxq->bd = NULL;
68962306a36Sopenharmony_ci
69062306a36Sopenharmony_ci	rxq->rb_stts_dma = 0;
69162306a36Sopenharmony_ci	rxq->rb_stts = NULL;
69262306a36Sopenharmony_ci
69362306a36Sopenharmony_ci	if (rxq->used_bd)
69462306a36Sopenharmony_ci		dma_free_coherent(trans->dev,
69562306a36Sopenharmony_ci				  iwl_pcie_used_bd_size(trans) *
69662306a36Sopenharmony_ci					rxq->queue_size,
69762306a36Sopenharmony_ci				  rxq->used_bd, rxq->used_bd_dma);
69862306a36Sopenharmony_ci	rxq->used_bd_dma = 0;
69962306a36Sopenharmony_ci	rxq->used_bd = NULL;
70062306a36Sopenharmony_ci}
70162306a36Sopenharmony_ci
70262306a36Sopenharmony_cistatic size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
70362306a36Sopenharmony_ci{
70462306a36Sopenharmony_ci	bool use_rx_td = (trans->trans_cfg->device_family >=
70562306a36Sopenharmony_ci			  IWL_DEVICE_FAMILY_AX210);
70662306a36Sopenharmony_ci
70762306a36Sopenharmony_ci	if (use_rx_td)
70862306a36Sopenharmony_ci		return sizeof(__le16);
70962306a36Sopenharmony_ci
71062306a36Sopenharmony_ci	return sizeof(struct iwl_rb_status);
71162306a36Sopenharmony_ci}
71262306a36Sopenharmony_ci
71362306a36Sopenharmony_cistatic int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
71462306a36Sopenharmony_ci				  struct iwl_rxq *rxq)
71562306a36Sopenharmony_ci{
71662306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
71762306a36Sopenharmony_ci	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
71862306a36Sopenharmony_ci	struct device *dev = trans->dev;
71962306a36Sopenharmony_ci	int i;
72062306a36Sopenharmony_ci	int free_size;
72162306a36Sopenharmony_ci
72262306a36Sopenharmony_ci	spin_lock_init(&rxq->lock);
72362306a36Sopenharmony_ci	if (trans->trans_cfg->mq_rx_supported)
72462306a36Sopenharmony_ci		rxq->queue_size = trans->cfg->num_rbds;
72562306a36Sopenharmony_ci	else
72662306a36Sopenharmony_ci		rxq->queue_size = RX_QUEUE_SIZE;
72762306a36Sopenharmony_ci
72862306a36Sopenharmony_ci	free_size = iwl_pcie_free_bd_size(trans);
72962306a36Sopenharmony_ci
73062306a36Sopenharmony_ci	/*
73162306a36Sopenharmony_ci	 * Allocate the circular buffer of Read Buffer Descriptors
73262306a36Sopenharmony_ci	 * (RBDs)
73362306a36Sopenharmony_ci	 */
73462306a36Sopenharmony_ci	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
73562306a36Sopenharmony_ci				     &rxq->bd_dma, GFP_KERNEL);
73662306a36Sopenharmony_ci	if (!rxq->bd)
73762306a36Sopenharmony_ci		goto err;
73862306a36Sopenharmony_ci
73962306a36Sopenharmony_ci	if (trans->trans_cfg->mq_rx_supported) {
74062306a36Sopenharmony_ci		rxq->used_bd = dma_alloc_coherent(dev,
74162306a36Sopenharmony_ci						  iwl_pcie_used_bd_size(trans) *
74262306a36Sopenharmony_ci							rxq->queue_size,
74362306a36Sopenharmony_ci						  &rxq->used_bd_dma,
74462306a36Sopenharmony_ci						  GFP_KERNEL);
74562306a36Sopenharmony_ci		if (!rxq->used_bd)
74662306a36Sopenharmony_ci			goto err;
74762306a36Sopenharmony_ci	}
74862306a36Sopenharmony_ci
74962306a36Sopenharmony_ci	rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
75062306a36Sopenharmony_ci	rxq->rb_stts_dma =
75162306a36Sopenharmony_ci		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;
75262306a36Sopenharmony_ci
75362306a36Sopenharmony_ci	return 0;
75462306a36Sopenharmony_ci
75562306a36Sopenharmony_cierr:
75662306a36Sopenharmony_ci	for (i = 0; i < trans->num_rx_queues; i++) {
75762306a36Sopenharmony_ci		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
75862306a36Sopenharmony_ci
75962306a36Sopenharmony_ci		iwl_pcie_free_rxq_dma(trans, rxq);
76062306a36Sopenharmony_ci	}
76162306a36Sopenharmony_ci
76262306a36Sopenharmony_ci	return -ENOMEM;
76362306a36Sopenharmony_ci}
76462306a36Sopenharmony_ci
76562306a36Sopenharmony_cistatic int iwl_pcie_rx_alloc(struct iwl_trans *trans)
76662306a36Sopenharmony_ci{
76762306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
76862306a36Sopenharmony_ci	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
76962306a36Sopenharmony_ci	struct iwl_rb_allocator *rba = &trans_pcie->rba;
77062306a36Sopenharmony_ci	int i, ret;
77162306a36Sopenharmony_ci
77262306a36Sopenharmony_ci	if (WARN_ON(trans_pcie->rxq))
77362306a36Sopenharmony_ci		return -EINVAL;
77462306a36Sopenharmony_ci
77562306a36Sopenharmony_ci	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
77662306a36Sopenharmony_ci				  GFP_KERNEL);
77762306a36Sopenharmony_ci	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
77862306a36Sopenharmony_ci				      sizeof(trans_pcie->rx_pool[0]),
77962306a36Sopenharmony_ci				      GFP_KERNEL);
78062306a36Sopenharmony_ci	trans_pcie->global_table =
78162306a36Sopenharmony_ci		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
78262306a36Sopenharmony_ci			sizeof(trans_pcie->global_table[0]),
78362306a36Sopenharmony_ci			GFP_KERNEL);
78462306a36Sopenharmony_ci	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
78562306a36Sopenharmony_ci	    !trans_pcie->global_table) {
78662306a36Sopenharmony_ci		ret = -ENOMEM;
78762306a36Sopenharmony_ci		goto err;
78862306a36Sopenharmony_ci	}
78962306a36Sopenharmony_ci
79062306a36Sopenharmony_ci	spin_lock_init(&rba->lock);
79162306a36Sopenharmony_ci
79262306a36Sopenharmony_ci	/*
79362306a36Sopenharmony_ci	 * Allocate the driver's pointer to receive buffer status.
79462306a36Sopenharmony_ci	 * Allocate for all queues continuously (HW requirement).
79562306a36Sopenharmony_ci	 */
79662306a36Sopenharmony_ci	trans_pcie->base_rb_stts =
79762306a36Sopenharmony_ci			dma_alloc_coherent(trans->dev,
79862306a36Sopenharmony_ci					   rb_stts_size * trans->num_rx_queues,
79962306a36Sopenharmony_ci					   &trans_pcie->base_rb_stts_dma,
80062306a36Sopenharmony_ci					   GFP_KERNEL);
80162306a36Sopenharmony_ci	if (!trans_pcie->base_rb_stts) {
80262306a36Sopenharmony_ci		ret = -ENOMEM;
80362306a36Sopenharmony_ci		goto err;
80462306a36Sopenharmony_ci	}
80562306a36Sopenharmony_ci
80662306a36Sopenharmony_ci	for (i = 0; i < trans->num_rx_queues; i++) {
80762306a36Sopenharmony_ci		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
80862306a36Sopenharmony_ci
80962306a36Sopenharmony_ci		rxq->id = i;
81062306a36Sopenharmony_ci		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
81162306a36Sopenharmony_ci		if (ret)
81262306a36Sopenharmony_ci			goto err;
81362306a36Sopenharmony_ci	}
81462306a36Sopenharmony_ci	return 0;
81562306a36Sopenharmony_ci
81662306a36Sopenharmony_cierr:
81762306a36Sopenharmony_ci	if (trans_pcie->base_rb_stts) {
81862306a36Sopenharmony_ci		dma_free_coherent(trans->dev,
81962306a36Sopenharmony_ci				  rb_stts_size * trans->num_rx_queues,
82062306a36Sopenharmony_ci				  trans_pcie->base_rb_stts,
82162306a36Sopenharmony_ci				  trans_pcie->base_rb_stts_dma);
82262306a36Sopenharmony_ci		trans_pcie->base_rb_stts = NULL;
82362306a36Sopenharmony_ci		trans_pcie->base_rb_stts_dma = 0;
82462306a36Sopenharmony_ci	}
82562306a36Sopenharmony_ci	kfree(trans_pcie->rx_pool);
82662306a36Sopenharmony_ci	trans_pcie->rx_pool = NULL;
82762306a36Sopenharmony_ci	kfree(trans_pcie->global_table);
82862306a36Sopenharmony_ci	trans_pcie->global_table = NULL;
82962306a36Sopenharmony_ci	kfree(trans_pcie->rxq);
83062306a36Sopenharmony_ci	trans_pcie->rxq = NULL;
83162306a36Sopenharmony_ci
83262306a36Sopenharmony_ci	return ret;
83362306a36Sopenharmony_ci}
83462306a36Sopenharmony_ci
83562306a36Sopenharmony_cistatic void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
83662306a36Sopenharmony_ci{
83762306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
83862306a36Sopenharmony_ci	u32 rb_size;
83962306a36Sopenharmony_ci	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
84062306a36Sopenharmony_ci
84162306a36Sopenharmony_ci	switch (trans_pcie->rx_buf_size) {
84262306a36Sopenharmony_ci	case IWL_AMSDU_4K:
84362306a36Sopenharmony_ci		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
84462306a36Sopenharmony_ci		break;
84562306a36Sopenharmony_ci	case IWL_AMSDU_8K:
84662306a36Sopenharmony_ci		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
84762306a36Sopenharmony_ci		break;
84862306a36Sopenharmony_ci	case IWL_AMSDU_12K:
84962306a36Sopenharmony_ci		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
85062306a36Sopenharmony_ci		break;
85162306a36Sopenharmony_ci	default:
85262306a36Sopenharmony_ci		WARN_ON(1);
85362306a36Sopenharmony_ci		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
85462306a36Sopenharmony_ci	}
85562306a36Sopenharmony_ci
85662306a36Sopenharmony_ci	if (!iwl_trans_grab_nic_access(trans))
85762306a36Sopenharmony_ci		return;
85862306a36Sopenharmony_ci
85962306a36Sopenharmony_ci	/* Stop Rx DMA */
86062306a36Sopenharmony_ci	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
86162306a36Sopenharmony_ci	/* reset and flush pointers */
86262306a36Sopenharmony_ci	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
86362306a36Sopenharmony_ci	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
86462306a36Sopenharmony_ci	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
86562306a36Sopenharmony_ci
86662306a36Sopenharmony_ci	/* Reset driver's Rx queue write index */
86762306a36Sopenharmony_ci	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
86862306a36Sopenharmony_ci
86962306a36Sopenharmony_ci	/* Tell device where to find RBD circular buffer in DRAM */
87062306a36Sopenharmony_ci	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
87162306a36Sopenharmony_ci		    (u32)(rxq->bd_dma >> 8));
87262306a36Sopenharmony_ci
87362306a36Sopenharmony_ci	/* Tell device where in DRAM to update its Rx status */
87462306a36Sopenharmony_ci	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
87562306a36Sopenharmony_ci		    rxq->rb_stts_dma >> 4);
87662306a36Sopenharmony_ci
87762306a36Sopenharmony_ci	/* Enable Rx DMA
87862306a36Sopenharmony_ci	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
87962306a36Sopenharmony_ci	 *      the credit mechanism in 5000 HW RX FIFO
88062306a36Sopenharmony_ci	 * Direct rx interrupts to hosts
88162306a36Sopenharmony_ci	 * Rx buffer size 4 or 8k or 12k
88262306a36Sopenharmony_ci	 * RB timeout 0x10
88362306a36Sopenharmony_ci	 * 256 RBDs
88462306a36Sopenharmony_ci	 */
88562306a36Sopenharmony_ci	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
88662306a36Sopenharmony_ci		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
88762306a36Sopenharmony_ci		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
88862306a36Sopenharmony_ci		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
88962306a36Sopenharmony_ci		    rb_size |
89062306a36Sopenharmony_ci		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
89162306a36Sopenharmony_ci		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
89262306a36Sopenharmony_ci
89362306a36Sopenharmony_ci	iwl_trans_release_nic_access(trans);
89462306a36Sopenharmony_ci
89562306a36Sopenharmony_ci	/* Set interrupt coalescing timer to default (2048 usecs) */
89662306a36Sopenharmony_ci	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
89762306a36Sopenharmony_ci
89862306a36Sopenharmony_ci	/* W/A for interrupt coalescing bug in 7260 and 3160 */
89962306a36Sopenharmony_ci	if (trans->cfg->host_interrupt_operation_mode)
90062306a36Sopenharmony_ci		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
90162306a36Sopenharmony_ci}
90262306a36Sopenharmony_ci
90362306a36Sopenharmony_cistatic void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
90462306a36Sopenharmony_ci{
90562306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
90662306a36Sopenharmony_ci	u32 rb_size, enabled = 0;
90762306a36Sopenharmony_ci	int i;
90862306a36Sopenharmony_ci
90962306a36Sopenharmony_ci	switch (trans_pcie->rx_buf_size) {
91062306a36Sopenharmony_ci	case IWL_AMSDU_2K:
91162306a36Sopenharmony_ci		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
91262306a36Sopenharmony_ci		break;
91362306a36Sopenharmony_ci	case IWL_AMSDU_4K:
91462306a36Sopenharmony_ci		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
91562306a36Sopenharmony_ci		break;
91662306a36Sopenharmony_ci	case IWL_AMSDU_8K:
91762306a36Sopenharmony_ci		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
91862306a36Sopenharmony_ci		break;
91962306a36Sopenharmony_ci	case IWL_AMSDU_12K:
92062306a36Sopenharmony_ci		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
92162306a36Sopenharmony_ci		break;
92262306a36Sopenharmony_ci	default:
92362306a36Sopenharmony_ci		WARN_ON(1);
92462306a36Sopenharmony_ci		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
92562306a36Sopenharmony_ci	}
92662306a36Sopenharmony_ci
92762306a36Sopenharmony_ci	if (!iwl_trans_grab_nic_access(trans))
92862306a36Sopenharmony_ci		return;
92962306a36Sopenharmony_ci
93062306a36Sopenharmony_ci	/* Stop Rx DMA */
93162306a36Sopenharmony_ci	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
93262306a36Sopenharmony_ci	/* disable free amd used rx queue operation */
93362306a36Sopenharmony_ci	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
93462306a36Sopenharmony_ci
93562306a36Sopenharmony_ci	for (i = 0; i < trans->num_rx_queues; i++) {
93662306a36Sopenharmony_ci		/* Tell device where to find RBD free table in DRAM */
93762306a36Sopenharmony_ci		iwl_write_prph64_no_grab(trans,
93862306a36Sopenharmony_ci					 RFH_Q_FRBDCB_BA_LSB(i),
93962306a36Sopenharmony_ci					 trans_pcie->rxq[i].bd_dma);
94062306a36Sopenharmony_ci		/* Tell device where to find RBD used table in DRAM */
94162306a36Sopenharmony_ci		iwl_write_prph64_no_grab(trans,
94262306a36Sopenharmony_ci					 RFH_Q_URBDCB_BA_LSB(i),
94362306a36Sopenharmony_ci					 trans_pcie->rxq[i].used_bd_dma);
94462306a36Sopenharmony_ci		/* Tell device where in DRAM to update its Rx status */
94562306a36Sopenharmony_ci		iwl_write_prph64_no_grab(trans,
94662306a36Sopenharmony_ci					 RFH_Q_URBD_STTS_WPTR_LSB(i),
94762306a36Sopenharmony_ci					 trans_pcie->rxq[i].rb_stts_dma);
94862306a36Sopenharmony_ci		/* Reset device indice tables */
94962306a36Sopenharmony_ci		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
95062306a36Sopenharmony_ci		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
95162306a36Sopenharmony_ci		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
95262306a36Sopenharmony_ci
95362306a36Sopenharmony_ci		enabled |= BIT(i) | BIT(i + 16);
95462306a36Sopenharmony_ci	}
95562306a36Sopenharmony_ci
95662306a36Sopenharmony_ci	/*
95762306a36Sopenharmony_ci	 * Enable Rx DMA
95862306a36Sopenharmony_ci	 * Rx buffer size 4 or 8k or 12k
95962306a36Sopenharmony_ci	 * Min RB size 4 or 8
96062306a36Sopenharmony_ci	 * Drop frames that exceed RB size
96162306a36Sopenharmony_ci	 * 512 RBDs
96262306a36Sopenharmony_ci	 */
96362306a36Sopenharmony_ci	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
96462306a36Sopenharmony_ci			       RFH_DMA_EN_ENABLE_VAL | rb_size |
96562306a36Sopenharmony_ci			       RFH_RXF_DMA_MIN_RB_4_8 |
96662306a36Sopenharmony_ci			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
96762306a36Sopenharmony_ci			       RFH_RXF_DMA_RBDCB_SIZE_512);
96862306a36Sopenharmony_ci
96962306a36Sopenharmony_ci	/*
97062306a36Sopenharmony_ci	 * Activate DMA snooping.
97162306a36Sopenharmony_ci	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
97262306a36Sopenharmony_ci	 * Default queue is 0
97362306a36Sopenharmony_ci	 */
97462306a36Sopenharmony_ci	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
97562306a36Sopenharmony_ci			       RFH_GEN_CFG_RFH_DMA_SNOOP |
97662306a36Sopenharmony_ci			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
97762306a36Sopenharmony_ci			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
97862306a36Sopenharmony_ci			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
97962306a36Sopenharmony_ci					       trans->trans_cfg->integrated ?
98062306a36Sopenharmony_ci					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
98162306a36Sopenharmony_ci					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
98262306a36Sopenharmony_ci	/* Enable the relevant rx queues */
98362306a36Sopenharmony_ci	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
98462306a36Sopenharmony_ci
98562306a36Sopenharmony_ci	iwl_trans_release_nic_access(trans);
98662306a36Sopenharmony_ci
98762306a36Sopenharmony_ci	/* Set interrupt coalescing timer to default (2048 usecs) */
98862306a36Sopenharmony_ci	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
98962306a36Sopenharmony_ci}
99062306a36Sopenharmony_ci
99162306a36Sopenharmony_civoid iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
99262306a36Sopenharmony_ci{
99362306a36Sopenharmony_ci	lockdep_assert_held(&rxq->lock);
99462306a36Sopenharmony_ci
99562306a36Sopenharmony_ci	INIT_LIST_HEAD(&rxq->rx_free);
99662306a36Sopenharmony_ci	INIT_LIST_HEAD(&rxq->rx_used);
99762306a36Sopenharmony_ci	rxq->free_count = 0;
99862306a36Sopenharmony_ci	rxq->used_count = 0;
99962306a36Sopenharmony_ci}
100062306a36Sopenharmony_ci
100162306a36Sopenharmony_cistatic int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
100262306a36Sopenharmony_ci
100362306a36Sopenharmony_cistatic int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
100462306a36Sopenharmony_ci{
100562306a36Sopenharmony_ci	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
100662306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie;
100762306a36Sopenharmony_ci	struct iwl_trans *trans;
100862306a36Sopenharmony_ci	int ret;
100962306a36Sopenharmony_ci
101062306a36Sopenharmony_ci	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
101162306a36Sopenharmony_ci	trans = trans_pcie->trans;
101262306a36Sopenharmony_ci
101362306a36Sopenharmony_ci	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
101462306a36Sopenharmony_ci
101562306a36Sopenharmony_ci	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
101662306a36Sopenharmony_ci		      rxq->id, ret, budget);
101762306a36Sopenharmony_ci
101862306a36Sopenharmony_ci	if (ret < budget) {
101962306a36Sopenharmony_ci		spin_lock(&trans_pcie->irq_lock);
102062306a36Sopenharmony_ci		if (test_bit(STATUS_INT_ENABLED, &trans->status))
102162306a36Sopenharmony_ci			_iwl_enable_interrupts(trans);
102262306a36Sopenharmony_ci		spin_unlock(&trans_pcie->irq_lock);
102362306a36Sopenharmony_ci
102462306a36Sopenharmony_ci		napi_complete_done(&rxq->napi, ret);
102562306a36Sopenharmony_ci	}
102662306a36Sopenharmony_ci
102762306a36Sopenharmony_ci	return ret;
102862306a36Sopenharmony_ci}
102962306a36Sopenharmony_ci
103062306a36Sopenharmony_cistatic int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
103162306a36Sopenharmony_ci{
103262306a36Sopenharmony_ci	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
103362306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie;
103462306a36Sopenharmony_ci	struct iwl_trans *trans;
103562306a36Sopenharmony_ci	int ret;
103662306a36Sopenharmony_ci
103762306a36Sopenharmony_ci	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
103862306a36Sopenharmony_ci	trans = trans_pcie->trans;
103962306a36Sopenharmony_ci
104062306a36Sopenharmony_ci	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
104162306a36Sopenharmony_ci	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
104262306a36Sopenharmony_ci		      budget);
104362306a36Sopenharmony_ci
104462306a36Sopenharmony_ci	if (ret < budget) {
104562306a36Sopenharmony_ci		int irq_line = rxq->id;
104662306a36Sopenharmony_ci
104762306a36Sopenharmony_ci		/* FIRST_RSS is shared with line 0 */
104862306a36Sopenharmony_ci		if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
104962306a36Sopenharmony_ci		    rxq->id == 1)
105062306a36Sopenharmony_ci			irq_line = 0;
105162306a36Sopenharmony_ci
105262306a36Sopenharmony_ci		spin_lock(&trans_pcie->irq_lock);
105362306a36Sopenharmony_ci		iwl_pcie_clear_irq(trans, irq_line);
105462306a36Sopenharmony_ci		spin_unlock(&trans_pcie->irq_lock);
105562306a36Sopenharmony_ci
105662306a36Sopenharmony_ci		napi_complete_done(&rxq->napi, ret);
105762306a36Sopenharmony_ci	}
105862306a36Sopenharmony_ci
105962306a36Sopenharmony_ci	return ret;
106062306a36Sopenharmony_ci}
106162306a36Sopenharmony_ci
106262306a36Sopenharmony_civoid iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
106362306a36Sopenharmony_ci{
106462306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
106562306a36Sopenharmony_ci	int i;
106662306a36Sopenharmony_ci
106762306a36Sopenharmony_ci	if (unlikely(!trans_pcie->rxq))
106862306a36Sopenharmony_ci		return;
106962306a36Sopenharmony_ci
107062306a36Sopenharmony_ci	for (i = 0; i < trans->num_rx_queues; i++) {
107162306a36Sopenharmony_ci		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
107262306a36Sopenharmony_ci
107362306a36Sopenharmony_ci		if (rxq && rxq->napi.poll)
107462306a36Sopenharmony_ci			napi_synchronize(&rxq->napi);
107562306a36Sopenharmony_ci	}
107662306a36Sopenharmony_ci}
107762306a36Sopenharmony_ci
107862306a36Sopenharmony_cistatic int _iwl_pcie_rx_init(struct iwl_trans *trans)
107962306a36Sopenharmony_ci{
108062306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
108162306a36Sopenharmony_ci	struct iwl_rxq *def_rxq;
108262306a36Sopenharmony_ci	struct iwl_rb_allocator *rba = &trans_pcie->rba;
108362306a36Sopenharmony_ci	int i, err, queue_size, allocator_pool_size, num_alloc;
108462306a36Sopenharmony_ci
108562306a36Sopenharmony_ci	if (!trans_pcie->rxq) {
108662306a36Sopenharmony_ci		err = iwl_pcie_rx_alloc(trans);
108762306a36Sopenharmony_ci		if (err)
108862306a36Sopenharmony_ci			return err;
108962306a36Sopenharmony_ci	}
109062306a36Sopenharmony_ci	def_rxq = trans_pcie->rxq;
109162306a36Sopenharmony_ci
109262306a36Sopenharmony_ci	cancel_work_sync(&rba->rx_alloc);
109362306a36Sopenharmony_ci
109462306a36Sopenharmony_ci	spin_lock_bh(&rba->lock);
109562306a36Sopenharmony_ci	atomic_set(&rba->req_pending, 0);
109662306a36Sopenharmony_ci	atomic_set(&rba->req_ready, 0);
109762306a36Sopenharmony_ci	INIT_LIST_HEAD(&rba->rbd_allocated);
109862306a36Sopenharmony_ci	INIT_LIST_HEAD(&rba->rbd_empty);
109962306a36Sopenharmony_ci	spin_unlock_bh(&rba->lock);
110062306a36Sopenharmony_ci
110162306a36Sopenharmony_ci	/* free all first - we overwrite everything here */
110262306a36Sopenharmony_ci	iwl_pcie_free_rbs_pool(trans);
110362306a36Sopenharmony_ci
110462306a36Sopenharmony_ci	for (i = 0; i < RX_QUEUE_SIZE; i++)
110562306a36Sopenharmony_ci		def_rxq->queue[i] = NULL;
110662306a36Sopenharmony_ci
110762306a36Sopenharmony_ci	for (i = 0; i < trans->num_rx_queues; i++) {
110862306a36Sopenharmony_ci		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
110962306a36Sopenharmony_ci
111062306a36Sopenharmony_ci		spin_lock_bh(&rxq->lock);
111162306a36Sopenharmony_ci		/*
111262306a36Sopenharmony_ci		 * Set read write pointer to reflect that we have processed
111362306a36Sopenharmony_ci		 * and used all buffers, but have not restocked the Rx queue
111462306a36Sopenharmony_ci		 * with fresh buffers
111562306a36Sopenharmony_ci		 */
111662306a36Sopenharmony_ci		rxq->read = 0;
111762306a36Sopenharmony_ci		rxq->write = 0;
111862306a36Sopenharmony_ci		rxq->write_actual = 0;
111962306a36Sopenharmony_ci		memset(rxq->rb_stts, 0,
112062306a36Sopenharmony_ci		       (trans->trans_cfg->device_family >=
112162306a36Sopenharmony_ci			IWL_DEVICE_FAMILY_AX210) ?
112262306a36Sopenharmony_ci		       sizeof(__le16) : sizeof(struct iwl_rb_status));
112362306a36Sopenharmony_ci
112462306a36Sopenharmony_ci		iwl_pcie_rx_init_rxb_lists(rxq);
112562306a36Sopenharmony_ci
112662306a36Sopenharmony_ci		spin_unlock_bh(&rxq->lock);
112762306a36Sopenharmony_ci
112862306a36Sopenharmony_ci		if (!rxq->napi.poll) {
112962306a36Sopenharmony_ci			int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
113062306a36Sopenharmony_ci
113162306a36Sopenharmony_ci			if (trans_pcie->msix_enabled)
113262306a36Sopenharmony_ci				poll = iwl_pcie_napi_poll_msix;
113362306a36Sopenharmony_ci
113462306a36Sopenharmony_ci			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
113562306a36Sopenharmony_ci				       poll);
113662306a36Sopenharmony_ci			napi_enable(&rxq->napi);
113762306a36Sopenharmony_ci		}
113862306a36Sopenharmony_ci
113962306a36Sopenharmony_ci	}
114062306a36Sopenharmony_ci
114162306a36Sopenharmony_ci	/* move the pool to the default queue and allocator ownerships */
114262306a36Sopenharmony_ci	queue_size = trans->trans_cfg->mq_rx_supported ?
114362306a36Sopenharmony_ci			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
114462306a36Sopenharmony_ci	allocator_pool_size = trans->num_rx_queues *
114562306a36Sopenharmony_ci		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
114662306a36Sopenharmony_ci	num_alloc = queue_size + allocator_pool_size;
114762306a36Sopenharmony_ci
114862306a36Sopenharmony_ci	for (i = 0; i < num_alloc; i++) {
114962306a36Sopenharmony_ci		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
115062306a36Sopenharmony_ci
115162306a36Sopenharmony_ci		if (i < allocator_pool_size)
115262306a36Sopenharmony_ci			list_add(&rxb->list, &rba->rbd_empty);
115362306a36Sopenharmony_ci		else
115462306a36Sopenharmony_ci			list_add(&rxb->list, &def_rxq->rx_used);
115562306a36Sopenharmony_ci		trans_pcie->global_table[i] = rxb;
115662306a36Sopenharmony_ci		rxb->vid = (u16)(i + 1);
115762306a36Sopenharmony_ci		rxb->invalid = true;
115862306a36Sopenharmony_ci	}
115962306a36Sopenharmony_ci
116062306a36Sopenharmony_ci	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
116162306a36Sopenharmony_ci
116262306a36Sopenharmony_ci	return 0;
116362306a36Sopenharmony_ci}
116462306a36Sopenharmony_ci
116562306a36Sopenharmony_ciint iwl_pcie_rx_init(struct iwl_trans *trans)
116662306a36Sopenharmony_ci{
116762306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
116862306a36Sopenharmony_ci	int ret = _iwl_pcie_rx_init(trans);
116962306a36Sopenharmony_ci
117062306a36Sopenharmony_ci	if (ret)
117162306a36Sopenharmony_ci		return ret;
117262306a36Sopenharmony_ci
117362306a36Sopenharmony_ci	if (trans->trans_cfg->mq_rx_supported)
117462306a36Sopenharmony_ci		iwl_pcie_rx_mq_hw_init(trans);
117562306a36Sopenharmony_ci	else
117662306a36Sopenharmony_ci		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
117762306a36Sopenharmony_ci
117862306a36Sopenharmony_ci	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
117962306a36Sopenharmony_ci
118062306a36Sopenharmony_ci	spin_lock_bh(&trans_pcie->rxq->lock);
118162306a36Sopenharmony_ci	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
118262306a36Sopenharmony_ci	spin_unlock_bh(&trans_pcie->rxq->lock);
118362306a36Sopenharmony_ci
118462306a36Sopenharmony_ci	return 0;
118562306a36Sopenharmony_ci}
118662306a36Sopenharmony_ci
118762306a36Sopenharmony_ciint iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
118862306a36Sopenharmony_ci{
118962306a36Sopenharmony_ci	/* Set interrupt coalescing timer to default (2048 usecs) */
119062306a36Sopenharmony_ci	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
119162306a36Sopenharmony_ci
119262306a36Sopenharmony_ci	/*
119362306a36Sopenharmony_ci	 * We don't configure the RFH.
119462306a36Sopenharmony_ci	 * Restock will be done at alive, after firmware configured the RFH.
119562306a36Sopenharmony_ci	 */
119662306a36Sopenharmony_ci	return _iwl_pcie_rx_init(trans);
119762306a36Sopenharmony_ci}
119862306a36Sopenharmony_ci
/*
 * iwl_pcie_rx_free - release all RX resources held by the transport
 *
 * Counterpart of the RX allocation/init path: stops the background page
 * allocator worker, frees the RB pool pages, the shared rb_stts DMA area,
 * each queue's DMA rings and NAPI context, and finally the host-side
 * arrays. Safe to call when nothing was ever allocated (rxq == NULL).
 */
void iwl_pcie_rx_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	/*
	 * if rxq is NULL, it means that nothing has been allocated,
	 * exit now
	 */
	if (!trans_pcie->rxq) {
		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
		return;
	}

	/* stop the allocator worker before freeing the buffers it touches */
	cancel_work_sync(&rba->rx_alloc);

	iwl_pcie_free_rbs_pool(trans);

	/* shared (all-queues) receive-buffer status DMA area */
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);

		/* NAPI was only registered for queues that got initialized */
		if (rxq->napi.poll) {
			napi_disable(&rxq->napi);
			netif_napi_del(&rxq->napi);
		}
	}
	kfree(trans_pcie->rx_pool);
	kfree(trans_pcie->global_table);
	kfree(trans_pcie->rxq);

	/* free any page still cached by the transport for RB allocation */
	if (trans_pcie->alloc_page)
		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
}
124562306a36Sopenharmony_ci
/*
 * Splice every RBD queued on rxq->rx_used (buffers whose pages were freed
 * or stolen) onto the allocator's rbd_empty list in one shot, so the
 * allocator worker can attach fresh pages to them.
 */
static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
					  struct iwl_rb_allocator *rba)
{
	/* rbd_empty is shared with the allocator worker -> own lock */
	spin_lock(&rba->lock);
	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
	spin_unlock(&rba->lock);
}
125362306a36Sopenharmony_ci
/*
 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
 *
 * Called when a RBD can be reused (its page was stolen by the op-mode or
 * could not be re-mapped for DMA). The RBD is transferred to the
 * allocator; once RX_POST_REQ_ALLOC such RBDs have accumulated, a page
 * allocation request is posted to the allocator worker.
 */
static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
				  struct iwl_rx_mem_buffer *rxb,
				  struct iwl_rxq *rxq, bool emergency)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;

	/* Move the RBD to the used list; it will be moved to the allocator
	 * in batches before claiming or posting a request. */
	list_add_tail(&rxb->list, &rxq->rx_used);

	/* in emergency mode the RX path replenishes buffers itself, so
	 * skip the allocator bookkeeping below */
	if (unlikely(emergency))
		return;

	/* Count the allocator owned RBDs */
	rxq->used_count++;

	/* If we have RX_POST_REQ_ALLOC newly released rx buffers, issue a
	 * request to the allocator. The modulo on RX_CLAIM_REQ_ALLOC covers
	 * the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
	 * earlier but still need to post another request.
	 */
	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
		/* Move the RX_POST_REQ_ALLOC RBDs to allocator ownership;
		 * the allocator has more RBDs from the pool to complete
		 * the request with. */
		iwl_pcie_rx_move_to_allocator(rxq, rba);

		atomic_inc(&rba->req_pending);
		queue_work(rba->alloc_wq, &rba->rx_alloc);
	}
}
129162306a36Sopenharmony_ci
/*
 * iwl_pcie_rx_handle_rb - process one firmware-filled receive buffer
 *
 * Unmaps the RB's page and walks every packet packed into it, dispatching
 * each to the op-mode (default queue vs RSS queue) and completing the
 * host command a packet is a response to, if any. Afterwards the page is
 * either re-mapped for DMA and returned to rxq->rx_free, or - if stolen
 * by a handler or unmappable - the RBD is recycled via
 * iwl_pcie_rx_reuse_rbd().
 *
 * Called from iwl_pcie_rx_handle() with rxq->lock held.
 * NOTE(review): the @i parameter is not referenced in this body (the
 * reclaim loop declares its own local i) -- kept for the call signature.
 */
static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb,
				bool emergency,
				int i)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	bool page_stolen = false;
	int max_len = trans_pcie->rx_buf_bytes;
	u32 offset = 0;

	if (WARN_ON(!rxb))
		return;

	/* give the page back to the CPU before parsing its contents */
	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);

	/* a single RB may hold several packets, each aligned to
	 * FH_RSCSR_FRAME_ALIGN; stop when the next header can't fit */
	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
		struct iwl_rx_packet *pkt;
		bool reclaim;
		int len;
		struct iwl_rx_cmd_buffer rxcb = {
			._offset = rxb->offset + offset,
			._rx_page_order = trans_pcie->rx_page_order,
			._page = rxb->page,
			._page_stolen = false,
			.truesize = max_len,
		};

		pkt = rxb_addr(&rxcb);

		/* device marks the end of valid data with an invalid frame */
		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
			IWL_DEBUG_RX(trans,
				     "Q %d: RB end marker at offset %d\n",
				     rxq->id, offset);
			break;
		}

		/* sanity: the packet should carry the queue it arrived on */
		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS != rxq->id,
		     "frame on invalid queue - is on %d and indicates %d\n",
		     rxq->id,
		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
			FH_RSCSR_RXQ_POS);

		IWL_DEBUG_RX(trans,
			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
			     rxq->id, offset,
			     iwl_get_cmd_string(trans,
						WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
			     pkt->hdr.group_id, pkt->hdr.cmd,
			     le16_to_cpu(pkt->hdr.sequence));

		len = iwl_rx_packet_len(pkt);
		len += sizeof(u32); /* account for status word */

		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);

		/* check that what the device tells us made sense */
		if (len < sizeof(*pkt) || offset > max_len)
			break;

		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);

		/* Reclaim a command buffer only if this packet is a response
		 *   to a (driver-originated) command.
		 * If the packet (e.g. Rx frame) originated from uCode,
		 *   there is no command buffer to reclaim.
		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
		 *   but apparently a few don't get set; catch them here. */
		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
		if (reclaim && !pkt->hdr.group_id) {
			int i;

			/* some commands never get a completion reclaim */
			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
				if (trans_pcie->no_reclaim_cmds[i] ==
							pkt->hdr.cmd) {
					reclaim = false;
					break;
				}
			}
		}

		/* dispatch to the op-mode; RSS queues use a separate entry */
		if (rxq->id == IWL_DEFAULT_RX_QUEUE)
			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
				       &rxcb);
		else
			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
					   &rxcb, rxq->id);

		/*
		 * After here, we should always check rxcb._page_stolen,
		 * if it is true then one of the handlers took the page.
		 */

		if (reclaim && txq) {
			u16 sequence = le16_to_cpu(pkt->hdr.sequence);
			int index = SEQ_TO_INDEX(sequence);
			int cmd_index = iwl_txq_get_cmd_index(txq, index);

			/* the command's duplicated buffer is no longer needed */
			kfree_sensitive(txq->entries[cmd_index].free_buf);
			txq->entries[cmd_index].free_buf = NULL;

			/* Invoke any callbacks, transfer the buffer to caller,
			 * and fire off the (possibly) blocking
			 * iwl_trans_send_cmd()
			 * as we reclaim the driver command queue */
			if (!rxcb._page_stolen)
				iwl_pcie_hcmd_complete(trans, &rxcb);
			else
				IWL_WARN(trans, "Claim null rxb?\n");
		}

		page_stolen |= rxcb._page_stolen;
		/* newer HW puts only one packet per RB (see multi-RB note
		 * in iwl_pcie_rx_handle), so stop after the first one */
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
			break;
	}

	/* page was stolen from us -- free our reference */
	if (page_stolen) {
		__free_pages(rxb->page, trans_pcie->rx_page_order);
		rxb->page = NULL;
	}

	/* Reuse the page if possible. For notification packets and
	 * SKBs that fail to Rx correctly, add them back into the
	 * rx_free list for reuse later. */
	if (rxb->page != NULL) {
		rxb->page_dma =
			dma_map_page(trans->dev, rxb->page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			/*
			 * free the page(s) as well to not break
			 * the invariant that the items on the used
			 * list have no page(s)
			 */
			__free_pages(rxb->page, trans_pcie->rx_page_order);
			rxb->page = NULL;
			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
		} else {
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		}
	} else
		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
}
144162306a36Sopenharmony_ci
/*
 * iwl_pcie_get_rxb - map HW completion entry @i to its driver RBD
 *
 * For single-queue (non-mq) hardware the RBD comes straight from the
 * shadow queue[] array. For multi-queue hardware the completion ring
 * entry carries a 1-based virtual buffer ID (vid) that indexes the
 * global RBD table; the descriptor layout differs per device family.
 * *join is set when the descriptor flags the entry as one fragment of a
 * multi-RB frame (pre-AX210 layouts never set it).
 *
 * Returns NULL - after firing an NMI - if the HW handed back a bogus or
 * already-consumed vid.
 */
static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
						  struct iwl_rxq *rxq, int i,
						  bool *join)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	u16 vid;

	/* the used_bd casts below rely on these exact descriptor sizes */
	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);

	if (!trans->trans_cfg->mq_rx_supported) {
		rxb = rxq->queue[i];
		rxq->queue[i] = NULL;
		return rxb;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;

		vid = le16_to_cpu(cd[i].rbid);
		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
	} else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_completion_desc *cd = rxq->used_bd;

		vid = le16_to_cpu(cd[i].rbid);
		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
	} else {
		__le32 *cd = rxq->used_bd;

		vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
	}

	/* vid is 1-based, so 0 or anything past the pool is garbage */
	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
		goto out_err;

	rxb = trans_pcie->global_table[vid - 1];
	/* an RBD still marked invalid was never handed to the HW */
	if (rxb->invalid)
		goto out_err;

	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);

	/* mark consumed until the RBD is given back to the HW */
	rxb->invalid = true;

	return rxb;

out_err:
	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
	iwl_force_nmi(trans);
	return NULL;
}
149362306a36Sopenharmony_ci
/*
 * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
 *
 * Walks RX queue @queue from the driver's read index up to the closed-RB
 * index published by the firmware, handling at most @budget entries.
 * Returns the number of entries handled (NAPI convention).
 */
static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq;
	u32 r, i, count = 0, handled = 0;
	bool emergency = false;

	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
		return budget;

	rxq = &trans_pcie->rxq[queue];

restart:
	spin_lock(&rxq->lock);
	/* uCode's read index (stored in shared DRAM) indicates the last Rx
	 * buffer that the driver may process (last buffer filled by ucode). */
	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
	i = rxq->read;

	/* W/A 9000 device step A0 wrap-around bug */
	r &= (rxq->queue_size - 1);

	/* Rx interrupt, but nothing sent from uCode */
	if (i == r)
		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);

	while (i != r && ++handled < budget) {
		struct iwl_rb_allocator *rba = &trans_pcie->rba;
		struct iwl_rx_mem_buffer *rxb;
		/* number of RBDs still waiting for page allocation */
		u32 rb_pending_alloc =
			atomic_read(&trans_pcie->rba.req_pending) *
			RX_CLAIM_REQ_ALLOC;
		bool join = false;

		/* too many RBDs still lack pages: enter emergency mode and
		 * hand everything on rx_used to the allocator at once */
		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
			     !emergency)) {
			iwl_pcie_rx_move_to_allocator(rxq, rba);
			emergency = true;
			IWL_DEBUG_TPT(trans,
				      "RX path is in emergency. Pending allocations %d\n",
				      rb_pending_alloc);
		}

		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);

		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
		if (!rxb)
			goto out;

		if (unlikely(join || rxq->next_rb_is_fragment)) {
			rxq->next_rb_is_fragment = join;
			/*
			 * We can only get a multi-RB in the following cases:
			 *  - firmware issue, sending a too big notification
			 *  - sniffer mode with a large A-MSDU
			 *  - large MTU frames (>2k)
			 * since the multi-RB functionality is limited to newer
			 * hardware that cannot put multiple entries into a
			 * single RB.
			 *
			 * Right now, the higher layers aren't set up to deal
			 * with that, so discard all of these.
			 */
			list_add_tail(&rxb->list, &rxq->rx_free);
			rxq->free_count++;
		} else {
			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
		}

		i = (i + 1) & (rxq->queue_size - 1);

		/*
		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
		 * try to claim the pre-allocated buffers from the allocator.
		 * If not ready - will try to reclaim next time.
		 * There is no need to reschedule work - allocator exits only
		 * on success
		 */
		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
			iwl_pcie_rx_allocator_get(trans, rxq);

		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
			/* Add the remaining empty RBDs for allocator use */
			iwl_pcie_rx_move_to_allocator(rxq, rba);
		} else if (emergency) {
			/* in emergency, every 8 entries drop the lock,
			 * allocate pages ourselves and restock the HW queue
			 * directly, then restart the walk */
			count++;
			if (count == 8) {
				count = 0;
				if (rb_pending_alloc < rxq->queue_size / 3) {
					IWL_DEBUG_TPT(trans,
						      "RX path exited emergency. Pending allocations %d\n",
						      rb_pending_alloc);
					emergency = false;
				}

				rxq->read = i;
				spin_unlock(&rxq->lock);
				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
				iwl_pcie_rxq_restock(trans, rxq);
				goto restart;
			}
		}
	}
out:
	/* publish the updated read index (next entry to process) */
	rxq->read = i;
	spin_unlock(&rxq->lock);

	/*
	 * handle a case where in emergency there are some unallocated RBDs.
	 * those RBDs are in the used list, but are not tracked by the queue's
	 * used_count which counts allocator owned RBDs.
	 * unallocated emergency RBDs must be allocated on exit, otherwise
	 * when called again the function may not be in emergency mode and
	 * they will be handed to the allocator with no tracking in the RBD
	 * allocator counters, which will lead to them never being claimed back
	 * by the queue.
	 * by allocating them here, they are now in the queue free list, and
	 * will be restocked by the next call of iwl_pcie_rxq_restock.
	 */
	if (unlikely(emergency && count))
		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);

	iwl_pcie_rxq_restock(trans, rxq);

	return handled;
}
162562306a36Sopenharmony_ci
162662306a36Sopenharmony_cistatic struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
162762306a36Sopenharmony_ci{
162862306a36Sopenharmony_ci	u8 queue = entry->entry;
162962306a36Sopenharmony_ci	struct msix_entry *entries = entry - queue;
163062306a36Sopenharmony_ci
163162306a36Sopenharmony_ci	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
163262306a36Sopenharmony_ci}
163362306a36Sopenharmony_ci
/*
 * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
 * This interrupt handler should be used with RSS queue only.
 *
 * @dev_id is the struct msix_entry for the queue that fired; its ->entry
 * index doubles as the RX queue index. Schedules the queue's NAPI (or
 * just re-arms the interrupt if NAPI is already scheduled).
 */
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
{
	struct msix_entry *entry = dev_id;
	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
	struct iwl_trans *trans = trans_pcie->trans;
	struct iwl_rxq *rxq;

	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);

	if (WARN_ON(entry->entry >= trans->num_rx_queues))
		return IRQ_NONE;

	/* possibly a spurious/shared interrupt before RX was set up */
	if (!trans_pcie->rxq) {
		if (net_ratelimit())
			IWL_ERR(trans,
				"[%d] Got MSI-X interrupt before we have Rx queues\n",
				entry->entry);
		return IRQ_NONE;
	}

	rxq = &trans_pcie->rxq[entry->entry];
	lock_map_acquire(&trans->sync_cmd_lockdep_map);
	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);

	/* schedule NAPI with BHs disabled; if it was already scheduled,
	 * only the interrupt needs to be re-armed */
	local_bh_disable();
	if (napi_schedule_prep(&rxq->napi))
		__napi_schedule(&rxq->napi);
	else
		iwl_pcie_clear_irq(trans, entry->entry);
	local_bh_enable();

	lock_map_release(&trans->sync_cmd_lockdep_map);

	return IRQ_HANDLED;
}
167362306a36Sopenharmony_ci
/*
 * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
 *
 * Either defers to the WiMAX-owns-the-RF workaround (not a real firmware
 * error), or stops the TX stuck-timers, reports a firmware error to the
 * op-mode and releases any synchronous command waiter.
 */
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
	int i;

	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
	if (trans->cfg->internal_wimax_coex &&
	    !trans->cfg->apmg_not_supported &&
	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
			    APMG_PS_CTRL_VAL_RESET_REQ))) {
		/* WiMAX holds the RF: release the command waiter and tell
		 * the op-mode instead of declaring a firmware error */
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		iwl_op_mode_wimax_active(trans->op_mode);
		wake_up(&trans->wait_command_queue);
		return;
	}

	/* stop the per-queue stuck-timers; no TX will complete anymore */
	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		if (!trans->txqs.txq[i])
			continue;
		del_timer(&trans->txqs.txq[i]->stuck_timer);
	}

	/* The STATUS_FW_ERROR bit is set in this function. This must happen
	 * before we wake up the command caller, to ensure a proper cleanup. */
	iwl_trans_fw_error(trans, false);

	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	wake_up(&trans->wait_command_queue);
}
170762306a36Sopenharmony_ci
170862306a36Sopenharmony_cistatic u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
170962306a36Sopenharmony_ci{
171062306a36Sopenharmony_ci	u32 inta;
171162306a36Sopenharmony_ci
171262306a36Sopenharmony_ci	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
171362306a36Sopenharmony_ci
171462306a36Sopenharmony_ci	trace_iwlwifi_dev_irq(trans->dev);
171562306a36Sopenharmony_ci
171662306a36Sopenharmony_ci	/* Discover which interrupts are active/pending */
171762306a36Sopenharmony_ci	inta = iwl_read32(trans, CSR_INT);
171862306a36Sopenharmony_ci
171962306a36Sopenharmony_ci	/* the thread will service interrupts and re-enable them */
172062306a36Sopenharmony_ci	return inta;
172162306a36Sopenharmony_ci}
172262306a36Sopenharmony_ci
/* a device (PCI-E) page is 4096 bytes long */
#define ICT_SHIFT	12
#define ICT_SIZE	(1 << ICT_SHIFT)	/* one page: 4096 bytes */
#define ICT_COUNT	(ICT_SIZE / sizeof(u32))	/* 32-bit entries per table */
172762306a36Sopenharmony_ci
172862306a36Sopenharmony_ci/* interrupt handler using ict table, with this interrupt driver will
172962306a36Sopenharmony_ci * stop using INTA register to get device's interrupt, reading this register
173062306a36Sopenharmony_ci * is expensive, device will write interrupts in ICT dram table, increment
173162306a36Sopenharmony_ci * index then will fire interrupt to driver, driver will OR all ICT table
173262306a36Sopenharmony_ci * entries from current index up to table entry with 0 value. the result is
173362306a36Sopenharmony_ci * the interrupt we need to service, driver will set the entries back to 0 and
173462306a36Sopenharmony_ci * set index.
173562306a36Sopenharmony_ci */
173662306a36Sopenharmony_cistatic u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
173762306a36Sopenharmony_ci{
173862306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
173962306a36Sopenharmony_ci	u32 inta;
174062306a36Sopenharmony_ci	u32 val = 0;
174162306a36Sopenharmony_ci	u32 read;
174262306a36Sopenharmony_ci
174362306a36Sopenharmony_ci	trace_iwlwifi_dev_irq(trans->dev);
174462306a36Sopenharmony_ci
174562306a36Sopenharmony_ci	/* Ignore interrupt if there's nothing in NIC to service.
174662306a36Sopenharmony_ci	 * This may be due to IRQ shared with another device,
174762306a36Sopenharmony_ci	 * or due to sporadic interrupts thrown from our NIC. */
174862306a36Sopenharmony_ci	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
174962306a36Sopenharmony_ci	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
175062306a36Sopenharmony_ci	if (!read)
175162306a36Sopenharmony_ci		return 0;
175262306a36Sopenharmony_ci
175362306a36Sopenharmony_ci	/*
175462306a36Sopenharmony_ci	 * Collect all entries up to the first 0, starting from ict_index;
175562306a36Sopenharmony_ci	 * note we already read at ict_index.
175662306a36Sopenharmony_ci	 */
175762306a36Sopenharmony_ci	do {
175862306a36Sopenharmony_ci		val |= read;
175962306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
176062306a36Sopenharmony_ci				trans_pcie->ict_index, read);
176162306a36Sopenharmony_ci		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
176262306a36Sopenharmony_ci		trans_pcie->ict_index =
176362306a36Sopenharmony_ci			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
176462306a36Sopenharmony_ci
176562306a36Sopenharmony_ci		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
176662306a36Sopenharmony_ci		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
176762306a36Sopenharmony_ci					   read);
176862306a36Sopenharmony_ci	} while (read);
176962306a36Sopenharmony_ci
177062306a36Sopenharmony_ci	/* We should not get this value, just ignore it. */
177162306a36Sopenharmony_ci	if (val == 0xffffffff)
177262306a36Sopenharmony_ci		val = 0;
177362306a36Sopenharmony_ci
177462306a36Sopenharmony_ci	/*
177562306a36Sopenharmony_ci	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
177662306a36Sopenharmony_ci	 * (bit 15 before shifting it to 31) to clear when using interrupt
177762306a36Sopenharmony_ci	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
177862306a36Sopenharmony_ci	 * so we use them to decide on the real state of the Rx bit.
177962306a36Sopenharmony_ci	 * In order words, bit 15 is set if bit 18 or bit 19 are set.
178062306a36Sopenharmony_ci	 */
178162306a36Sopenharmony_ci	if (val & 0xC0000)
178262306a36Sopenharmony_ci		val |= 0x8000;
178362306a36Sopenharmony_ci
178462306a36Sopenharmony_ci	inta = (0xff & val) | ((0xff00 & val) << 16);
178562306a36Sopenharmony_ci	return inta;
178662306a36Sopenharmony_ci}
178762306a36Sopenharmony_ci
178862306a36Sopenharmony_civoid iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
178962306a36Sopenharmony_ci{
179062306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
179162306a36Sopenharmony_ci	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
179262306a36Sopenharmony_ci	bool hw_rfkill, prev, report;
179362306a36Sopenharmony_ci
179462306a36Sopenharmony_ci	mutex_lock(&trans_pcie->mutex);
179562306a36Sopenharmony_ci	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
179662306a36Sopenharmony_ci	hw_rfkill = iwl_is_rfkill_set(trans);
179762306a36Sopenharmony_ci	if (hw_rfkill) {
179862306a36Sopenharmony_ci		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
179962306a36Sopenharmony_ci		set_bit(STATUS_RFKILL_HW, &trans->status);
180062306a36Sopenharmony_ci	}
180162306a36Sopenharmony_ci	if (trans_pcie->opmode_down)
180262306a36Sopenharmony_ci		report = hw_rfkill;
180362306a36Sopenharmony_ci	else
180462306a36Sopenharmony_ci		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
180562306a36Sopenharmony_ci
180662306a36Sopenharmony_ci	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
180762306a36Sopenharmony_ci		 hw_rfkill ? "disable radio" : "enable radio");
180862306a36Sopenharmony_ci
180962306a36Sopenharmony_ci	isr_stats->rfkill++;
181062306a36Sopenharmony_ci
181162306a36Sopenharmony_ci	if (prev != report)
181262306a36Sopenharmony_ci		iwl_trans_pcie_rf_kill(trans, report, from_irq);
181362306a36Sopenharmony_ci	mutex_unlock(&trans_pcie->mutex);
181462306a36Sopenharmony_ci
181562306a36Sopenharmony_ci	if (hw_rfkill) {
181662306a36Sopenharmony_ci		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
181762306a36Sopenharmony_ci				       &trans->status))
181862306a36Sopenharmony_ci			IWL_DEBUG_RF_KILL(trans,
181962306a36Sopenharmony_ci					  "Rfkill while SYNC HCMD in flight\n");
182062306a36Sopenharmony_ci		wake_up(&trans->wait_command_queue);
182162306a36Sopenharmony_ci	} else {
182262306a36Sopenharmony_ci		clear_bit(STATUS_RFKILL_HW, &trans->status);
182362306a36Sopenharmony_ci		if (trans_pcie->opmode_down)
182462306a36Sopenharmony_ci			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
182562306a36Sopenharmony_ci	}
182662306a36Sopenharmony_ci}
182762306a36Sopenharmony_ci
/*
 * Threaded interrupt handler (non-MSI-X path).  Reads the pending
 * interrupt causes (via the ICT table when enabled, else CSR_INT),
 * ACKs them, dispatches each cause, and re-enables interrupts unless
 * NAPI polling was scheduled (in which case NAPI re-enables them when
 * polling completes).
 */
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
	struct iwl_trans *trans = dev_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
	u32 inta = 0;
	u32 handled = 0;
	/* set when NAPI was scheduled; suppresses int re-enable below */
	bool polling = false;

	lock_map_acquire(&trans->sync_cmd_lockdep_map);

	spin_lock_bh(&trans_pcie->irq_lock);

	/* dram interrupt table not set yet,
	 * use legacy interrupt.
	 */
	if (likely(trans_pcie->use_ict))
		inta = iwl_pcie_int_cause_ict(trans);
	else
		inta = iwl_pcie_int_cause_non_ict(trans);

	if (iwl_have_debug_level(IWL_DL_ISR)) {
		IWL_DEBUG_ISR(trans,
			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
			      inta, trans_pcie->inta_mask,
			      iwl_read32(trans, CSR_INT_MASK),
			      iwl_read32(trans, CSR_FH_INT_STATUS));
		if (inta & (~trans_pcie->inta_mask))
			IWL_DEBUG_ISR(trans,
				      "We got a masked interrupt (0x%08x)\n",
				      inta & (~trans_pcie->inta_mask));
	}

	/* only service causes we actually enabled */
	inta &= trans_pcie->inta_mask;

	/*
	 * Ignore interrupt if there's nothing in NIC to service.
	 * This may be due to IRQ shared with another device,
	 * or due to sporadic interrupts thrown from our NIC.
	 */
	if (unlikely(!inta)) {
		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
		/*
		 * Re-enable interrupts here since we don't
		 * have anything to service
		 */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		spin_unlock_bh(&trans_pcie->irq_lock);
		lock_map_release(&trans->sync_cmd_lockdep_map);
		return IRQ_NONE;
	}

	if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
		/*
		 * Hardware disappeared. It might have
		 * already raised an interrupt.
		 */
		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
		spin_unlock_bh(&trans_pcie->irq_lock);
		goto out;
	}

	/* Ack/clear/reset pending uCode interrupts.
	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
	 */
	/* There is a hardware bug in the interrupt mask function that some
	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
	 * they are disabled in the CSR_INT_MASK register. Furthermore the
	 * ICT interrupt handling mechanism has another bug that might cause
	 * these unmasked interrupts fail to be detected. We workaround the
	 * hardware bugs here by ACKing all the possible interrupts so that
	 * interrupt coalescing can still be achieved.
	 */
	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);

	if (iwl_have_debug_level(IWL_DL_ISR))
		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
			      inta, iwl_read32(trans, CSR_INT_MASK));

	spin_unlock_bh(&trans_pcie->irq_lock);

	/* Now service all interrupt bits discovered above. */
	if (inta & CSR_INT_BIT_HW_ERR) {
		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");

		/* Tell the device to stop sending interrupts */
		iwl_disable_interrupts(trans);

		isr_stats->hw++;
		iwl_pcie_irq_handle_error(trans);

		handled |= CSR_INT_BIT_HW_ERR;

		goto out;
	}

	/* NIC fires this, but we don't use it, redundant with WAKEUP */
	if (inta & CSR_INT_BIT_SCD) {
		IWL_DEBUG_ISR(trans,
			      "Scheduler finished to transmit the frame/frames.\n");
		isr_stats->sch++;
	}

	/* Alive notification via Rx interrupt will do the real work */
	if (inta & CSR_INT_BIT_ALIVE) {
		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
		isr_stats->alive++;
		if (trans->trans_cfg->gen2) {
			/*
			 * We can restock, since firmware configured
			 * the RFH
			 */
			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
		}

		handled |= CSR_INT_BIT_ALIVE;
	}

	/* Safely ignore these bits for debug checks below */
	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);

	/* HW RF KILL switch toggled */
	if (inta & CSR_INT_BIT_RF_KILL) {
		iwl_pcie_handle_rfkill_irq(trans, true);
		handled |= CSR_INT_BIT_RF_KILL;
	}

	/* Chip got too hot and stopped itself */
	if (inta & CSR_INT_BIT_CT_KILL) {
		IWL_ERR(trans, "Microcode CT kill error detected.\n");
		isr_stats->ctkill++;
		handled |= CSR_INT_BIT_CT_KILL;
	}

	/* Error detected by uCode */
	if (inta & CSR_INT_BIT_SW_ERR) {
		IWL_ERR(trans, "Microcode SW error detected. "
			" Restarting 0x%X.\n", inta);
		isr_stats->sw++;
		iwl_pcie_irq_handle_error(trans);
		handled |= CSR_INT_BIT_SW_ERR;
	}

	/* uCode wakes up after power-down sleep */
	if (inta & CSR_INT_BIT_WAKEUP) {
		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
		iwl_pcie_rxq_check_wrptr(trans);
		iwl_pcie_txq_check_wrptrs(trans);

		isr_stats->wakeup++;

		handled |= CSR_INT_BIT_WAKEUP;
	}

	/* All uCode command responses, including Tx command responses,
	 * Rx "responses" (frame-received notification), and other
	 * notifications from uCode come through here*/
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
		    CSR_INT_BIT_RX_PERIODIC)) {
		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
			iwl_write32(trans, CSR_FH_INT_STATUS,
					CSR_FH_INT_RX_MASK);
		}
		if (inta & CSR_INT_BIT_RX_PERIODIC) {
			handled |= CSR_INT_BIT_RX_PERIODIC;
			iwl_write32(trans,
				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
		}
		/* Sending RX interrupt require many steps to be done in the
		 * device:
		 * 1- write interrupt to current index in ICT table.
		 * 2- dma RX frame.
		 * 3- update RX shared data to indicate last write index.
		 * 4- send interrupt.
		 * This could lead to RX race, driver could receive RX interrupt
		 * but the shared data changes does not reflect this;
		 * periodic interrupt will detect any dangling Rx activity.
		 */

		/* Disable periodic interrupt; we use it as just a one-shot. */
		iwl_write8(trans, CSR_INT_PERIODIC_REG,
			    CSR_INT_PERIODIC_DIS);

		/*
		 * Enable periodic interrupt in 8 msec only if we received
		 * real RX interrupt (instead of just periodic int), to catch
		 * any dangling Rx interrupt.  If it was just the periodic
		 * interrupt, there was no dangling Rx activity, and no need
		 * to extend the periodic interrupt; one-shot is enough.
		 */
		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
			iwl_write8(trans, CSR_INT_PERIODIC_REG,
				   CSR_INT_PERIODIC_ENA);

		isr_stats->rx++;

		/* hand the RX work to NAPI; BH must be disabled around
		 * scheduling from a threaded (process) context */
		local_bh_disable();
		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
			polling = true;
			__napi_schedule(&trans_pcie->rxq[0].napi);
		}
		local_bh_enable();
	}

	/* This "Tx" DMA channel is used only for loading uCode */
	if (inta & CSR_INT_BIT_FH_TX) {
		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
		isr_stats->tx++;
		handled |= CSR_INT_BIT_FH_TX;
		/* Wake up uCode load routine, now that load is complete */
		trans_pcie->ucode_write_complete = true;
		wake_up(&trans_pcie->ucode_write_waitq);
		/* Wake up IMR write routine, now that write to SRAM is complete */
		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
			trans_pcie->imr_status = IMR_D2S_COMPLETED;
			wake_up(&trans_pcie->ucode_write_waitq);
		}
	}

	if (inta & ~handled) {
		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
		isr_stats->unhandled++;
	}

	if (inta & ~(trans_pcie->inta_mask)) {
		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
			 inta & ~trans_pcie->inta_mask);
	}

	/* when NAPI was scheduled, it re-enables interrupts at the end
	 * of the poll; otherwise re-enable here as appropriate */
	if (!polling) {
		spin_lock_bh(&trans_pcie->irq_lock);
		/* only Re-enable all interrupt if disabled by irq */
		if (test_bit(STATUS_INT_ENABLED, &trans->status))
			_iwl_enable_interrupts(trans);
		/* we are loading the firmware, enable FH_TX interrupt only */
		else if (handled & CSR_INT_BIT_FH_TX)
			iwl_enable_fw_load_int(trans);
		/* Re-enable RF_KILL if it occurred */
		else if (handled & CSR_INT_BIT_RF_KILL)
			iwl_enable_rfkill_int(trans);
		/* Re-enable the ALIVE / Rx interrupt if it occurred */
		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
			iwl_enable_fw_load_int_ctx_info(trans);
		spin_unlock_bh(&trans_pcie->irq_lock);
	}

out:
	lock_map_release(&trans->sync_cmd_lockdep_map);
	return IRQ_HANDLED;
}
208262306a36Sopenharmony_ci
208362306a36Sopenharmony_ci/******************************************************************************
208462306a36Sopenharmony_ci *
208562306a36Sopenharmony_ci * ICT functions
208662306a36Sopenharmony_ci *
208762306a36Sopenharmony_ci ******************************************************************************/
208862306a36Sopenharmony_ci
208962306a36Sopenharmony_ci/* Free dram table */
209062306a36Sopenharmony_civoid iwl_pcie_free_ict(struct iwl_trans *trans)
209162306a36Sopenharmony_ci{
209262306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
209362306a36Sopenharmony_ci
209462306a36Sopenharmony_ci	if (trans_pcie->ict_tbl) {
209562306a36Sopenharmony_ci		dma_free_coherent(trans->dev, ICT_SIZE,
209662306a36Sopenharmony_ci				  trans_pcie->ict_tbl,
209762306a36Sopenharmony_ci				  trans_pcie->ict_tbl_dma);
209862306a36Sopenharmony_ci		trans_pcie->ict_tbl = NULL;
209962306a36Sopenharmony_ci		trans_pcie->ict_tbl_dma = 0;
210062306a36Sopenharmony_ci	}
210162306a36Sopenharmony_ci}
210262306a36Sopenharmony_ci
210362306a36Sopenharmony_ci/*
210462306a36Sopenharmony_ci * allocate dram shared table, it is an aligned memory
210562306a36Sopenharmony_ci * block of ICT_SIZE.
210662306a36Sopenharmony_ci * also reset all data related to ICT table interrupt.
210762306a36Sopenharmony_ci */
210862306a36Sopenharmony_ciint iwl_pcie_alloc_ict(struct iwl_trans *trans)
210962306a36Sopenharmony_ci{
211062306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
211162306a36Sopenharmony_ci
211262306a36Sopenharmony_ci	trans_pcie->ict_tbl =
211362306a36Sopenharmony_ci		dma_alloc_coherent(trans->dev, ICT_SIZE,
211462306a36Sopenharmony_ci				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
211562306a36Sopenharmony_ci	if (!trans_pcie->ict_tbl)
211662306a36Sopenharmony_ci		return -ENOMEM;
211762306a36Sopenharmony_ci
211862306a36Sopenharmony_ci	/* just an API sanity check ... it is guaranteed to be aligned */
211962306a36Sopenharmony_ci	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
212062306a36Sopenharmony_ci		iwl_pcie_free_ict(trans);
212162306a36Sopenharmony_ci		return -EINVAL;
212262306a36Sopenharmony_ci	}
212362306a36Sopenharmony_ci
212462306a36Sopenharmony_ci	return 0;
212562306a36Sopenharmony_ci}
212662306a36Sopenharmony_ci
/* Device is going up inform it about using ICT interrupt table,
 * also we need to tell the driver to start using ICT interrupt.
 */
void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;

	/* nothing to program if the table was never allocated */
	if (!trans_pcie->ict_tbl)
		return;

	spin_lock_bh(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);

	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);

	/* the register takes the page number, not the byte address
	 * (table is ICT_SIZE-aligned, see iwl_pcie_alloc_ict()) */
	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;

	val |= CSR_DRAM_INT_TBL_ENABLE |
	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
	       CSR_DRAM_INIT_TBL_WRITE_POINTER;

	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);

	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
	trans_pcie->use_ict = true;
	trans_pcie->ict_index = 0;
	/* ack any causes still latched before switching to ICT mode */
	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
	_iwl_enable_interrupts(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);
}
215862306a36Sopenharmony_ci
/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* flip the flag under irq_lock so the interrupt handler falls
	 * back to the legacy CSR_INT path from now on */
	spin_lock_bh(&trans_pcie->irq_lock);
	trans_pcie->use_ict = false;
	spin_unlock_bh(&trans_pcie->irq_lock);
}
216862306a36Sopenharmony_ci
/*
 * Hard-IRQ handler (non-MSI-X): mask device interrupts and defer all
 * real work to the threaded handler (iwl_pcie_irq_handler) by
 * returning IRQ_WAKE_THREAD.
 */
irqreturn_t iwl_pcie_isr(int irq, void *data)
{
	struct iwl_trans *trans = data;

	/* shared-IRQ line may fire before/after our device is set up */
	if (!trans)
		return IRQ_NONE;

	/* Disable (but don't clear!) interrupts here to avoid
	 * back-to-back ISRs and sporadic interrupts from our NIC.
	 * If we have something to service, the tasklet will re-enable ints.
	 * If we *don't* have something, we'll re-enable before leaving here.
	 */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	return IRQ_WAKE_THREAD;
}
218562306a36Sopenharmony_ci
/*
 * Hard-IRQ handler for MSI-X vectors: no masking needed per-vector,
 * just punt to the threaded handler (iwl_pcie_irq_msix_handler).
 */
irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}
219062306a36Sopenharmony_ci
219162306a36Sopenharmony_ciirqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
219262306a36Sopenharmony_ci{
219362306a36Sopenharmony_ci	struct msix_entry *entry = dev_id;
219462306a36Sopenharmony_ci	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
219562306a36Sopenharmony_ci	struct iwl_trans *trans = trans_pcie->trans;
219662306a36Sopenharmony_ci	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
219762306a36Sopenharmony_ci	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
219862306a36Sopenharmony_ci	u32 inta_fh, inta_hw;
219962306a36Sopenharmony_ci	bool polling = false;
220062306a36Sopenharmony_ci	bool sw_err;
220162306a36Sopenharmony_ci
220262306a36Sopenharmony_ci	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
220362306a36Sopenharmony_ci		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
220462306a36Sopenharmony_ci
220562306a36Sopenharmony_ci	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
220662306a36Sopenharmony_ci		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
220762306a36Sopenharmony_ci
220862306a36Sopenharmony_ci	lock_map_acquire(&trans->sync_cmd_lockdep_map);
220962306a36Sopenharmony_ci
221062306a36Sopenharmony_ci	spin_lock_bh(&trans_pcie->irq_lock);
221162306a36Sopenharmony_ci	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
221262306a36Sopenharmony_ci	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
221362306a36Sopenharmony_ci	/*
221462306a36Sopenharmony_ci	 * Clear causes registers to avoid being handling the same cause.
221562306a36Sopenharmony_ci	 */
221662306a36Sopenharmony_ci	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
221762306a36Sopenharmony_ci	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
221862306a36Sopenharmony_ci	spin_unlock_bh(&trans_pcie->irq_lock);
221962306a36Sopenharmony_ci
222062306a36Sopenharmony_ci	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
222162306a36Sopenharmony_ci
222262306a36Sopenharmony_ci	if (unlikely(!(inta_fh | inta_hw))) {
222362306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
222462306a36Sopenharmony_ci		lock_map_release(&trans->sync_cmd_lockdep_map);
222562306a36Sopenharmony_ci		return IRQ_NONE;
222662306a36Sopenharmony_ci	}
222762306a36Sopenharmony_ci
222862306a36Sopenharmony_ci	if (iwl_have_debug_level(IWL_DL_ISR)) {
222962306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans,
223062306a36Sopenharmony_ci			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
223162306a36Sopenharmony_ci			      entry->entry, inta_fh, trans_pcie->fh_mask,
223262306a36Sopenharmony_ci			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
223362306a36Sopenharmony_ci		if (inta_fh & ~trans_pcie->fh_mask)
223462306a36Sopenharmony_ci			IWL_DEBUG_ISR(trans,
223562306a36Sopenharmony_ci				      "We got a masked interrupt (0x%08x)\n",
223662306a36Sopenharmony_ci				      inta_fh & ~trans_pcie->fh_mask);
223762306a36Sopenharmony_ci	}
223862306a36Sopenharmony_ci
223962306a36Sopenharmony_ci	inta_fh &= trans_pcie->fh_mask;
224062306a36Sopenharmony_ci
224162306a36Sopenharmony_ci	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
224262306a36Sopenharmony_ci	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
224362306a36Sopenharmony_ci		local_bh_disable();
224462306a36Sopenharmony_ci		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
224562306a36Sopenharmony_ci			polling = true;
224662306a36Sopenharmony_ci			__napi_schedule(&trans_pcie->rxq[0].napi);
224762306a36Sopenharmony_ci		}
224862306a36Sopenharmony_ci		local_bh_enable();
224962306a36Sopenharmony_ci	}
225062306a36Sopenharmony_ci
225162306a36Sopenharmony_ci	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
225262306a36Sopenharmony_ci	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
225362306a36Sopenharmony_ci		local_bh_disable();
225462306a36Sopenharmony_ci		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
225562306a36Sopenharmony_ci			polling = true;
225662306a36Sopenharmony_ci			__napi_schedule(&trans_pcie->rxq[1].napi);
225762306a36Sopenharmony_ci		}
225862306a36Sopenharmony_ci		local_bh_enable();
225962306a36Sopenharmony_ci	}
226062306a36Sopenharmony_ci
226162306a36Sopenharmony_ci	/* This "Tx" DMA channel is used only for loading uCode */
226262306a36Sopenharmony_ci	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
226362306a36Sopenharmony_ci	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
226462306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
226562306a36Sopenharmony_ci		isr_stats->tx++;
226662306a36Sopenharmony_ci
226762306a36Sopenharmony_ci		/* Wake up IMR routine once write to SRAM is complete */
226862306a36Sopenharmony_ci		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
226962306a36Sopenharmony_ci			trans_pcie->imr_status = IMR_D2S_COMPLETED;
227062306a36Sopenharmony_ci			wake_up(&trans_pcie->ucode_write_waitq);
227162306a36Sopenharmony_ci		}
227262306a36Sopenharmony_ci	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
227362306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
227462306a36Sopenharmony_ci		isr_stats->tx++;
227562306a36Sopenharmony_ci		/*
227662306a36Sopenharmony_ci		 * Wake up uCode load routine,
227762306a36Sopenharmony_ci		 * now that load is complete
227862306a36Sopenharmony_ci		 */
227962306a36Sopenharmony_ci		trans_pcie->ucode_write_complete = true;
228062306a36Sopenharmony_ci		wake_up(&trans_pcie->ucode_write_waitq);
228162306a36Sopenharmony_ci
228262306a36Sopenharmony_ci		/* Wake up IMR routine once write to SRAM is complete */
228362306a36Sopenharmony_ci		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
228462306a36Sopenharmony_ci			trans_pcie->imr_status = IMR_D2S_COMPLETED;
228562306a36Sopenharmony_ci			wake_up(&trans_pcie->ucode_write_waitq);
228662306a36Sopenharmony_ci		}
228762306a36Sopenharmony_ci	}
228862306a36Sopenharmony_ci
228962306a36Sopenharmony_ci	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
229062306a36Sopenharmony_ci		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
229162306a36Sopenharmony_ci	else
229262306a36Sopenharmony_ci		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
229362306a36Sopenharmony_ci
229462306a36Sopenharmony_ci	/* Error detected by uCode */
229562306a36Sopenharmony_ci	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
229662306a36Sopenharmony_ci		IWL_ERR(trans,
229762306a36Sopenharmony_ci			"Microcode SW error detected. Restarting 0x%X.\n",
229862306a36Sopenharmony_ci			inta_fh);
229962306a36Sopenharmony_ci		isr_stats->sw++;
230062306a36Sopenharmony_ci		/* during FW reset flow report errors from there */
230162306a36Sopenharmony_ci		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
230262306a36Sopenharmony_ci			trans_pcie->imr_status = IMR_D2S_ERROR;
230362306a36Sopenharmony_ci			wake_up(&trans_pcie->imr_waitq);
230462306a36Sopenharmony_ci		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
230562306a36Sopenharmony_ci			trans_pcie->fw_reset_state = FW_RESET_ERROR;
230662306a36Sopenharmony_ci			wake_up(&trans_pcie->fw_reset_waitq);
230762306a36Sopenharmony_ci		} else {
230862306a36Sopenharmony_ci			iwl_pcie_irq_handle_error(trans);
230962306a36Sopenharmony_ci		}
231062306a36Sopenharmony_ci	}
231162306a36Sopenharmony_ci
231262306a36Sopenharmony_ci	/* After checking FH register check HW register */
231362306a36Sopenharmony_ci	if (iwl_have_debug_level(IWL_DL_ISR)) {
231462306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans,
231562306a36Sopenharmony_ci			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
231662306a36Sopenharmony_ci			      entry->entry, inta_hw, trans_pcie->hw_mask,
231762306a36Sopenharmony_ci			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
231862306a36Sopenharmony_ci		if (inta_hw & ~trans_pcie->hw_mask)
231962306a36Sopenharmony_ci			IWL_DEBUG_ISR(trans,
232062306a36Sopenharmony_ci				      "We got a masked interrupt 0x%08x\n",
232162306a36Sopenharmony_ci				      inta_hw & ~trans_pcie->hw_mask);
232262306a36Sopenharmony_ci	}
232362306a36Sopenharmony_ci
232462306a36Sopenharmony_ci	inta_hw &= trans_pcie->hw_mask;
232562306a36Sopenharmony_ci
232662306a36Sopenharmony_ci	/* Alive notification via Rx interrupt will do the real work */
232762306a36Sopenharmony_ci	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
232862306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
232962306a36Sopenharmony_ci		isr_stats->alive++;
233062306a36Sopenharmony_ci		if (trans->trans_cfg->gen2) {
233162306a36Sopenharmony_ci			/* We can restock, since firmware configured the RFH */
233262306a36Sopenharmony_ci			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
233362306a36Sopenharmony_ci		}
233462306a36Sopenharmony_ci	}
233562306a36Sopenharmony_ci
233662306a36Sopenharmony_ci	/*
233762306a36Sopenharmony_ci	 * In some rare cases when the HW is in a bad state, we may
233862306a36Sopenharmony_ci	 * get this interrupt too early, when prph_info is still NULL.
233962306a36Sopenharmony_ci	 * So make sure that it's not NULL to prevent crashing.
234062306a36Sopenharmony_ci	 */
234162306a36Sopenharmony_ci	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
234262306a36Sopenharmony_ci		u32 sleep_notif =
234362306a36Sopenharmony_ci			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
234462306a36Sopenharmony_ci		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
234562306a36Sopenharmony_ci		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
234662306a36Sopenharmony_ci			IWL_DEBUG_ISR(trans,
234762306a36Sopenharmony_ci				      "Sx interrupt: sleep notification = 0x%x\n",
234862306a36Sopenharmony_ci				      sleep_notif);
234962306a36Sopenharmony_ci			trans_pcie->sx_complete = true;
235062306a36Sopenharmony_ci			wake_up(&trans_pcie->sx_waitq);
235162306a36Sopenharmony_ci		} else {
235262306a36Sopenharmony_ci			/* uCode wakes up after power-down sleep */
235362306a36Sopenharmony_ci			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
235462306a36Sopenharmony_ci			iwl_pcie_rxq_check_wrptr(trans);
235562306a36Sopenharmony_ci			iwl_pcie_txq_check_wrptrs(trans);
235662306a36Sopenharmony_ci
235762306a36Sopenharmony_ci			isr_stats->wakeup++;
235862306a36Sopenharmony_ci		}
235962306a36Sopenharmony_ci	}
236062306a36Sopenharmony_ci
236162306a36Sopenharmony_ci	/* Chip got too hot and stopped itself */
236262306a36Sopenharmony_ci	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
236362306a36Sopenharmony_ci		IWL_ERR(trans, "Microcode CT kill error detected.\n");
236462306a36Sopenharmony_ci		isr_stats->ctkill++;
236562306a36Sopenharmony_ci	}
236662306a36Sopenharmony_ci
236762306a36Sopenharmony_ci	/* HW RF KILL switch toggled */
236862306a36Sopenharmony_ci	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
236962306a36Sopenharmony_ci		iwl_pcie_handle_rfkill_irq(trans, true);
237062306a36Sopenharmony_ci
237162306a36Sopenharmony_ci	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
237262306a36Sopenharmony_ci		IWL_ERR(trans,
237362306a36Sopenharmony_ci			"Hardware error detected. Restarting.\n");
237462306a36Sopenharmony_ci
237562306a36Sopenharmony_ci		isr_stats->hw++;
237662306a36Sopenharmony_ci		trans->dbg.hw_error = true;
237762306a36Sopenharmony_ci		iwl_pcie_irq_handle_error(trans);
237862306a36Sopenharmony_ci	}
237962306a36Sopenharmony_ci
238062306a36Sopenharmony_ci	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
238162306a36Sopenharmony_ci		IWL_DEBUG_ISR(trans, "Reset flow completed\n");
238262306a36Sopenharmony_ci		trans_pcie->fw_reset_state = FW_RESET_OK;
238362306a36Sopenharmony_ci		wake_up(&trans_pcie->fw_reset_waitq);
238462306a36Sopenharmony_ci	}
238562306a36Sopenharmony_ci
238662306a36Sopenharmony_ci	if (!polling)
238762306a36Sopenharmony_ci		iwl_pcie_clear_irq(trans, entry->entry);
238862306a36Sopenharmony_ci
238962306a36Sopenharmony_ci	lock_map_release(&trans->sync_cmd_lockdep_map);
239062306a36Sopenharmony_ci
239162306a36Sopenharmony_ci	return IRQ_HANDLED;
239262306a36Sopenharmony_ci}
2393