1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license.  When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
9 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
10 * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as
14 * published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19 * General Public License for more details.
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called COPYING.
23 *
24 * Contact Information:
25 *  Intel Linux Wireless <linuxwifi@intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 * BSD LICENSE
29 *
30 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
31 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
32 * Copyright(c) 2007 - 2015, 2018 - 2020 Intel Corporation
33 * All rights reserved.
34 *
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
37 * are met:
38 *
39 *  * Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 *  * Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in
43 *    the documentation and/or other materials provided with the
44 *    distribution.
45 *  * Neither the name Intel Corporation nor the names of its
46 *    contributors may be used to endorse or promote products derived
47 *    from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
50 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
51 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
52 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
53 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
55 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
56 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
57 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
58 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
59 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 *
61 *****************************************************************************/
62#include <linux/pci.h>
63#include <linux/interrupt.h>
64#include <linux/debugfs.h>
65#include <linux/sched.h>
66#include <linux/bitops.h>
67#include <linux/gfp.h>
68#include <linux/vmalloc.h>
69#include <linux/module.h>
70#include <linux/wait.h>
71#include <linux/seq_file.h>
72
73#include "iwl-drv.h"
74#include "iwl-trans.h"
75#include "iwl-csr.h"
76#include "iwl-prph.h"
77#include "iwl-scd.h"
78#include "iwl-agn-hw.h"
79#include "fw/error-dump.h"
80#include "fw/dbg.h"
81#include "fw/api/tx.h"
82#include "internal.h"
83#include "iwl-fh.h"
84#include "iwl-context-info-gen3.h"
85
86/* extended range in FW SRAM */
87#define IWL_FW_MEM_EXTENDED_START	0x40000
88#define IWL_FW_MEM_EXTENDED_END		0x57FFF
89
90void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
91{
92#define PCI_DUMP_SIZE		352
93#define PCI_MEM_DUMP_SIZE	64
94#define PCI_PARENT_DUMP_SIZE	524
95#define PREFIX_LEN		32
96	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
97	struct pci_dev *pdev = trans_pcie->pci_dev;
98	u32 i, pos, alloc_size, *ptr, *buf;
99	char *prefix;
100
101	if (trans_pcie->pcie_dbg_dumped_once)
102		return;
103
104	/* Should be a multiple of 4 */
105	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
106	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
107	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);
108
109	/* Alloc a max size buffer */
110	alloc_size = PCI_ERR_ROOT_ERR_SRC +  4 + PREFIX_LEN;
111	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
112	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
113	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);
114
115	buf = kmalloc(alloc_size, GFP_ATOMIC);
116	if (!buf)
117		return;
118	prefix = (char *)buf + alloc_size - PREFIX_LEN;
119
120	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
121
122	/* Print wifi device registers */
123	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
124	IWL_ERR(trans, "iwlwifi device config registers:\n");
125	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
126		if (pci_read_config_dword(pdev, i, ptr))
127			goto err_read;
128	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
129
130	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
131	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
132		*ptr = iwl_read32(trans, i);
133	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
134
135	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
136	if (pos) {
137		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
138		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
139			if (pci_read_config_dword(pdev, pos + i, ptr))
140				goto err_read;
141		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
142			       32, 4, buf, i, 0);
143	}
144
145	/* Print parent device registers next */
146	if (!pdev->bus->self)
147		goto out;
148
149	pdev = pdev->bus->self;
150	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
151
152	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
153		pci_name(pdev));
154	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
155		if (pci_read_config_dword(pdev, i, ptr))
156			goto err_read;
157	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
158
159	/* Print root port AER registers */
160	pos = 0;
161	pdev = pcie_find_root_port(pdev);
162	if (pdev)
163		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
164	if (pos) {
165		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
166			pci_name(pdev));
167		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
168		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
169			if (pci_read_config_dword(pdev, pos + i, ptr))
170				goto err_read;
171		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
172			       4, buf, i, 0);
173	}
174	goto out;
175
176err_read:
177	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
178	IWL_ERR(trans, "Read failed at 0x%X\n", i);
179out:
180	trans_pcie->pcie_dbg_dumped_once = 1;
181	kfree(buf);
182}
183
184static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
185{
186	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
187	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
188	usleep_range(5000, 6000);
189}
190
191static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
192{
193	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
194
195	if (!fw_mon->size)
196		return;
197
198	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
199			  fw_mon->physical);
200
201	fw_mon->block = NULL;
202	fw_mon->physical = 0;
203	fw_mon->size = 0;
204}
205
206static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
207					    u8 max_power, u8 min_power)
208{
209	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
210	void *block = NULL;
211	dma_addr_t physical = 0;
212	u32 size = 0;
213	u8 power;
214
215	if (fw_mon->size)
216		return;
217
218	for (power = max_power; power >= min_power; power--) {
219		size = BIT(power);
220		block = dma_alloc_coherent(trans->dev, size, &physical,
221					   GFP_KERNEL | __GFP_NOWARN);
222		if (!block)
223			continue;
224
225		IWL_INFO(trans,
226			 "Allocated 0x%08x bytes for firmware monitor.\n",
227			 size);
228		break;
229	}
230
231	if (WARN_ON_ONCE(!block))
232		return;
233
234	if (power != max_power)
235		IWL_ERR(trans,
236			"Sorry - debug buffer is only %luK while you requested %luK\n",
237			(unsigned long)BIT(power - 10),
238			(unsigned long)BIT(max_power - 10));
239
240	fw_mon->block = block;
241	fw_mon->physical = physical;
242	fw_mon->size = size;
243}
244
245void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
246{
247	if (!max_power) {
248		/* default max_power is maximum */
249		max_power = 26;
250	} else {
251		max_power += 11;
252	}
253
254	if (WARN(max_power > 26,
255		 "External buffer size for monitor is too big %d, check the FW TLV\n",
256		 max_power))
257		return;
258
259	if (trans->dbg.fw_mon.size)
260		return;
261
262	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
263}
264
265static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
266{
267	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
268		    ((reg & 0x0000ffff) | (2 << 28)));
269	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
270}
271
272static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
273{
274	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
275	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
276		    ((reg & 0x0000ffff) | (3 << 28)));
277}
278
279static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
280{
281	if (trans->cfg->apmg_not_supported)
282		return;
283
284	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
285		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
286				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
287				       ~APMG_PS_CTRL_MSK_PWR_SRC);
288	else
289		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
290				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
291				       ~APMG_PS_CTRL_MSK_PWR_SRC);
292}
293
294/* PCI registers */
295#define PCI_CFG_RETRY_TIMEOUT	0x041
296
297void iwl_pcie_apm_config(struct iwl_trans *trans)
298{
299	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
300	u16 lctl;
301	u16 cap;
302
303	/*
304	 * L0S states have been found to be unstable with our devices
305	 * and in newer hardware they are not officially supported at
306	 * all, so we must always set the L0S_DISABLED bit.
307	 */
308	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);
309
310	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
311	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
312
313	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
314	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
315	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
316			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
317			trans->ltr_enabled ? "En" : "Dis");
318}
319
320/*
321 * Start up NIC's basic functionality after it has been reset
322 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
323 * NOTE:  This does not load uCode nor start the embedded processor
324 */
325static int iwl_pcie_apm_init(struct iwl_trans *trans)
326{
327	int ret;
328
329	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
330
331	/*
332	 * Use "set_bit" below rather than "write", to preserve any hardware
333	 * bits already set by default after reset.
334	 */
335
336	/* Disable L0S exit timer (platform NMI Work/Around) */
337	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
338		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
339			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
340
341	/*
342	 * Disable L0s without affecting L1;
343	 *  don't wait for ICH L0s (ICH bug W/A)
344	 */
345	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
346		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
347
348	/* Set FH wait threshold to maximum (HW error during stress W/A) */
349	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
350
351	/*
352	 * Enable HAP INTA (interrupt from management bus) to
353	 * wake device's PCI Express link L1a -> L0s
354	 */
355	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
356		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
357
358	iwl_pcie_apm_config(trans);
359
360	/* Configure analog phase-lock-loop before activating to D0A */
361	if (trans->trans_cfg->base_params->pll_cfg)
362		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
363
364	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
365	if (ret)
366		return ret;
367
368	if (trans->cfg->host_interrupt_operation_mode) {
369		/*
370		 * This is a bit of an abuse - This is needed for 7260 / 3160
371		 * only check host_interrupt_operation_mode even if this is
372		 * not related to host_interrupt_operation_mode.
373		 *
374		 * Enable the oscillator to count wake up time for L1 exit. This
375		 * consumes slightly more power (100uA) - but allows to be sure
376		 * that we wake up from L1 on time.
377		 *
378		 * This looks weird: read twice the same register, discard the
379		 * value, set a bit, and yet again, read that same register
380		 * just to discard the value. But that's the way the hardware
381		 * seems to like it.
382		 */
383		iwl_read_prph(trans, OSC_CLK);
384		iwl_read_prph(trans, OSC_CLK);
385		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
386		iwl_read_prph(trans, OSC_CLK);
387		iwl_read_prph(trans, OSC_CLK);
388	}
389
390	/*
391	 * Enable DMA clock and wait for it to stabilize.
392	 *
393	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
394	 * bits do not disable clocks.  This preserves any hardware
395	 * bits already set by default in "CLK_CTRL_REG" after reset.
396	 */
397	if (!trans->cfg->apmg_not_supported) {
398		iwl_write_prph(trans, APMG_CLK_EN_REG,
399			       APMG_CLK_VAL_DMA_CLK_RQT);
400		udelay(20);
401
402		/* Disable L1-Active */
403		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
404				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
405
406		/* Clear the interrupt in APMG if the NIC is in RFKILL */
407		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
408			       APMG_RTC_INT_STT_RFKILL);
409	}
410
411	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
412
413	return 0;
414}
415
416/*
417 * Enable LP XTAL to avoid HW bug where device may consume much power if
418 * FW is not loaded after device reset. LP XTAL is disabled by default
419 * after device HW reset. Do it only if XTAL is fed by internal source.
420 * Configure device's "persistence" mode to avoid resetting XTAL again when
421 * SHRD_HW_RST occurs in S3.
422 */
423static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
424{
425	int ret;
426	u32 apmg_gp1_reg;
427	u32 apmg_xtal_cfg_reg;
428	u32 dl_cfg_reg;
429
430	/* Force XTAL ON */
431	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
432				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
433
434	iwl_trans_pcie_sw_reset(trans);
435
436	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
437	if (WARN_ON(ret)) {
438		/* Release XTAL ON request */
439		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
440					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
441		return;
442	}
443
444	/*
445	 * Clear "disable persistence" to avoid LP XTAL resetting when
446	 * SHRD_HW_RST is applied in S3.
447	 */
448	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
449				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);
450
451	/*
452	 * Force APMG XTAL to be active to prevent its disabling by HW
453	 * caused by APMG idle state.
454	 */
455	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
456						    SHR_APMG_XTAL_CFG_REG);
457	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
458				 apmg_xtal_cfg_reg |
459				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
460
461	iwl_trans_pcie_sw_reset(trans);
462
463	/* Enable LP XTAL by indirect access through CSR */
464	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
465	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
466				 SHR_APMG_GP1_WF_XTAL_LP_EN |
467				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
468
469	/* Clear delay line clock power up */
470	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
471	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
472				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
473
474	/*
475	 * Enable persistence mode to avoid LP XTAL resetting when
476	 * SHRD_HW_RST is applied in S3.
477	 */
478	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
479		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
480
481	/*
482	 * Clear "initialization complete" bit to move adapter from
483	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
484	 */
485	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
486
487	/* Activates XTAL resources monitor */
488	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
489				 CSR_MONITOR_XTAL_RESOURCES);
490
491	/* Release XTAL ON request */
492	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
493				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
494	udelay(10);
495
496	/* Release APMG XTAL */
497	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
498				 apmg_xtal_cfg_reg &
499				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
500}
501
502void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
503{
504	int ret;
505
506	/* stop device's busmaster DMA activity */
507	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
508
509	ret = iwl_poll_bit(trans, CSR_RESET,
510			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
511			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
512	if (ret < 0)
513		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
514
515	IWL_DEBUG_INFO(trans, "stop master\n");
516}
517
518static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
519{
520	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
521
522	if (op_mode_leave) {
523		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
524			iwl_pcie_apm_init(trans);
525
526		/* inform ME that we are leaving */
527		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
528			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
529					  APMG_PCIDEV_STT_VAL_WAKE_ME);
530		else if (trans->trans_cfg->device_family >=
531			 IWL_DEVICE_FAMILY_8000) {
532			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
533				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
534			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
535				    CSR_HW_IF_CONFIG_REG_PREPARE |
536				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
537			mdelay(1);
538			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
539				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
540		}
541		mdelay(5);
542	}
543
544	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
545
546	/* Stop device's DMA activity */
547	iwl_pcie_apm_stop_master(trans);
548
549	if (trans->cfg->lp_xtal_workaround) {
550		iwl_pcie_apm_lp_xtal_enable(trans);
551		return;
552	}
553
554	iwl_trans_pcie_sw_reset(trans);
555
556	/*
557	 * Clear "initialization complete" bit to move adapter from
558	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
559	 */
560	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
561}
562
563static int iwl_pcie_nic_init(struct iwl_trans *trans)
564{
565	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
566	int ret;
567
568	/* nic_init */
569	spin_lock(&trans_pcie->irq_lock);
570	ret = iwl_pcie_apm_init(trans);
571	spin_unlock(&trans_pcie->irq_lock);
572
573	if (ret)
574		return ret;
575
576	iwl_pcie_set_pwr(trans, false);
577
578	iwl_op_mode_nic_config(trans->op_mode);
579
580	/* Allocate the RX queue, or reset if it is already allocated */
581	iwl_pcie_rx_init(trans);
582
583	/* Allocate or reset and init all Tx and Command queues */
584	if (iwl_pcie_tx_init(trans))
585		return -ENOMEM;
586
587	if (trans->trans_cfg->base_params->shadow_reg_enable) {
588		/* enable shadow regs in HW */
589		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
590		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
591	}
592
593	return 0;
594}
595
596#define HW_READY_TIMEOUT (50)
597
598/* Note: returns poll_bit return value, which is >= 0 if success */
599static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
600{
601	int ret;
602
603	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
604		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
605
606	/* See if we got it */
607	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
608			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
609			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
610			   HW_READY_TIMEOUT);
611
612	if (ret >= 0)
613		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
614
615	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
616	return ret;
617}
618
619/* Note: returns standard 0/-ERROR code */
620int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
621{
622	int ret;
623	int iter;
624
625	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
626
627	ret = iwl_pcie_set_hw_ready(trans);
628	/* If the card is ready, exit 0 */
629	if (ret >= 0)
630		return 0;
631
632	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
633		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
634	usleep_range(1000, 2000);
635
636	for (iter = 0; iter < 10; iter++) {
637		int t = 0;
638
639		/* If HW is not ready, prepare the conditions to check again */
640		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
641			    CSR_HW_IF_CONFIG_REG_PREPARE);
642
643		do {
644			ret = iwl_pcie_set_hw_ready(trans);
645			if (ret >= 0)
646				return 0;
647
648			usleep_range(200, 1000);
649			t += 200;
650		} while (t < 150000);
651		msleep(25);
652	}
653
654	IWL_ERR(trans, "Couldn't prepare the card\n");
655
656	return ret;
657}
658
659/*
660 * ucode
661 */
662static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
663					    u32 dst_addr, dma_addr_t phy_addr,
664					    u32 byte_cnt)
665{
666	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
667		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
668
669	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
670		    dst_addr);
671
672	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
673		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
674
675	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
676		    (iwl_get_dma_hi_addr(phy_addr)
677			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
678
679	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
680		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
681		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
682		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
683
684	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
685		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
686		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
687		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
688}
689
690static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
691					u32 dst_addr, dma_addr_t phy_addr,
692					u32 byte_cnt)
693{
694	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
695	unsigned long flags;
696	int ret;
697
698	trans_pcie->ucode_write_complete = false;
699
700	if (!iwl_trans_grab_nic_access(trans, &flags))
701		return -EIO;
702
703	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
704					byte_cnt);
705	iwl_trans_release_nic_access(trans, &flags);
706
707	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
708				 trans_pcie->ucode_write_complete, 5 * HZ);
709	if (!ret) {
710		IWL_ERR(trans, "Failed to load firmware chunk!\n");
711		iwl_trans_pcie_dump_regs(trans);
712		return -ETIMEDOUT;
713	}
714
715	return 0;
716}
717
718static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
719			    const struct fw_desc *section)
720{
721	u8 *v_addr;
722	dma_addr_t p_addr;
723	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
724	int ret = 0;
725
726	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
727		     section_num);
728
729	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
730				    GFP_KERNEL | __GFP_NOWARN);
731	if (!v_addr) {
732		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
733		chunk_sz = PAGE_SIZE;
734		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
735					    &p_addr, GFP_KERNEL);
736		if (!v_addr)
737			return -ENOMEM;
738	}
739
740	for (offset = 0; offset < section->len; offset += chunk_sz) {
741		u32 copy_size, dst_addr;
742		bool extended_addr = false;
743
744		copy_size = min_t(u32, chunk_sz, section->len - offset);
745		dst_addr = section->offset + offset;
746
747		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
748		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
749			extended_addr = true;
750
751		if (extended_addr)
752			iwl_set_bits_prph(trans, LMPM_CHICK,
753					  LMPM_CHICK_EXTENDED_ADDR_SPACE);
754
755		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
756		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
757						   copy_size);
758
759		if (extended_addr)
760			iwl_clear_bits_prph(trans, LMPM_CHICK,
761					    LMPM_CHICK_EXTENDED_ADDR_SPACE);
762
763		if (ret) {
764			IWL_ERR(trans,
765				"Could not load the [%d] uCode section\n",
766				section_num);
767			break;
768		}
769	}
770
771	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
772	return ret;
773}
774
775static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
776					   const struct fw_img *image,
777					   int cpu,
778					   int *first_ucode_section)
779{
780	int shift_param;
781	int i, ret = 0, sec_num = 0x1;
782	u32 val, last_read_idx = 0;
783
784	if (cpu == 1) {
785		shift_param = 0;
786		*first_ucode_section = 0;
787	} else {
788		shift_param = 16;
789		(*first_ucode_section)++;
790	}
791
792	for (i = *first_ucode_section; i < image->num_sec; i++) {
793		last_read_idx = i;
794
795		/*
796		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
797		 * CPU1 to CPU2.
798		 * PAGING_SEPARATOR_SECTION delimiter - separate between
799		 * CPU2 non paged to CPU2 paging sec.
800		 */
801		if (!image->sec[i].data ||
802		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
803		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
804			IWL_DEBUG_FW(trans,
805				     "Break since Data not valid or Empty section, sec = %d\n",
806				     i);
807			break;
808		}
809
810		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
811		if (ret)
812			return ret;
813
814		/* Notify ucode of loaded section number and status */
815		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
816		val = val | (sec_num << shift_param);
817		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
818
819		sec_num = (sec_num << 1) | 0x1;
820	}
821
822	*first_ucode_section = last_read_idx;
823
824	iwl_enable_interrupts(trans);
825
826	if (trans->trans_cfg->use_tfh) {
827		if (cpu == 1)
828			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
829				       0xFFFF);
830		else
831			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
832				       0xFFFFFFFF);
833	} else {
834		if (cpu == 1)
835			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
836					   0xFFFF);
837		else
838			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
839					   0xFFFFFFFF);
840	}
841
842	return 0;
843}
844
845static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
846				      const struct fw_img *image,
847				      int cpu,
848				      int *first_ucode_section)
849{
850	int i, ret = 0;
851	u32 last_read_idx = 0;
852
853	if (cpu == 1)
854		*first_ucode_section = 0;
855	else
856		(*first_ucode_section)++;
857
858	for (i = *first_ucode_section; i < image->num_sec; i++) {
859		last_read_idx = i;
860
861		/*
862		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
863		 * CPU1 to CPU2.
864		 * PAGING_SEPARATOR_SECTION delimiter - separate between
865		 * CPU2 non paged to CPU2 paging sec.
866		 */
867		if (!image->sec[i].data ||
868		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
869		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
870			IWL_DEBUG_FW(trans,
871				     "Break since Data not valid or Empty section, sec = %d\n",
872				     i);
873			break;
874		}
875
876		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
877		if (ret)
878			return ret;
879	}
880
881	*first_ucode_section = last_read_idx;
882
883	return 0;
884}
885
886static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
887{
888	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
889	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
890		&trans->dbg.fw_mon_cfg[alloc_id];
891	struct iwl_dram_data *frag;
892
893	if (!iwl_trans_dbg_ini_valid(trans))
894		return;
895
896	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
897	    IWL_FW_INI_LOCATION_SRAM_PATH) {
898		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
899		/* set sram monitor by enabling bit 7 */
900		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
901			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);
902
903		return;
904	}
905
906	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
907	    IWL_FW_INI_LOCATION_DRAM_PATH ||
908	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
909		return;
910
911	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];
912
913	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
914		     alloc_id);
915
916	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
917			    frag->physical >> MON_BUFF_SHIFT_VER2);
918	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
919			    (frag->physical + frag->size - 256) >>
920			    MON_BUFF_SHIFT_VER2);
921}
922
923void iwl_pcie_apply_destination(struct iwl_trans *trans)
924{
925	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
926	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
927	int i;
928
929	if (iwl_trans_dbg_ini_valid(trans)) {
930		iwl_pcie_apply_destination_ini(trans);
931		return;
932	}
933
934	IWL_INFO(trans, "Applying debug destination %s\n",
935		 get_fw_dbg_mode_string(dest->monitor_mode));
936
937	if (dest->monitor_mode == EXTERNAL_MODE)
938		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
939	else
940		IWL_WARN(trans, "PCI should have external buffer debug\n");
941
942	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
943		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
944		u32 val = le32_to_cpu(dest->reg_ops[i].val);
945
946		switch (dest->reg_ops[i].op) {
947		case CSR_ASSIGN:
948			iwl_write32(trans, addr, val);
949			break;
950		case CSR_SETBIT:
951			iwl_set_bit(trans, addr, BIT(val));
952			break;
953		case CSR_CLEARBIT:
954			iwl_clear_bit(trans, addr, BIT(val));
955			break;
956		case PRPH_ASSIGN:
957			iwl_write_prph(trans, addr, val);
958			break;
959		case PRPH_SETBIT:
960			iwl_set_bits_prph(trans, addr, BIT(val));
961			break;
962		case PRPH_CLEARBIT:
963			iwl_clear_bits_prph(trans, addr, BIT(val));
964			break;
965		case PRPH_BLOCKBIT:
966			if (iwl_read_prph(trans, addr) & BIT(val)) {
967				IWL_ERR(trans,
968					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
969					val, addr);
970				goto monitor;
971			}
972			break;
973		default:
974			IWL_ERR(trans, "FW debug - unknown OP %d\n",
975				dest->reg_ops[i].op);
976			break;
977		}
978	}
979
980monitor:
981	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
982		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
983			       fw_mon->physical >> dest->base_shift);
984		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
985			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
986				       (fw_mon->physical + fw_mon->size -
987					256) >> dest->end_shift);
988		else
989			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
990				       (fw_mon->physical + fw_mon->size) >>
991				       dest->end_shift);
992	}
993}
994
995static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
996				const struct fw_img *image)
997{
998	int ret = 0;
999	int first_ucode_section;
1000
1001	IWL_DEBUG_FW(trans, "working with %s CPU\n",
1002		     image->is_dual_cpus ? "Dual" : "Single");
1003
1004	/* load to FW the binary non secured sections of CPU1 */
1005	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
1006	if (ret)
1007		return ret;
1008
1009	if (image->is_dual_cpus) {
1010		/* set CPU2 header address */
1011		iwl_write_prph(trans,
1012			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
1013			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);
1014
1015		/* load to FW the binary sections of CPU2 */
1016		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
1017						 &first_ucode_section);
1018		if (ret)
1019			return ret;
1020	}
1021
1022	if (iwl_pcie_dbg_on(trans))
1023		iwl_pcie_apply_destination(trans);
1024
1025	iwl_enable_interrupts(trans);
1026
1027	/* release CPU reset */
1028	iwl_write32(trans, CSR_RESET, 0);
1029
1030	return 0;
1031}
1032
1033static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
1034					  const struct fw_img *image)
1035{
1036	int ret = 0;
1037	int first_ucode_section;
1038
1039	IWL_DEBUG_FW(trans, "working with %s CPU\n",
1040		     image->is_dual_cpus ? "Dual" : "Single");
1041
1042	if (iwl_pcie_dbg_on(trans))
1043		iwl_pcie_apply_destination(trans);
1044
1045	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
1046			iwl_read_prph(trans, WFPM_GP2));
1047
1048	/*
1049	 * Set default value. On resume reading the values that were
1050	 * zeored can provide debug data on the resume flow.
1051	 * This is for debugging only and has no functional impact.
1052	 */
1053	iwl_write_prph(trans, WFPM_GP2, 0x01010101);
1054
1055	/* configure the ucode to be ready to get the secured image */
1056	/* release CPU reset */
1057	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
1058
1059	/* load to FW the binary Secured sections of CPU1 */
1060	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
1061					      &first_ucode_section);
1062	if (ret)
1063		return ret;
1064
1065	/* load to FW the binary sections of CPU2 */
1066	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
1067					       &first_ucode_section);
1068}
1069
1070bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
1071{
1072	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1073	bool hw_rfkill = iwl_is_rfkill_set(trans);
1074	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1075	bool report;
1076
1077	if (hw_rfkill) {
1078		set_bit(STATUS_RFKILL_HW, &trans->status);
1079		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1080	} else {
1081		clear_bit(STATUS_RFKILL_HW, &trans->status);
1082		if (trans_pcie->opmode_down)
1083			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1084	}
1085
1086	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1087
1088	if (prev != report)
1089		iwl_trans_pcie_rf_kill(trans, report);
1090
1091	return hw_rfkill;
1092}
1093
1094struct iwl_causes_list {
1095	u32 cause_num;
1096	u32 mask_reg;
1097	u8 addr;
1098};
1099
1100static struct iwl_causes_list causes_list[] = {
1101	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
1102	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
1103	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
1104	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
1105	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
1106	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
1107	{MSIX_HW_INT_CAUSES_REG_IML,            CSR_MSIX_HW_INT_MASK_AD, 0x12},
1108	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
1109	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
1110	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
1111	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
1112	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
1113	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
1114	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
1115	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
1116};
1117
1118static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
1119{
1120	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1121	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
1122	int i, arr_size = ARRAY_SIZE(causes_list);
1123	struct iwl_causes_list *causes = causes_list;
1124
1125	/*
1126	 * Access all non RX causes and map them to the default irq.
1127	 * In case we are missing at least one interrupt vector,
1128	 * the first interrupt vector will serve non-RX and FBQ causes.
1129	 */
1130	for (i = 0; i < arr_size; i++) {
1131		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
1132		iwl_clear_bit(trans, causes[i].mask_reg,
1133			      causes[i].cause_num);
1134	}
1135}
1136
1137static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
1138{
1139	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1140	u32 offset =
1141		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
1142	u32 val, idx;
1143
1144	/*
1145	 * The first RX queue - fallback queue, which is designated for
1146	 * management frame, command responses etc, is always mapped to the
1147	 * first interrupt vector. The other RX queues are mapped to
1148	 * the other (N - 2) interrupt vectors.
1149	 */
1150	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
1151	for (idx = 1; idx < trans->num_rx_queues; idx++) {
1152		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
1153			   MSIX_FH_INT_CAUSES_Q(idx - offset));
1154		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
1155	}
1156	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
1157
1158	val = MSIX_FH_INT_CAUSES_Q(0);
1159	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
1160		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
1161	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
1162
1163	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
1164		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
1165}
1166
1167void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
1168{
1169	struct iwl_trans *trans = trans_pcie->trans;
1170
1171	if (!trans_pcie->msix_enabled) {
1172		if (trans->trans_cfg->mq_rx_supported &&
1173		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1174			iwl_write_umac_prph(trans, UREG_CHICK,
1175					    UREG_CHICK_MSI_ENABLE);
1176		return;
1177	}
1178	/*
1179	 * The IVAR table needs to be configured again after reset,
1180	 * but if the device is disabled, we can't write to
1181	 * prph.
1182	 */
1183	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
1184		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
1185
1186	/*
1187	 * Each cause from the causes list above and the RX causes is
1188	 * represented as a byte in the IVAR table. The first nibble
1189	 * represents the bound interrupt vector of the cause, the second
1190	 * represents no auto clear for this cause. This will be set if its
1191	 * interrupt vector is bound to serve other causes.
1192	 */
1193	iwl_pcie_map_rx_causes(trans);
1194
1195	iwl_pcie_map_non_rx_causes(trans);
1196}
1197
1198static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
1199{
1200	struct iwl_trans *trans = trans_pcie->trans;
1201
1202	iwl_pcie_conf_msix_hw(trans_pcie);
1203
1204	if (!trans_pcie->msix_enabled)
1205		return;
1206
1207	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
1208	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
1209	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
1210	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
1211}
1212
1213static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1214{
1215	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1216
1217	lockdep_assert_held(&trans_pcie->mutex);
1218
1219	if (trans_pcie->is_down)
1220		return;
1221
1222	trans_pcie->is_down = true;
1223
1224	/* tell the device to stop sending interrupts */
1225	iwl_disable_interrupts(trans);
1226
1227	/* device going down, Stop using ICT table */
1228	iwl_pcie_disable_ict(trans);
1229
1230	/*
1231	 * If a HW restart happens during firmware loading,
1232	 * then the firmware loading might call this function
1233	 * and later it might be called again due to the
1234	 * restart. So don't process again if the device is
1235	 * already dead.
1236	 */
1237	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1238		IWL_DEBUG_INFO(trans,
1239			       "DEVICE_ENABLED bit was set and is now cleared\n");
1240		iwl_pcie_tx_stop(trans);
1241		iwl_pcie_rx_stop(trans);
1242
1243		/* Power-down device's busmaster DMA clocks */
1244		if (!trans->cfg->apmg_not_supported) {
1245			iwl_write_prph(trans, APMG_CLK_DIS_REG,
1246				       APMG_CLK_VAL_DMA_CLK_RQT);
1247			udelay(5);
1248		}
1249	}
1250
1251	/* Make sure (redundant) we've released our request to stay awake */
1252	iwl_clear_bit(trans, CSR_GP_CNTRL,
1253		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1254
1255	/* Stop the device, and put it in low power state */
1256	iwl_pcie_apm_stop(trans, false);
1257
1258	iwl_trans_pcie_sw_reset(trans);
1259
1260	/*
1261	 * Upon stop, the IVAR table gets erased, so msi-x won't
1262	 * work. This causes a bug in RF-KILL flows, since the interrupt
1263	 * that enables radio won't fire on the correct irq, and the
1264	 * driver won't be able to handle the interrupt.
1265	 * Configure the IVAR table again after reset.
1266	 */
1267	iwl_pcie_conf_msix_hw(trans_pcie);
1268
1269	/*
1270	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1271	 * This is a bug in certain verions of the hardware.
1272	 * Certain devices also keep sending HW RF kill interrupt all
1273	 * the time, unless the interrupt is ACKed even if the interrupt
1274	 * should be masked. Re-ACK all the interrupts here.
1275	 */
1276	iwl_disable_interrupts(trans);
1277
1278	/* clear all status bits */
1279	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1280	clear_bit(STATUS_INT_ENABLED, &trans->status);
1281	clear_bit(STATUS_TPOWER_PMI, &trans->status);
1282
1283	/*
1284	 * Even if we stop the HW, we still want the RF kill
1285	 * interrupt
1286	 */
1287	iwl_enable_rfkill_int(trans);
1288
1289	/* re-take ownership to prevent other users from stealing the device */
1290	iwl_pcie_prepare_card_hw(trans);
1291}
1292
1293void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
1294{
1295	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1296
1297	if (trans_pcie->msix_enabled) {
1298		int i;
1299
1300		for (i = 0; i < trans_pcie->alloc_vecs; i++)
1301			synchronize_irq(trans_pcie->msix_entries[i].vector);
1302	} else {
1303		synchronize_irq(trans_pcie->pci_dev->irq);
1304	}
1305}
1306
1307static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1308				   const struct fw_img *fw, bool run_in_rfkill)
1309{
1310	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1311	bool hw_rfkill;
1312	int ret;
1313
1314	/* This may fail if AMT took ownership of the device */
1315	if (iwl_pcie_prepare_card_hw(trans)) {
1316		IWL_WARN(trans, "Exit HW not ready\n");
1317		return -EIO;
1318	}
1319
1320	iwl_enable_rfkill_int(trans);
1321
1322	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1323
1324	/*
1325	 * We enabled the RF-Kill interrupt and the handler may very
1326	 * well be running. Disable the interrupts to make sure no other
1327	 * interrupt can be fired.
1328	 */
1329	iwl_disable_interrupts(trans);
1330
1331	/* Make sure it finished running */
1332	iwl_pcie_synchronize_irqs(trans);
1333
1334	mutex_lock(&trans_pcie->mutex);
1335
1336	/* If platform's RF_KILL switch is NOT set to KILL */
1337	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1338	if (hw_rfkill && !run_in_rfkill) {
1339		ret = -ERFKILL;
1340		goto out;
1341	}
1342
1343	/* Someone called stop_device, don't try to start_fw */
1344	if (trans_pcie->is_down) {
1345		IWL_WARN(trans,
1346			 "Can't start_fw since the HW hasn't been started\n");
1347		ret = -EIO;
1348		goto out;
1349	}
1350
1351	/* make sure rfkill handshake bits are cleared */
1352	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1353	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1354		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1355
1356	/* clear (again), then enable host interrupts */
1357	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1358
1359	ret = iwl_pcie_nic_init(trans);
1360	if (ret) {
1361		IWL_ERR(trans, "Unable to init nic\n");
1362		goto out;
1363	}
1364
1365	/*
1366	 * Now, we load the firmware and don't want to be interrupted, even
1367	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
1368	 * FH_TX interrupt which is needed to load the firmware). If the
1369	 * RF-Kill switch is toggled, we will find out after having loaded
1370	 * the firmware and return the proper value to the caller.
1371	 */
1372	iwl_enable_fw_load_int(trans);
1373
1374	/* really make sure rfkill handshake bits are cleared */
1375	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1376	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1377
1378	/* Load the given image to the HW */
1379	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1380		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1381	else
1382		ret = iwl_pcie_load_given_ucode(trans, fw);
1383
1384	/* re-check RF-Kill state since we may have missed the interrupt */
1385	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
1386	if (hw_rfkill && !run_in_rfkill)
1387		ret = -ERFKILL;
1388
1389out:
1390	mutex_unlock(&trans_pcie->mutex);
1391	return ret;
1392}
1393
1394static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1395{
1396	iwl_pcie_reset_ict(trans);
1397	iwl_pcie_tx_start(trans, scd_addr);
1398}
1399
1400void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
1401				       bool was_in_rfkill)
1402{
1403	bool hw_rfkill;
1404
1405	/*
1406	 * Check again since the RF kill state may have changed while
1407	 * all the interrupts were disabled, in this case we couldn't
1408	 * receive the RF kill interrupt and update the state in the
1409	 * op_mode.
1410	 * Don't call the op_mode if the rkfill state hasn't changed.
1411	 * This allows the op_mode to call stop_device from the rfkill
1412	 * notification without endless recursion. Under very rare
1413	 * circumstances, we might have a small recursion if the rfkill
1414	 * state changed exactly now while we were called from stop_device.
1415	 * This is very unlikely but can happen and is supported.
1416	 */
1417	hw_rfkill = iwl_is_rfkill_set(trans);
1418	if (hw_rfkill) {
1419		set_bit(STATUS_RFKILL_HW, &trans->status);
1420		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1421	} else {
1422		clear_bit(STATUS_RFKILL_HW, &trans->status);
1423		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1424	}
1425	if (hw_rfkill != was_in_rfkill)
1426		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1427}
1428
1429static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1430{
1431	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1432	bool was_in_rfkill;
1433
1434	mutex_lock(&trans_pcie->mutex);
1435	trans_pcie->opmode_down = true;
1436	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1437	_iwl_trans_pcie_stop_device(trans);
1438	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
1439	mutex_unlock(&trans_pcie->mutex);
1440}
1441
1442void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
1443{
1444	struct iwl_trans_pcie __maybe_unused *trans_pcie =
1445		IWL_TRANS_GET_PCIE_TRANS(trans);
1446
1447	lockdep_assert_held(&trans_pcie->mutex);
1448
1449	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
1450		 state ? "disabled" : "enabled");
1451	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
1452		if (trans->trans_cfg->gen2)
1453			_iwl_trans_pcie_gen2_stop_device(trans);
1454		else
1455			_iwl_trans_pcie_stop_device(trans);
1456	}
1457}
1458
1459void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
1460				  bool test, bool reset)
1461{
1462	iwl_disable_interrupts(trans);
1463
1464	/*
1465	 * in testing mode, the host stays awake and the
1466	 * hardware won't be reset (not even partially)
1467	 */
1468	if (test)
1469		return;
1470
1471	iwl_pcie_disable_ict(trans);
1472
1473	iwl_pcie_synchronize_irqs(trans);
1474
1475	iwl_clear_bit(trans, CSR_GP_CNTRL,
1476		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1477	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1478
1479	if (reset) {
1480		/*
1481		 * reset TX queues -- some of their registers reset during S3
1482		 * so if we don't reset everything here the D3 image would try
1483		 * to execute some invalid memory upon resume
1484		 */
1485		iwl_trans_pcie_tx_reset(trans);
1486	}
1487
1488	iwl_pcie_set_pwr(trans, true);
1489}
1490
1491static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
1492				     bool reset)
1493{
1494	int ret;
1495	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1496
1497	if (!reset)
1498		/* Enable persistence mode to avoid reset */
1499		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
1500			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
1501
1502	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1503		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
1504				    UREG_DOORBELL_TO_ISR6_SUSPEND);
1505
1506		ret = wait_event_timeout(trans_pcie->sx_waitq,
1507					 trans_pcie->sx_complete, 2 * HZ);
1508		/*
1509		 * Invalidate it toward resume.
1510		 */
1511		trans_pcie->sx_complete = false;
1512
1513		if (!ret) {
1514			IWL_ERR(trans, "Timeout entering D3\n");
1515			return -ETIMEDOUT;
1516		}
1517	}
1518	iwl_pcie_d3_complete_suspend(trans, test, reset);
1519
1520	return 0;
1521}
1522
1523static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1524				    enum iwl_d3_status *status,
1525				    bool test,  bool reset)
1526{
1527	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
1528	u32 val;
1529	int ret;
1530
1531	if (test) {
1532		iwl_enable_interrupts(trans);
1533		*status = IWL_D3_STATUS_ALIVE;
1534		goto out;
1535	}
1536
1537	iwl_set_bit(trans, CSR_GP_CNTRL,
1538		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1539
1540	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
1541	if (ret)
1542		return ret;
1543
1544	/*
1545	 * Reconfigure IVAR table in case of MSIX or reset ict table in
1546	 * MSI mode since HW reset erased it.
1547	 * Also enables interrupts - none will happen as
1548	 * the device doesn't know we're waking it up, only when
1549	 * the opmode actually tells it after this call.
1550	 */
1551	iwl_pcie_conf_msix_hw(trans_pcie);
1552	if (!trans_pcie->msix_enabled)
1553		iwl_pcie_reset_ict(trans);
1554	iwl_enable_interrupts(trans);
1555
1556	iwl_pcie_set_pwr(trans, false);
1557
1558	if (!reset) {
1559		iwl_clear_bit(trans, CSR_GP_CNTRL,
1560			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1561	} else {
1562		iwl_trans_pcie_tx_reset(trans);
1563
1564		ret = iwl_pcie_rx_init(trans);
1565		if (ret) {
1566			IWL_ERR(trans,
1567				"Failed to resume the device (RX reset)\n");
1568			return ret;
1569		}
1570	}
1571
1572	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
1573			iwl_read_umac_prph(trans, WFPM_GP2));
1574
1575	val = iwl_read32(trans, CSR_RESET);
1576	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
1577		*status = IWL_D3_STATUS_RESET;
1578	else
1579		*status = IWL_D3_STATUS_ALIVE;
1580
1581out:
1582	if (*status == IWL_D3_STATUS_ALIVE &&
1583	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1584		trans_pcie->sx_complete = false;
1585		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
1586				    UREG_DOORBELL_TO_ISR6_RESUME);
1587
1588		ret = wait_event_timeout(trans_pcie->sx_waitq,
1589					 trans_pcie->sx_complete, 2 * HZ);
1590		/*
1591		 * Invalidate it toward next suspend.
1592		 */
1593		trans_pcie->sx_complete = false;
1594
1595		if (!ret) {
1596			IWL_ERR(trans, "Timeout exiting D3\n");
1597			return -ETIMEDOUT;
1598		}
1599	}
1600	return 0;
1601}
1602
1603static void
1604iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1605			    struct iwl_trans *trans,
1606			    const struct iwl_cfg_trans_params *cfg_trans)
1607{
1608	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1609	int max_irqs, num_irqs, i, ret;
1610	u16 pci_cmd;
1611	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
1612
1613	if (!cfg_trans->mq_rx_supported)
1614		goto enable_msi;
1615
1616	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
1617		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
1618
1619	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
1620	for (i = 0; i < max_irqs; i++)
1621		trans_pcie->msix_entries[i].entry = i;
1622
1623	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
1624					 MSIX_MIN_INTERRUPT_VECTORS,
1625					 max_irqs);
1626	if (num_irqs < 0) {
1627		IWL_DEBUG_INFO(trans,
1628			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
1629			       num_irqs);
1630		goto enable_msi;
1631	}
1632	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
1633
1634	IWL_DEBUG_INFO(trans,
1635		       "MSI-X enabled. %d interrupt vectors were allocated\n",
1636		       num_irqs);
1637
1638	/*
1639	 * In case the OS provides fewer interrupts than requested, different
1640	 * causes will share the same interrupt vector as follows:
1641	 * One interrupt less: non rx causes shared with FBQ.
1642	 * Two interrupts less: non rx causes shared with FBQ and RSS.
1643	 * More than two interrupts: we will use fewer RSS queues.
1644	 */
1645	if (num_irqs <= max_irqs - 2) {
1646		trans_pcie->trans->num_rx_queues = num_irqs + 1;
1647		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1648			IWL_SHARED_IRQ_FIRST_RSS;
1649	} else if (num_irqs == max_irqs - 1) {
1650		trans_pcie->trans->num_rx_queues = num_irqs;
1651		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1652	} else {
1653		trans_pcie->trans->num_rx_queues = num_irqs - 1;
1654	}
1655	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1656
1657	trans_pcie->alloc_vecs = num_irqs;
1658	trans_pcie->msix_enabled = true;
1659	return;
1660
1661enable_msi:
1662	ret = pci_enable_msi(pdev);
1663	if (ret) {
1664		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
1665		/* enable rfkill interrupt: hw bug w/a */
1666		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
1667		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
1668			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
1669			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
1670		}
1671	}
1672}
1673
1674static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
1675{
1676	int iter_rx_q, i, ret, cpu, offset;
1677	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1678
1679	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1680	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1681	offset = 1 + i;
1682	for (; i < iter_rx_q ; i++) {
1683		/*
1684		 * Get the cpu prior to the place to search
1685		 * (i.e. return will be > i - 1).
1686		 */
1687		cpu = cpumask_next(i - offset, cpu_online_mask);
1688		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1689		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1690					    &trans_pcie->affinity_mask[i]);
1691		if (ret)
1692			IWL_ERR(trans_pcie->trans,
1693				"Failed to set affinity mask for IRQ %d\n",
1694				i);
1695	}
1696}
1697
1698static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1699				      struct iwl_trans_pcie *trans_pcie)
1700{
1701	int i;
1702
1703	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1704		int ret;
1705		struct msix_entry *msix_entry;
1706		const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1707
1708		if (!qname)
1709			return -ENOMEM;
1710
1711		msix_entry = &trans_pcie->msix_entries[i];
1712		ret = devm_request_threaded_irq(&pdev->dev,
1713						msix_entry->vector,
1714						iwl_pcie_msix_isr,
1715						(i == trans_pcie->def_irq) ?
1716						iwl_pcie_irq_msix_handler :
1717						iwl_pcie_irq_rx_msix_handler,
1718						IRQF_SHARED,
1719						qname,
1720						msix_entry);
1721		if (ret) {
1722			IWL_ERR(trans_pcie->trans,
1723				"Error allocating IRQ %d\n", i);
1724
1725			return ret;
1726		}
1727	}
1728	iwl_pcie_irq_set_affinity(trans_pcie->trans);
1729
1730	return 0;
1731}
1732
1733static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1734{
1735	u32 hpm, wprot;
1736
1737	switch (trans->trans_cfg->device_family) {
1738	case IWL_DEVICE_FAMILY_9000:
1739		wprot = PREG_PRPH_WPROT_9000;
1740		break;
1741	case IWL_DEVICE_FAMILY_22000:
1742		wprot = PREG_PRPH_WPROT_22000;
1743		break;
1744	default:
1745		return 0;
1746	}
1747
1748	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1749	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
1750		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1751
1752		if (wprot_val & PREG_WFPM_ACCESS) {
1753			IWL_ERR(trans,
1754				"Error, can not clear persistence bit\n");
1755			return -EPERM;
1756		}
1757		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1758					    hpm & ~PERSISTENCE_BIT);
1759	}
1760
1761	return 0;
1762}
1763
1764static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1765{
1766	int ret;
1767
1768	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
1769	if (ret < 0)
1770		return ret;
1771
1772	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1773			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1774	udelay(20);
1775	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1776			  HPM_HIPM_GEN_CFG_CR_PG_EN |
1777			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
1778	udelay(20);
1779	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1780			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1781
1782	iwl_trans_pcie_sw_reset(trans);
1783
1784	return 0;
1785}
1786
1787static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1788{
1789	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1790	int err;
1791
1792	lockdep_assert_held(&trans_pcie->mutex);
1793
1794	err = iwl_pcie_prepare_card_hw(trans);
1795	if (err) {
1796		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1797		return err;
1798	}
1799
1800	err = iwl_trans_pcie_clear_persistence_bit(trans);
1801	if (err)
1802		return err;
1803
1804	iwl_trans_pcie_sw_reset(trans);
1805
1806	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1807	    trans->trans_cfg->integrated) {
1808		err = iwl_pcie_gen2_force_power_gating(trans);
1809		if (err)
1810			return err;
1811	}
1812
1813	err = iwl_pcie_apm_init(trans);
1814	if (err)
1815		return err;
1816
1817	iwl_pcie_init_msix(trans_pcie);
1818
1819	/* From now on, the op_mode will be kept updated about RF kill state */
1820	iwl_enable_rfkill_int(trans);
1821
1822	trans_pcie->opmode_down = false;
1823
1824	/* Set is_down to false here so that...*/
1825	trans_pcie->is_down = false;
1826
1827	/* ...rfkill can call stop_device and set it false if needed */
1828	iwl_pcie_check_hw_rf_kill(trans);
1829
1830	return 0;
1831}
1832
1833static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1834{
1835	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1836	int ret;
1837
1838	mutex_lock(&trans_pcie->mutex);
1839	ret = _iwl_trans_pcie_start_hw(trans);
1840	mutex_unlock(&trans_pcie->mutex);
1841
1842	return ret;
1843}
1844
1845static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1846{
1847	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1848
1849	mutex_lock(&trans_pcie->mutex);
1850
1851	/* disable interrupts - don't enable HW RF kill interrupt */
1852	iwl_disable_interrupts(trans);
1853
1854	iwl_pcie_apm_stop(trans, true);
1855
1856	iwl_disable_interrupts(trans);
1857
1858	iwl_pcie_disable_ict(trans);
1859
1860	mutex_unlock(&trans_pcie->mutex);
1861
1862	iwl_pcie_synchronize_irqs(trans);
1863}
1864
1865static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1866{
1867	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1868}
1869
1870static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1871{
1872	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1873}
1874
1875static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1876{
1877	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1878}
1879
1880static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1881{
1882	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1883		return 0x00FFFFFF;
1884	else
1885		return 0x000FFFFF;
1886}
1887
1888static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1889{
1890	u32 mask = iwl_trans_pcie_prph_msk(trans);
1891
1892	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1893			       ((reg & mask) | (3 << 24)));
1894	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1895}
1896
1897static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1898				      u32 val)
1899{
1900	u32 mask = iwl_trans_pcie_prph_msk(trans);
1901
1902	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1903			       ((addr & mask) | (3 << 24)));
1904	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1905}
1906
1907static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1908				     const struct iwl_trans_config *trans_cfg)
1909{
1910	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1911
1912	/* free all first - we might be reconfigured for a different size */
1913	iwl_pcie_free_rbs_pool(trans);
1914
1915	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
1916	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
1917	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1918	trans->txqs.page_offs = trans_cfg->cb_data_offs;
1919	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
1920
1921	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1922		trans_pcie->n_no_reclaim_cmds = 0;
1923	else
1924		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1925	if (trans_pcie->n_no_reclaim_cmds)
1926		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1927		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1928
1929	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1930	trans_pcie->rx_page_order =
1931		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1932	trans_pcie->rx_buf_bytes =
1933		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
1934	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
1935	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1936		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
1937
1938	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
1939	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1940	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
1941
1942	trans->command_groups = trans_cfg->command_groups;
1943	trans->command_groups_size = trans_cfg->command_groups_size;
1944
1945	/* Initialize NAPI here - it should be before registering to mac80211
1946	 * in the opmode but after the HW struct is allocated.
1947	 * As this function may be called again in some corner cases don't
1948	 * do anything if NAPI was already initialized.
1949	 */
1950	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
1951		init_dummy_netdev(&trans_pcie->napi_dev);
1952}
1953
1954void iwl_trans_pcie_free(struct iwl_trans *trans)
1955{
1956	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1957	int i;
1958
1959	iwl_pcie_synchronize_irqs(trans);
1960
1961	if (trans->trans_cfg->gen2)
1962		iwl_txq_gen2_tx_free(trans);
1963	else
1964		iwl_pcie_tx_free(trans);
1965	iwl_pcie_rx_free(trans);
1966
1967	if (trans_pcie->rba.alloc_wq) {
1968		destroy_workqueue(trans_pcie->rba.alloc_wq);
1969		trans_pcie->rba.alloc_wq = NULL;
1970	}
1971
1972	if (trans_pcie->msix_enabled) {
1973		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1974			irq_set_affinity_hint(
1975				trans_pcie->msix_entries[i].vector,
1976				NULL);
1977		}
1978
1979		trans_pcie->msix_enabled = false;
1980	} else {
1981		iwl_pcie_free_ict(trans);
1982	}
1983
1984	iwl_pcie_free_fw_monitor(trans);
1985
1986	if (trans_pcie->pnvm_dram.size)
1987		dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
1988				  trans_pcie->pnvm_dram.block,
1989				  trans_pcie->pnvm_dram.physical);
1990
1991	mutex_destroy(&trans_pcie->mutex);
1992	iwl_trans_free(trans);
1993}
1994
1995static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1996{
1997	if (state)
1998		set_bit(STATUS_TPOWER_PMI, &trans->status);
1999	else
2000		clear_bit(STATUS_TPOWER_PMI, &trans->status);
2001}
2002
2003struct iwl_trans_pcie_removal {
2004	struct pci_dev *pdev;
2005	struct work_struct work;
2006};
2007
2008static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
2009{
2010	struct iwl_trans_pcie_removal *removal =
2011		container_of(wk, struct iwl_trans_pcie_removal, work);
2012	struct pci_dev *pdev = removal->pdev;
2013	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
2014
2015	dev_err(&pdev->dev, "Device gone - attempting removal\n");
2016	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
2017	pci_lock_rescan_remove();
2018	pci_dev_put(pdev);
2019	pci_stop_and_remove_bus_device(pdev);
2020	pci_unlock_rescan_remove();
2021
2022	kfree(removal);
2023	module_put(THIS_MODULE);
2024}
2025
2026static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
2027					   unsigned long *flags)
2028{
2029	int ret;
2030	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2031
2032	spin_lock_bh(&trans_pcie->reg_lock);
2033
2034	if (trans_pcie->cmd_hold_nic_awake)
2035		goto out;
2036
2037	/* this bit wakes up the NIC */
2038	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
2039				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2040	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
2041		udelay(2);
2042
2043	/*
2044	 * These bits say the device is running, and should keep running for
2045	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
2046	 * but they do not indicate that embedded SRAM is restored yet;
2047	 * HW with volatile SRAM must save/restore contents to/from
2048	 * host DRAM when sleeping/waking for power-saving.
2049	 * Each direction takes approximately 1/4 millisecond; with this
2050	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2051	 * series of register accesses are expected (e.g. reading Event Log),
2052	 * to keep device from sleeping.
2053	 *
2054	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2055	 * SRAM is okay/restored.  We don't check that here because this call
2056	 * is just for hardware register access; but GP1 MAC_SLEEP
2057	 * check is a good idea before accessing the SRAM of HW with
2058	 * volatile SRAM (e.g. reading Event Log).
2059	 *
2060	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
2061	 * and do not save/restore SRAM when power cycling.
2062	 */
2063	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
2064			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
2065			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2066			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
2067	if (unlikely(ret < 0)) {
2068		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2069
2070		WARN_ONCE(1,
2071			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2072			  cntrl);
2073
2074		iwl_trans_pcie_dump_regs(trans);
2075
2076		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
2077			struct iwl_trans_pcie_removal *removal;
2078
2079			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2080				goto err;
2081
2082			IWL_ERR(trans, "Device gone - scheduling removal!\n");
2083
2084			/*
2085			 * get a module reference to avoid doing this
2086			 * while unloading anyway and to avoid
2087			 * scheduling a work with code that's being
2088			 * removed.
2089			 */
2090			if (!try_module_get(THIS_MODULE)) {
2091				IWL_ERR(trans,
2092					"Module is being unloaded - abort\n");
2093				goto err;
2094			}
2095
2096			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2097			if (!removal) {
2098				module_put(THIS_MODULE);
2099				goto err;
2100			}
2101			/*
2102			 * we don't need to clear this flag, because
2103			 * the trans will be freed and reallocated.
2104			*/
2105			set_bit(STATUS_TRANS_DEAD, &trans->status);
2106
2107			removal->pdev = to_pci_dev(trans->dev);
2108			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2109			pci_dev_get(removal->pdev);
2110			schedule_work(&removal->work);
2111		} else {
2112			iwl_write32(trans, CSR_RESET,
2113				    CSR_RESET_REG_FLAG_FORCE_NMI);
2114		}
2115
2116err:
2117		spin_unlock_bh(&trans_pcie->reg_lock);
2118		return false;
2119	}
2120
2121out:
2122	/*
2123	 * Fool sparse by faking we release the lock - sparse will
2124	 * track nic_access anyway.
2125	 */
2126	__release(&trans_pcie->reg_lock);
2127	return true;
2128}
2129
2130static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
2131					      unsigned long *flags)
2132{
2133	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2134
2135	lockdep_assert_held(&trans_pcie->reg_lock);
2136
2137	/*
2138	 * Fool sparse by faking we acquiring the lock - sparse will
2139	 * track nic_access anyway.
2140	 */
2141	__acquire(&trans_pcie->reg_lock);
2142
2143	if (trans_pcie->cmd_hold_nic_awake)
2144		goto out;
2145
2146	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2147				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2148	/*
2149	 * Above we read the CSR_GP_CNTRL register, which will flush
2150	 * any previous writes, but we need the write that clears the
2151	 * MAC_ACCESS_REQ bit to be performed before any other writes
2152	 * scheduled on different CPUs (after we drop reg_lock).
2153	 */
2154out:
2155	spin_unlock_bh(&trans_pcie->reg_lock);
2156}
2157
2158static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2159				   void *buf, int dwords)
2160{
2161	unsigned long flags;
2162	int offs = 0;
2163	u32 *vals = buf;
2164
2165	while (offs < dwords) {
2166		/* limit the time we spin here under lock to 1/2s */
2167		unsigned long end = jiffies + HZ / 2;
2168		bool resched = false;
2169
2170		if (iwl_trans_grab_nic_access(trans, &flags)) {
2171			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2172				    addr + 4 * offs);
2173
2174			while (offs < dwords) {
2175				vals[offs] = iwl_read32(trans,
2176							HBUS_TARG_MEM_RDAT);
2177				offs++;
2178
2179				if (time_after(jiffies, end)) {
2180					resched = true;
2181					break;
2182				}
2183			}
2184			iwl_trans_release_nic_access(trans, &flags);
2185
2186			if (resched)
2187				cond_resched();
2188		} else {
2189			return -EBUSY;
2190		}
2191	}
2192
2193	return 0;
2194}
2195
2196static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2197				    const void *buf, int dwords)
2198{
2199	unsigned long flags;
2200	int offs, ret = 0;
2201	const u32 *vals = buf;
2202
2203	if (iwl_trans_grab_nic_access(trans, &flags)) {
2204		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2205		for (offs = 0; offs < dwords; offs++)
2206			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2207				    vals ? vals[offs] : 0);
2208		iwl_trans_release_nic_access(trans, &flags);
2209	} else {
2210		ret = -EBUSY;
2211	}
2212	return ret;
2213}
2214
2215static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2216					u32 *val)
2217{
2218	return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2219				     ofs, val);
2220}
2221
2222static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
2223					    unsigned long txqs,
2224					    bool freeze)
2225{
2226	int queue;
2227
2228	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
2229		struct iwl_txq *txq = trans->txqs.txq[queue];
2230		unsigned long now;
2231
2232		spin_lock_bh(&txq->lock);
2233
2234		now = jiffies;
2235
2236		if (txq->frozen == freeze)
2237			goto next_queue;
2238
2239		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
2240				    freeze ? "Freezing" : "Waking", queue);
2241
2242		txq->frozen = freeze;
2243
2244		if (txq->read_ptr == txq->write_ptr)
2245			goto next_queue;
2246
2247		if (freeze) {
2248			if (unlikely(time_after(now,
2249						txq->stuck_timer.expires))) {
2250				/*
2251				 * The timer should have fired, maybe it is
2252				 * spinning right now on the lock.
2253				 */
2254				goto next_queue;
2255			}
2256			/* remember how long until the timer fires */
2257			txq->frozen_expiry_remainder =
2258				txq->stuck_timer.expires - now;
2259			del_timer(&txq->stuck_timer);
2260			goto next_queue;
2261		}
2262
2263		/*
2264		 * Wake a non-empty queue -> arm timer with the
2265		 * remainder before it froze
2266		 */
2267		mod_timer(&txq->stuck_timer,
2268			  now + txq->frozen_expiry_remainder);
2269
2270next_queue:
2271		spin_unlock_bh(&txq->lock);
2272	}
2273}
2274
2275static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
2276{
2277	int i;
2278
2279	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
2280		struct iwl_txq *txq = trans->txqs.txq[i];
2281
2282		if (i == trans->txqs.cmd.q_id)
2283			continue;
2284
2285		spin_lock_bh(&txq->lock);
2286
2287		if (!block && !(WARN_ON_ONCE(!txq->block))) {
2288			txq->block--;
2289			if (!txq->block) {
2290				iwl_write32(trans, HBUS_TARG_WRPTR,
2291					    txq->write_ptr | (i << 8));
2292			}
2293		} else if (block) {
2294			txq->block++;
2295		}
2296
2297		spin_unlock_bh(&txq->lock);
2298	}
2299}
2300
2301#define IWL_FLUSH_WAIT_MS	2000
2302
2303static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2304				       struct iwl_trans_rxq_dma_data *data)
2305{
2306	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2307
2308	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2309		return -EINVAL;
2310
2311	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2312	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2313	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2314	data->fr_bd_wid = 0;
2315
2316	return 0;
2317}
2318
2319static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2320{
2321	struct iwl_txq *txq;
2322	unsigned long now = jiffies;
2323	bool overflow_tx;
2324	u8 wr_ptr;
2325
2326	/* Make sure the NIC is still alive in the bus */
2327	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2328		return -ENODEV;
2329
2330	if (!test_bit(txq_idx, trans->txqs.queue_used))
2331		return -EINVAL;
2332
2333	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2334	txq = trans->txqs.txq[txq_idx];
2335
2336	spin_lock_bh(&txq->lock);
2337	overflow_tx = txq->overflow_tx ||
2338		      !skb_queue_empty(&txq->overflow_q);
2339	spin_unlock_bh(&txq->lock);
2340
2341	wr_ptr = READ_ONCE(txq->write_ptr);
2342
2343	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2344		overflow_tx) &&
2345	       !time_after(jiffies,
2346			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2347		u8 write_ptr = READ_ONCE(txq->write_ptr);
2348
2349		/*
2350		 * If write pointer moved during the wait, warn only
2351		 * if the TX came from op mode. In case TX came from
2352		 * trans layer (overflow TX) don't warn.
2353		 */
2354		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2355			      "WR pointer moved while flushing %d -> %d\n",
2356			      wr_ptr, write_ptr))
2357			return -ETIMEDOUT;
2358		wr_ptr = write_ptr;
2359
2360		usleep_range(1000, 2000);
2361
2362		spin_lock_bh(&txq->lock);
2363		overflow_tx = txq->overflow_tx ||
2364			      !skb_queue_empty(&txq->overflow_q);
2365		spin_unlock_bh(&txq->lock);
2366	}
2367
2368	if (txq->read_ptr != txq->write_ptr) {
2369		IWL_ERR(trans,
2370			"fail to flush all tx fifo queues Q %d\n", txq_idx);
2371		iwl_txq_log_scd_error(trans, txq);
2372		return -ETIMEDOUT;
2373	}
2374
2375	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2376
2377	return 0;
2378}
2379
2380static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2381{
2382	int cnt;
2383	int ret = 0;
2384
2385	/* waiting for all the tx frames complete might take a while */
2386	for (cnt = 0;
2387	     cnt < trans->trans_cfg->base_params->num_of_queues;
2388	     cnt++) {
2389
2390		if (cnt == trans->txqs.cmd.q_id)
2391			continue;
2392		if (!test_bit(cnt, trans->txqs.queue_used))
2393			continue;
2394		if (!(BIT(cnt) & txq_bm))
2395			continue;
2396
2397		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2398		if (ret)
2399			break;
2400	}
2401
2402	return ret;
2403}
2404
2405static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2406					 u32 mask, u32 value)
2407{
2408	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2409
2410	spin_lock_bh(&trans_pcie->reg_lock);
2411	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2412	spin_unlock_bh(&trans_pcie->reg_lock);
2413}
2414
2415static const char *get_csr_string(int cmd)
2416{
2417#define IWL_CMD(x) case x: return #x
2418	switch (cmd) {
2419	IWL_CMD(CSR_HW_IF_CONFIG_REG);
2420	IWL_CMD(CSR_INT_COALESCING);
2421	IWL_CMD(CSR_INT);
2422	IWL_CMD(CSR_INT_MASK);
2423	IWL_CMD(CSR_FH_INT_STATUS);
2424	IWL_CMD(CSR_GPIO_IN);
2425	IWL_CMD(CSR_RESET);
2426	IWL_CMD(CSR_GP_CNTRL);
2427	IWL_CMD(CSR_HW_REV);
2428	IWL_CMD(CSR_EEPROM_REG);
2429	IWL_CMD(CSR_EEPROM_GP);
2430	IWL_CMD(CSR_OTP_GP_REG);
2431	IWL_CMD(CSR_GIO_REG);
2432	IWL_CMD(CSR_GP_UCODE_REG);
2433	IWL_CMD(CSR_GP_DRIVER_REG);
2434	IWL_CMD(CSR_UCODE_DRV_GP1);
2435	IWL_CMD(CSR_UCODE_DRV_GP2);
2436	IWL_CMD(CSR_LED_REG);
2437	IWL_CMD(CSR_DRAM_INT_TBL_REG);
2438	IWL_CMD(CSR_GIO_CHICKEN_BITS);
2439	IWL_CMD(CSR_ANA_PLL_CFG);
2440	IWL_CMD(CSR_HW_REV_WA_REG);
2441	IWL_CMD(CSR_MONITOR_STATUS_REG);
2442	IWL_CMD(CSR_DBG_HPET_MEM_REG);
2443	default:
2444		return "UNKNOWN";
2445	}
2446#undef IWL_CMD
2447}
2448
2449void iwl_pcie_dump_csr(struct iwl_trans *trans)
2450{
2451	int i;
2452	static const u32 csr_tbl[] = {
2453		CSR_HW_IF_CONFIG_REG,
2454		CSR_INT_COALESCING,
2455		CSR_INT,
2456		CSR_INT_MASK,
2457		CSR_FH_INT_STATUS,
2458		CSR_GPIO_IN,
2459		CSR_RESET,
2460		CSR_GP_CNTRL,
2461		CSR_HW_REV,
2462		CSR_EEPROM_REG,
2463		CSR_EEPROM_GP,
2464		CSR_OTP_GP_REG,
2465		CSR_GIO_REG,
2466		CSR_GP_UCODE_REG,
2467		CSR_GP_DRIVER_REG,
2468		CSR_UCODE_DRV_GP1,
2469		CSR_UCODE_DRV_GP2,
2470		CSR_LED_REG,
2471		CSR_DRAM_INT_TBL_REG,
2472		CSR_GIO_CHICKEN_BITS,
2473		CSR_ANA_PLL_CFG,
2474		CSR_MONITOR_STATUS_REG,
2475		CSR_HW_REV_WA_REG,
2476		CSR_DBG_HPET_MEM_REG
2477	};
2478	IWL_ERR(trans, "CSR values:\n");
2479	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2480		"CSR_INT_PERIODIC_REG)\n");
2481	for (i = 0; i <  ARRAY_SIZE(csr_tbl); i++) {
2482		IWL_ERR(trans, "  %25s: 0X%08x\n",
2483			get_csr_string(csr_tbl[i]),
2484			iwl_read32(trans, csr_tbl[i]));
2485	}
2486}
2487
2488#ifdef CONFIG_IWLWIFI_DEBUGFS
2489/* create and remove of files */
2490#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
2491	debugfs_create_file(#name, mode, parent, trans,			\
2492			    &iwl_dbgfs_##name##_ops);			\
2493} while (0)
2494
2495/* file operation */
2496#define DEBUGFS_READ_FILE_OPS(name)					\
2497static const struct file_operations iwl_dbgfs_##name##_ops = {		\
2498	.read = iwl_dbgfs_##name##_read,				\
2499	.open = simple_open,						\
2500	.llseek = generic_file_llseek,					\
2501};
2502
2503#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
2504static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2505	.write = iwl_dbgfs_##name##_write,                              \
2506	.open = simple_open,						\
2507	.llseek = generic_file_llseek,					\
2508};
2509
2510#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
2511static const struct file_operations iwl_dbgfs_##name##_ops = {		\
2512	.write = iwl_dbgfs_##name##_write,				\
2513	.read = iwl_dbgfs_##name##_read,				\
2514	.open = simple_open,						\
2515	.llseek = generic_file_llseek,					\
2516};
2517
2518struct iwl_dbgfs_tx_queue_priv {
2519	struct iwl_trans *trans;
2520};
2521
2522struct iwl_dbgfs_tx_queue_state {
2523	loff_t pos;
2524};
2525
2526static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2527{
2528	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2529	struct iwl_dbgfs_tx_queue_state *state;
2530
2531	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2532		return NULL;
2533
2534	state = kmalloc(sizeof(*state), GFP_KERNEL);
2535	if (!state)
2536		return NULL;
2537	state->pos = *pos;
2538	return state;
2539}
2540
2541static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2542					 void *v, loff_t *pos)
2543{
2544	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2545	struct iwl_dbgfs_tx_queue_state *state = v;
2546
2547	*pos = ++state->pos;
2548
2549	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2550		return NULL;
2551
2552	return state;
2553}
2554
2555static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2556{
2557	kfree(v);
2558}
2559
2560static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2561{
2562	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2563	struct iwl_dbgfs_tx_queue_state *state = v;
2564	struct iwl_trans *trans = priv->trans;
2565	struct iwl_txq *txq = trans->txqs.txq[state->pos];
2566
2567	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2568		   (unsigned int)state->pos,
2569		   !!test_bit(state->pos, trans->txqs.queue_used),
2570		   !!test_bit(state->pos, trans->txqs.queue_stopped));
2571	if (txq)
2572		seq_printf(seq,
2573			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2574			   txq->read_ptr, txq->write_ptr,
2575			   txq->need_update, txq->frozen,
2576			   txq->n_window, txq->ampdu);
2577	else
2578		seq_puts(seq, "(unallocated)");
2579
2580	if (state->pos == trans->txqs.cmd.q_id)
2581		seq_puts(seq, " (HCMD)");
2582	seq_puts(seq, "\n");
2583
2584	return 0;
2585}
2586
2587static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2588	.start = iwl_dbgfs_tx_queue_seq_start,
2589	.next = iwl_dbgfs_tx_queue_seq_next,
2590	.stop = iwl_dbgfs_tx_queue_seq_stop,
2591	.show = iwl_dbgfs_tx_queue_seq_show,
2592};
2593
2594static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2595{
2596	struct iwl_dbgfs_tx_queue_priv *priv;
2597
2598	priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2599				  sizeof(*priv));
2600
2601	if (!priv)
2602		return -ENOMEM;
2603
2604	priv->trans = inode->i_private;
2605	return 0;
2606}
2607
2608static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2609				       char __user *user_buf,
2610				       size_t count, loff_t *ppos)
2611{
2612	struct iwl_trans *trans = file->private_data;
2613	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2614	char *buf;
2615	int pos = 0, i, ret;
2616	size_t bufsz;
2617
2618	bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2619
2620	if (!trans_pcie->rxq)
2621		return -EAGAIN;
2622
2623	buf = kzalloc(bufsz, GFP_KERNEL);
2624	if (!buf)
2625		return -ENOMEM;
2626
2627	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2628		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2629
2630		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2631				 i);
2632		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2633				 rxq->read);
2634		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2635				 rxq->write);
2636		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2637				 rxq->write_actual);
2638		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2639				 rxq->need_update);
2640		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2641				 rxq->free_count);
2642		if (rxq->rb_stts) {
2643			u32 r =	__le16_to_cpu(iwl_get_closed_rb_stts(trans,
2644								     rxq));
2645			pos += scnprintf(buf + pos, bufsz - pos,
2646					 "\tclosed_rb_num: %u\n",
2647					 r & 0x0FFF);
2648		} else {
2649			pos += scnprintf(buf + pos, bufsz - pos,
2650					 "\tclosed_rb_num: Not Allocated\n");
2651		}
2652	}
2653	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2654	kfree(buf);
2655
2656	return ret;
2657}
2658
2659static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2660					char __user *user_buf,
2661					size_t count, loff_t *ppos)
2662{
2663	struct iwl_trans *trans = file->private_data;
2664	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2665	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2666
2667	int pos = 0;
2668	char *buf;
2669	int bufsz = 24 * 64; /* 24 items * 64 char per item */
2670	ssize_t ret;
2671
2672	buf = kzalloc(bufsz, GFP_KERNEL);
2673	if (!buf)
2674		return -ENOMEM;
2675
2676	pos += scnprintf(buf + pos, bufsz - pos,
2677			"Interrupt Statistics Report:\n");
2678
2679	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2680		isr_stats->hw);
2681	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2682		isr_stats->sw);
2683	if (isr_stats->sw || isr_stats->hw) {
2684		pos += scnprintf(buf + pos, bufsz - pos,
2685			"\tLast Restarting Code:  0x%X\n",
2686			isr_stats->err_code);
2687	}
2688#ifdef CONFIG_IWLWIFI_DEBUG
2689	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2690		isr_stats->sch);
2691	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2692		isr_stats->alive);
2693#endif
2694	pos += scnprintf(buf + pos, bufsz - pos,
2695		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2696
2697	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2698		isr_stats->ctkill);
2699
2700	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2701		isr_stats->wakeup);
2702
2703	pos += scnprintf(buf + pos, bufsz - pos,
2704		"Rx command responses:\t\t %u\n", isr_stats->rx);
2705
2706	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2707		isr_stats->tx);
2708
2709	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2710		isr_stats->unhandled);
2711
2712	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2713	kfree(buf);
2714	return ret;
2715}
2716
2717static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2718					 const char __user *user_buf,
2719					 size_t count, loff_t *ppos)
2720{
2721	struct iwl_trans *trans = file->private_data;
2722	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2723	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2724	u32 reset_flag;
2725	int ret;
2726
2727	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2728	if (ret)
2729		return ret;
2730	if (reset_flag == 0)
2731		memset(isr_stats, 0, sizeof(*isr_stats));
2732
2733	return count;
2734}
2735
2736static ssize_t iwl_dbgfs_csr_write(struct file *file,
2737				   const char __user *user_buf,
2738				   size_t count, loff_t *ppos)
2739{
2740	struct iwl_trans *trans = file->private_data;
2741
2742	iwl_pcie_dump_csr(trans);
2743
2744	return count;
2745}
2746
2747static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2748				     char __user *user_buf,
2749				     size_t count, loff_t *ppos)
2750{
2751	struct iwl_trans *trans = file->private_data;
2752	char *buf = NULL;
2753	ssize_t ret;
2754
2755	ret = iwl_dump_fh(trans, &buf);
2756	if (ret < 0)
2757		return ret;
2758	if (!buf)
2759		return -EINVAL;
2760	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2761	kfree(buf);
2762	return ret;
2763}
2764
2765static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2766				     char __user *user_buf,
2767				     size_t count, loff_t *ppos)
2768{
2769	struct iwl_trans *trans = file->private_data;
2770	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2771	char buf[100];
2772	int pos;
2773
2774	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2775			trans_pcie->debug_rfkill,
2776			!(iwl_read32(trans, CSR_GP_CNTRL) &
2777				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2778
2779	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2780}
2781
2782static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2783				      const char __user *user_buf,
2784				      size_t count, loff_t *ppos)
2785{
2786	struct iwl_trans *trans = file->private_data;
2787	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2788	bool new_value;
2789	int ret;
2790
2791	ret = kstrtobool_from_user(user_buf, count, &new_value);
2792	if (ret)
2793		return ret;
2794	if (new_value == trans_pcie->debug_rfkill)
2795		return count;
2796	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2797		 trans_pcie->debug_rfkill, new_value);
2798	trans_pcie->debug_rfkill = new_value;
2799	iwl_pcie_handle_rfkill_irq(trans);
2800
2801	return count;
2802}
2803
2804static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2805				       struct file *file)
2806{
2807	struct iwl_trans *trans = inode->i_private;
2808	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2809
2810	if (!trans->dbg.dest_tlv ||
2811	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2812		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2813		return -ENOENT;
2814	}
2815
2816	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2817		return -EBUSY;
2818
2819	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2820	return simple_open(inode, file);
2821}
2822
2823static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2824					  struct file *file)
2825{
2826	struct iwl_trans_pcie *trans_pcie =
2827		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2828
2829	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2830		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2831	return 0;
2832}
2833
2834static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2835				  void *buf, ssize_t *size,
2836				  ssize_t *bytes_copied)
2837{
2838	ssize_t buf_size_left = count - *bytes_copied;
2839
2840	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2841	if (*size > buf_size_left)
2842		*size = buf_size_left;
2843
2844	*size -= copy_to_user(user_buf, buf, *size);
2845	*bytes_copied += *size;
2846
2847	if (buf_size_left == *size)
2848		return true;
2849	return false;
2850}
2851
2852static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2853					   char __user *user_buf,
2854					   size_t count, loff_t *ppos)
2855{
2856	struct iwl_trans *trans = file->private_data;
2857	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2858	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2859	struct cont_rec *data = &trans_pcie->fw_mon_data;
2860	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2861	ssize_t size, bytes_copied = 0;
2862	bool b_full;
2863
2864	if (trans->dbg.dest_tlv) {
2865		write_ptr_addr =
2866			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2867		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2868	} else {
2869		write_ptr_addr = MON_BUFF_WRPTR;
2870		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2871	}
2872
2873	if (unlikely(!trans->dbg.rec_on))
2874		return 0;
2875
2876	mutex_lock(&data->mutex);
2877	if (data->state ==
2878	    IWL_FW_MON_DBGFS_STATE_DISABLED) {
2879		mutex_unlock(&data->mutex);
2880		return 0;
2881	}
2882
2883	/* write_ptr position in bytes rather then DW */
2884	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2885	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2886
2887	if (data->prev_wrap_cnt == wrap_cnt) {
2888		size = write_ptr - data->prev_wr_ptr;
2889		curr_buf = cpu_addr + data->prev_wr_ptr;
2890		b_full = iwl_write_to_user_buf(user_buf, count,
2891					       curr_buf, &size,
2892					       &bytes_copied);
2893		data->prev_wr_ptr += size;
2894
2895	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2896		   write_ptr < data->prev_wr_ptr) {
2897		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
2898		curr_buf = cpu_addr + data->prev_wr_ptr;
2899		b_full = iwl_write_to_user_buf(user_buf, count,
2900					       curr_buf, &size,
2901					       &bytes_copied);
2902		data->prev_wr_ptr += size;
2903
2904		if (!b_full) {
2905			size = write_ptr;
2906			b_full = iwl_write_to_user_buf(user_buf, count,
2907						       cpu_addr, &size,
2908						       &bytes_copied);
2909			data->prev_wr_ptr = size;
2910			data->prev_wrap_cnt++;
2911		}
2912	} else {
2913		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2914		    write_ptr > data->prev_wr_ptr)
2915			IWL_WARN(trans,
2916				 "write pointer passed previous write pointer, start copying from the beginning\n");
2917		else if (!unlikely(data->prev_wrap_cnt == 0 &&
2918				   data->prev_wr_ptr == 0))
2919			IWL_WARN(trans,
2920				 "monitor data is out of sync, start copying from the beginning\n");
2921
2922		size = write_ptr;
2923		b_full = iwl_write_to_user_buf(user_buf, count,
2924					       cpu_addr, &size,
2925					       &bytes_copied);
2926		data->prev_wr_ptr = size;
2927		data->prev_wrap_cnt = wrap_cnt;
2928	}
2929
2930	mutex_unlock(&data->mutex);
2931
2932	return bytes_copied;
2933}
2934
2935DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2936DEBUGFS_READ_FILE_OPS(fh_reg);
2937DEBUGFS_READ_FILE_OPS(rx_queue);
2938DEBUGFS_WRITE_FILE_OPS(csr);
2939DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
2940static const struct file_operations iwl_dbgfs_tx_queue_ops = {
2941	.owner = THIS_MODULE,
2942	.open = iwl_dbgfs_tx_queue_open,
2943	.read = seq_read,
2944	.llseek = seq_lseek,
2945	.release = seq_release_private,
2946};
2947
2948static const struct file_operations iwl_dbgfs_monitor_data_ops = {
2949	.read = iwl_dbgfs_monitor_data_read,
2950	.open = iwl_dbgfs_monitor_data_open,
2951	.release = iwl_dbgfs_monitor_data_release,
2952};
2953
2954/* Create the debugfs files and directories */
2955void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
2956{
2957	struct dentry *dir = trans->dbgfs_dir;
2958
2959	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
2960	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
2961	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
2962	DEBUGFS_ADD_FILE(csr, dir, 0200);
2963	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
2964	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
2965	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
2966}
2967
2968static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
2969{
2970	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2971	struct cont_rec *data = &trans_pcie->fw_mon_data;
2972
2973	mutex_lock(&data->mutex);
2974	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
2975	mutex_unlock(&data->mutex);
2976}
2977#endif /*CONFIG_IWLWIFI_DEBUGFS */
2978
2979static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2980{
2981	u32 cmdlen = 0;
2982	int i;
2983
2984	for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
2985		cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
2986
2987	return cmdlen;
2988}
2989
2990static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2991				   struct iwl_fw_error_dump_data **data,
2992				   int allocated_rb_nums)
2993{
2994	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2995	int max_len = trans_pcie->rx_buf_bytes;
2996	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
2997	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
2998	u32 i, r, j, rb_len = 0;
2999
3000	spin_lock(&rxq->lock);
3001
3002	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
3003
3004	for (i = rxq->read, j = 0;
3005	     i != r && j < allocated_rb_nums;
3006	     i = (i + 1) & RX_QUEUE_MASK, j++) {
3007		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
3008		struct iwl_fw_error_dump_rb *rb;
3009
3010		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
3011			       DMA_FROM_DEVICE);
3012
3013		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
3014
3015		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
3016		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
3017		rb = (void *)(*data)->data;
3018		rb->index = cpu_to_le32(i);
3019		memcpy(rb->data, page_address(rxb->page), max_len);
3020		/* remap the page for the free benefit */
3021		rxb->page_dma = dma_map_page(trans->dev, rxb->page,
3022					     rxb->offset, max_len,
3023					     DMA_FROM_DEVICE);
3024
3025		*data = iwl_fw_error_next_data(*data);
3026	}
3027
3028	spin_unlock(&rxq->lock);
3029
3030	return rb_len;
3031}
3032#define IWL_CSR_TO_DUMP (0x250)
3033
3034static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
3035				   struct iwl_fw_error_dump_data **data)
3036{
3037	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
3038	__le32 *val;
3039	int i;
3040
3041	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
3042	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
3043	val = (void *)(*data)->data;
3044
3045	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
3046		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3047
3048	*data = iwl_fw_error_next_data(*data);
3049
3050	return csr_len;
3051}
3052
3053static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
3054				       struct iwl_fw_error_dump_data **data)
3055{
3056	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
3057	unsigned long flags;
3058	__le32 *val;
3059	int i;
3060
3061	if (!iwl_trans_grab_nic_access(trans, &flags))
3062		return 0;
3063
3064	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
3065	(*data)->len = cpu_to_le32(fh_regs_len);
3066	val = (void *)(*data)->data;
3067
3068	if (!trans->trans_cfg->gen2)
3069		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
3070		     i += sizeof(u32))
3071			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3072	else
3073		for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
3074		     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
3075		     i += sizeof(u32))
3076			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
3077								      i));
3078
3079	iwl_trans_release_nic_access(trans, &flags);
3080
3081	*data = iwl_fw_error_next_data(*data);
3082
3083	return sizeof(**data) + fh_regs_len;
3084}
3085
3086static u32
3087iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3088				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3089				 u32 monitor_len)
3090{
3091	u32 buf_size_in_dwords = (monitor_len >> 2);
3092	u32 *buffer = (u32 *)fw_mon_data->data;
3093	unsigned long flags;
3094	u32 i;
3095
3096	if (!iwl_trans_grab_nic_access(trans, &flags))
3097		return 0;
3098
3099	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3100	for (i = 0; i < buf_size_in_dwords; i++)
3101		buffer[i] = iwl_read_umac_prph_no_grab(trans,
3102						       MON_DMARB_RD_DATA_ADDR);
3103	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3104
3105	iwl_trans_release_nic_access(trans, &flags);
3106
3107	return monitor_len;
3108}
3109
3110static void
3111iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3112			     struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3113{
3114	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3115
3116	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3117		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3118		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3119		write_ptr = DBGC_CUR_DBGBUF_STATUS;
3120		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3121	} else if (trans->dbg.dest_tlv) {
3122		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3123		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3124		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3125	} else {
3126		base = MON_BUFF_BASE_ADDR;
3127		write_ptr = MON_BUFF_WRPTR;
3128		wrap_cnt = MON_BUFF_CYCLE_CNT;
3129	}
3130
3131	write_ptr_val = iwl_read_prph(trans, write_ptr);
3132	fw_mon_data->fw_mon_cycle_cnt =
3133		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3134	fw_mon_data->fw_mon_base_ptr =
3135		cpu_to_le32(iwl_read_prph(trans, base));
3136	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3137		fw_mon_data->fw_mon_base_high_ptr =
3138			cpu_to_le32(iwl_read_prph(trans, base_high));
3139		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3140	}
3141	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3142}
3143
3144static u32
3145iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3146			    struct iwl_fw_error_dump_data **data,
3147			    u32 monitor_len)
3148{
3149	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3150	u32 len = 0;
3151
3152	if (trans->dbg.dest_tlv ||
3153	    (fw_mon->size &&
3154	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3155	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3156		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3157
3158		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3159		fw_mon_data = (void *)(*data)->data;
3160
3161		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3162
3163		len += sizeof(**data) + sizeof(*fw_mon_data);
3164		if (fw_mon->size) {
3165			memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3166			monitor_len = fw_mon->size;
3167		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3168			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3169			/*
3170			 * Update pointers to reflect actual values after
3171			 * shifting
3172			 */
3173			if (trans->dbg.dest_tlv->version) {
3174				base = (iwl_read_prph(trans, base) &
3175					IWL_LDBG_M2S_BUF_BA_MSK) <<
3176				       trans->dbg.dest_tlv->base_shift;
3177				base *= IWL_M2S_UNIT_SIZE;
3178				base += trans->cfg->smem_offset;
3179			} else {
3180				base = iwl_read_prph(trans, base) <<
3181				       trans->dbg.dest_tlv->base_shift;
3182			}
3183
3184			iwl_trans_read_mem(trans, base, fw_mon_data->data,
3185					   monitor_len / sizeof(u32));
3186		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3187			monitor_len =
3188				iwl_trans_pci_dump_marbh_monitor(trans,
3189								 fw_mon_data,
3190								 monitor_len);
3191		} else {
3192			/* Didn't match anything - output no monitor data */
3193			monitor_len = 0;
3194		}
3195
3196		len += monitor_len;
3197		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3198	}
3199
3200	return len;
3201}
3202
3203static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3204{
3205	if (trans->dbg.fw_mon.size) {
3206		*len += sizeof(struct iwl_fw_error_dump_data) +
3207			sizeof(struct iwl_fw_error_dump_fw_mon) +
3208			trans->dbg.fw_mon.size;
3209		return trans->dbg.fw_mon.size;
3210	} else if (trans->dbg.dest_tlv) {
3211		u32 base, end, cfg_reg, monitor_len;
3212
3213		if (trans->dbg.dest_tlv->version == 1) {
3214			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3215			cfg_reg = iwl_read_prph(trans, cfg_reg);
3216			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3217				trans->dbg.dest_tlv->base_shift;
3218			base *= IWL_M2S_UNIT_SIZE;
3219			base += trans->cfg->smem_offset;
3220
3221			monitor_len =
3222				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3223				trans->dbg.dest_tlv->end_shift;
3224			monitor_len *= IWL_M2S_UNIT_SIZE;
3225		} else {
3226			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3227			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3228
3229			base = iwl_read_prph(trans, base) <<
3230			       trans->dbg.dest_tlv->base_shift;
3231			end = iwl_read_prph(trans, end) <<
3232			      trans->dbg.dest_tlv->end_shift;
3233
3234			/* Make "end" point to the actual end */
3235			if (trans->trans_cfg->device_family >=
3236			    IWL_DEVICE_FAMILY_8000 ||
3237			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3238				end += (1 << trans->dbg.dest_tlv->end_shift);
3239			monitor_len = end - base;
3240		}
3241		*len += sizeof(struct iwl_fw_error_dump_data) +
3242			sizeof(struct iwl_fw_error_dump_fw_mon) +
3243			monitor_len;
3244		return monitor_len;
3245	}
3246	return 0;
3247}
3248
3249static struct iwl_trans_dump_data
3250*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3251			  u32 dump_mask)
3252{
3253	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3254	struct iwl_fw_error_dump_data *data;
3255	struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3256	struct iwl_fw_error_dump_txcmd *txcmd;
3257	struct iwl_trans_dump_data *dump_data;
3258	u32 len, num_rbs = 0, monitor_len = 0;
3259	int i, ptr;
3260	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3261			!trans->trans_cfg->mq_rx_supported &&
3262			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3263
3264	if (!dump_mask)
3265		return NULL;
3266
3267	/* transport dump header */
3268	len = sizeof(*dump_data);
3269
3270	/* host commands */
3271	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3272		len += sizeof(*data) +
3273			cmdq->n_window * (sizeof(*txcmd) +
3274					  TFD_MAX_PAYLOAD_SIZE);
3275
3276	/* FW monitor */
3277	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3278		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3279
3280	/* CSR registers */
3281	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3282		len += sizeof(*data) + IWL_CSR_TO_DUMP;
3283
3284	/* FH registers */
3285	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3286		if (trans->trans_cfg->gen2)
3287			len += sizeof(*data) +
3288			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3289				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3290		else
3291			len += sizeof(*data) +
3292			       (FH_MEM_UPPER_BOUND -
3293				FH_MEM_LOWER_BOUND);
3294	}
3295
3296	if (dump_rbs) {
3297		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
3298		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3299		/* RBs */
3300		num_rbs =
3301			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3302			& 0x0FFF;
3303		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3304		len += num_rbs * (sizeof(*data) +
3305				  sizeof(struct iwl_fw_error_dump_rb) +
3306				  (PAGE_SIZE << trans_pcie->rx_page_order));
3307	}
3308
3309	/* Paged memory for gen2 HW */
3310	if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3311		for (i = 0; i < trans->init_dram.paging_cnt; i++)
3312			len += sizeof(*data) +
3313			       sizeof(struct iwl_fw_error_dump_paging) +
3314			       trans->init_dram.paging[i].size;
3315
3316	dump_data = vzalloc(len);
3317	if (!dump_data)
3318		return NULL;
3319
3320	len = 0;
3321	data = (void *)dump_data->data;
3322
3323	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3324		u16 tfd_size = trans->txqs.tfd.size;
3325
3326		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3327		txcmd = (void *)data->data;
3328		spin_lock_bh(&cmdq->lock);
3329		ptr = cmdq->write_ptr;
3330		for (i = 0; i < cmdq->n_window; i++) {
3331			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3332			u8 tfdidx;
3333			u32 caplen, cmdlen;
3334
3335			if (trans->trans_cfg->use_tfh)
3336				tfdidx = idx;
3337			else
3338				tfdidx = ptr;
3339
3340			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3341							   (u8 *)cmdq->tfds +
3342							   tfd_size * tfdidx);
3343			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3344
3345			if (cmdlen) {
3346				len += sizeof(*txcmd) + caplen;
3347				txcmd->cmdlen = cpu_to_le32(cmdlen);
3348				txcmd->caplen = cpu_to_le32(caplen);
3349				memcpy(txcmd->data, cmdq->entries[idx].cmd,
3350				       caplen);
3351				txcmd = (void *)((u8 *)txcmd->data + caplen);
3352			}
3353
3354			ptr = iwl_txq_dec_wrap(trans, ptr);
3355		}
3356		spin_unlock_bh(&cmdq->lock);
3357
3358		data->len = cpu_to_le32(len);
3359		len += sizeof(*data);
3360		data = iwl_fw_error_next_data(data);
3361	}
3362
3363	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3364		len += iwl_trans_pcie_dump_csr(trans, &data);
3365	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3366		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3367	if (dump_rbs)
3368		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3369
3370	/* Paged memory for gen2 HW */
3371	if (trans->trans_cfg->gen2 &&
3372	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3373		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3374			struct iwl_fw_error_dump_paging *paging;
3375			u32 page_len = trans->init_dram.paging[i].size;
3376
3377			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3378			data->len = cpu_to_le32(sizeof(*paging) + page_len);
3379			paging = (void *)data->data;
3380			paging->index = cpu_to_le32(i);
3381			memcpy(paging->data,
3382			       trans->init_dram.paging[i].block, page_len);
3383			data = iwl_fw_error_next_data(data);
3384
3385			len += sizeof(*data) + sizeof(*paging) + page_len;
3386		}
3387	}
3388	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3389		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3390
3391	dump_data->len = len;
3392
3393	return dump_data;
3394}
3395
3396#ifdef CONFIG_PM_SLEEP
3397static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
3398{
3399	return 0;
3400}
3401
3402static void iwl_trans_pcie_resume(struct iwl_trans *trans)
3403{
3404}
3405#endif /* CONFIG_PM_SLEEP */
3406
3407#define IWL_TRANS_COMMON_OPS						\
3408	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
3409	.write8 = iwl_trans_pcie_write8,				\
3410	.write32 = iwl_trans_pcie_write32,				\
3411	.read32 = iwl_trans_pcie_read32,				\
3412	.read_prph = iwl_trans_pcie_read_prph,				\
3413	.write_prph = iwl_trans_pcie_write_prph,			\
3414	.read_mem = iwl_trans_pcie_read_mem,				\
3415	.write_mem = iwl_trans_pcie_write_mem,				\
3416	.read_config32 = iwl_trans_pcie_read_config32,			\
3417	.configure = iwl_trans_pcie_configure,				\
3418	.set_pmi = iwl_trans_pcie_set_pmi,				\
3419	.sw_reset = iwl_trans_pcie_sw_reset,				\
3420	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
3421	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
3422	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
3423	.dump_data = iwl_trans_pcie_dump_data,				\
3424	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
3425	.d3_resume = iwl_trans_pcie_d3_resume,				\
3426	.sync_nmi = iwl_trans_pcie_sync_nmi
3427
3428#ifdef CONFIG_PM_SLEEP
3429#define IWL_TRANS_PM_OPS						\
3430	.suspend = iwl_trans_pcie_suspend,				\
3431	.resume = iwl_trans_pcie_resume,
3432#else
3433#define IWL_TRANS_PM_OPS
3434#endif /* CONFIG_PM_SLEEP */
3435
3436static const struct iwl_trans_ops trans_ops_pcie = {
3437	IWL_TRANS_COMMON_OPS,
3438	IWL_TRANS_PM_OPS
3439	.start_hw = iwl_trans_pcie_start_hw,
3440	.fw_alive = iwl_trans_pcie_fw_alive,
3441	.start_fw = iwl_trans_pcie_start_fw,
3442	.stop_device = iwl_trans_pcie_stop_device,
3443
3444	.send_cmd = iwl_trans_pcie_send_hcmd,
3445
3446	.tx = iwl_trans_pcie_tx,
3447	.reclaim = iwl_trans_pcie_reclaim,
3448
3449	.txq_disable = iwl_trans_pcie_txq_disable,
3450	.txq_enable = iwl_trans_pcie_txq_enable,
3451
3452	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3453
3454	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3455
3456	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
3457	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3458#ifdef CONFIG_IWLWIFI_DEBUGFS
3459	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3460#endif
3461};
3462
3463static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3464	IWL_TRANS_COMMON_OPS,
3465	IWL_TRANS_PM_OPS
3466	.start_hw = iwl_trans_pcie_start_hw,
3467	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
3468	.start_fw = iwl_trans_pcie_gen2_start_fw,
3469	.stop_device = iwl_trans_pcie_gen2_stop_device,
3470
3471	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,
3472
3473	.tx = iwl_txq_gen2_tx,
3474	.reclaim = iwl_trans_pcie_reclaim,
3475
3476	.set_q_ptrs = iwl_trans_pcie_set_q_ptrs,
3477
3478	.txq_alloc = iwl_txq_dyn_alloc,
3479	.txq_free = iwl_txq_dyn_free,
3480	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3481	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3482	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3483#ifdef CONFIG_IWLWIFI_DEBUGFS
3484	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3485#endif
3486};
3487
3488struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3489			       const struct pci_device_id *ent,
3490			       const struct iwl_cfg_trans_params *cfg_trans)
3491{
3492	struct iwl_trans_pcie *trans_pcie;
3493	struct iwl_trans *trans;
3494	int ret, addr_size;
3495	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3496
3497	if (!cfg_trans->gen2)
3498		ops = &trans_ops_pcie;
3499
3500	ret = pcim_enable_device(pdev);
3501	if (ret)
3502		return ERR_PTR(ret);
3503
3504	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3505				cfg_trans);
3506	if (!trans)
3507		return ERR_PTR(-ENOMEM);
3508
3509	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3510
3511	trans_pcie->trans = trans;
3512	trans_pcie->opmode_down = true;
3513	spin_lock_init(&trans_pcie->irq_lock);
3514	spin_lock_init(&trans_pcie->reg_lock);
3515	spin_lock_init(&trans_pcie->alloc_page_lock);
3516	mutex_init(&trans_pcie->mutex);
3517	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3518
3519	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3520						   WQ_HIGHPRI | WQ_UNBOUND, 1);
3521	if (!trans_pcie->rba.alloc_wq) {
3522		ret = -ENOMEM;
3523		goto out_free_trans;
3524	}
3525	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3526
3527	trans_pcie->debug_rfkill = -1;
3528
3529	if (!cfg_trans->base_params->pcie_l1_allowed) {
3530		/*
3531		 * W/A - seems to solve weird behavior. We need to remove this
3532		 * if we don't want to stay in L1 all the time. This wastes a
3533		 * lot of power.
3534		 */
3535		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3536				       PCIE_LINK_STATE_L1 |
3537				       PCIE_LINK_STATE_CLKPM);
3538	}
3539
3540	trans_pcie->def_rx_queue = 0;
3541
3542	pci_set_master(pdev);
3543
3544	addr_size = trans->txqs.tfd.addr_size;
3545	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
3546	if (!ret)
3547		ret = pci_set_consistent_dma_mask(pdev,
3548						  DMA_BIT_MASK(addr_size));
3549	if (ret) {
3550		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3551		if (!ret)
3552			ret = pci_set_consistent_dma_mask(pdev,
3553							  DMA_BIT_MASK(32));
3554		/* both attempts failed: */
3555		if (ret) {
3556			dev_err(&pdev->dev, "No suitable DMA available\n");
3557			goto out_no_pci;
3558		}
3559	}
3560
3561	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3562	if (ret) {
3563		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3564		goto out_no_pci;
3565	}
3566
3567	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
3568	if (!trans_pcie->hw_base) {
3569		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3570		ret = -ENODEV;
3571		goto out_no_pci;
3572	}
3573
3574	/* We disable the RETRY_TIMEOUT register (0x41) to keep
3575	 * PCI Tx retries from interfering with C3 CPU state */
3576	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3577
3578	trans_pcie->pci_dev = pdev;
3579	iwl_disable_interrupts(trans);
3580
3581	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3582	if (trans->hw_rev == 0xffffffff) {
3583		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3584		ret = -EIO;
3585		goto out_no_pci;
3586	}
3587
3588	/*
3589	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
3590	 * changed, and now the revision step also includes bit 0-1 (no more
3591	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
3592	 * in the old format.
3593	 */
3594	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000) {
3595		trans->hw_rev = (trans->hw_rev & 0xfff0) |
3596				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3597
3598		ret = iwl_pcie_prepare_card_hw(trans);
3599		if (ret) {
3600			IWL_WARN(trans, "Exit HW not ready\n");
3601			goto out_no_pci;
3602		}
3603
3604		/*
3605		 * in-order to recognize C step driver should read chip version
3606		 * id located at the AUX bus MISC address space.
3607		 */
3608		ret = iwl_finish_nic_init(trans, cfg_trans);
3609		if (ret)
3610			goto out_no_pci;
3611
3612	}
3613
3614	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
3615
3616	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
3617	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3618	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3619		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3620
3621	/* Initialize the wait queue for commands */
3622	init_waitqueue_head(&trans_pcie->wait_command_queue);
3623
3624	init_waitqueue_head(&trans_pcie->sx_waitq);
3625
3626
3627	if (trans_pcie->msix_enabled) {
3628		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3629		if (ret)
3630			goto out_no_pci;
3631	 } else {
3632		ret = iwl_pcie_alloc_ict(trans);
3633		if (ret)
3634			goto out_no_pci;
3635
3636		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3637						iwl_pcie_isr,
3638						iwl_pcie_irq_handler,
3639						IRQF_SHARED, DRV_NAME, trans);
3640		if (ret) {
3641			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3642			goto out_free_ict;
3643		}
3644		trans_pcie->inta_mask = CSR_INI_SET_MASK;
3645	 }
3646
3647#ifdef CONFIG_IWLWIFI_DEBUGFS
3648	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3649	mutex_init(&trans_pcie->fw_mon_data.mutex);
3650#endif
3651
3652	iwl_dbg_tlv_init(trans);
3653
3654	return trans;
3655
3656out_free_ict:
3657	iwl_pcie_free_ict(trans);
3658out_no_pci:
3659	destroy_workqueue(trans_pcie->rba.alloc_wq);
3660out_free_trans:
3661	iwl_trans_free(trans);
3662	return ERR_PTR(ret);
3663}
3664
3665void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3666{
3667	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3668	unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
3669	bool interrupts_enabled = test_bit(STATUS_INT_ENABLED, &trans->status);
3670	u32 inta_addr, sw_err_bit;
3671
3672	if (trans_pcie->msix_enabled) {
3673		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3674		sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3675	} else {
3676		inta_addr = CSR_INT;
3677		sw_err_bit = CSR_INT_BIT_SW_ERR;
3678	}
3679
3680	/* if the interrupts were already disabled, there is no point in
3681	 * calling iwl_disable_interrupts
3682	 */
3683	if (interrupts_enabled)
3684		iwl_disable_interrupts(trans);
3685
3686	iwl_force_nmi(trans);
3687	while (time_after(timeout, jiffies)) {
3688		u32 inta_hw = iwl_read32(trans, inta_addr);
3689
3690		/* Error detected by uCode */
3691		if (inta_hw & sw_err_bit) {
3692			/* Clear causes register */
3693			iwl_write32(trans, inta_addr, inta_hw & sw_err_bit);
3694			break;
3695		}
3696
3697		mdelay(1);
3698	}
3699
3700	/* enable interrupts only if there were already enabled before this
3701	 * function to avoid a case were the driver enable interrupts before
3702	 * proper configurations were made
3703	 */
3704	if (interrupts_enabled)
3705		iwl_enable_interrupts(trans);
3706
3707	iwl_trans_fw_error(trans);
3708}
3709