/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
#include "queue/tx.h"

/*
 * RX related structures and functions
 */
#define RX_NUM_QUEUES 1
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

/**
 * struct iwl_rx_mem_buffer
 * @page_dma: bus address of rxb page
 * @page: driver's pointer to the rxb page
 * @invalid: rxb is in driver ownership - not owned by HW
 * @vid: index of this rxb in the global table
 * @list: list entry used to link the RBD into the rx_free/rx_used
 *	(or allocator) lists
 * @offset: indicates which offset of the page (in bytes)
 *	this buffer uses (if multiple RBs fit into one page)
 */
struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	u16 vid;
	bool invalid;
	struct list_head list;
	u32 offset;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rx_transfer_desc - transfer descriptor
 * @addr: ptr to free buffer start address
 * @rbid: unique tag of the buffer
 * @reserved: reserved
 */
struct iwl_rx_transfer_desc {
	__le16 rbid;
	__le16 reserved[3];
	__le64 addr;
} __packed;
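
/*
 * Illustrative sketch (not taken from this header): roughly how a restock
 * path could fill one of these transfer descriptors when handing a free RB
 * back to the device.  The "rxq" and "rxb" names are assumptions based on
 * the RX structures declared in this file.
 *
 *	struct iwl_rx_transfer_desc *bd = rxq->bd;
 *
 *	bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
 *	bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
 */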

#define IWL_RX_CD_FLAGS_FRAGMENTED	BIT(0)

/**
 * struct iwl_rx_completion_desc - completion descriptor
 * @reserved1: reserved
 * @rbid: unique tag of the received buffer
 * @flags: flags (0: fragmented, all others: reserved)
 * @reserved2: reserved
 */
struct iwl_rx_completion_desc {
	__le32 reserved1;
	__le16 rbid;
	u8 flags;
	u8 reserved2[25];
} __packed;

/**
 * struct iwl_rxq - Rx queue
 * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
 *	In AX210 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
 * @tr_tail: driver's pointer to the transmission ring tail buffer
 * @tr_tail_dma: physical address of the buffer for the transmission ring tail
 * @cr_tail: driver's pointer to the completion ring tail buffer
 * @cr_tail_dma: physical address of the buffer for the completion ring tail
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @used_count: Number of RBDs handed to the allocator for allocation
 * @write_actual: last write pointer actually written to the device,
 *	always a multiple of 8
 * @rx_free: list of RBDs with allocated RB ready for use
 * @rx_used: list of RBDs with no RB attached
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue's indices and RBD lists
 * @queue: actual rx queue. Not used for multi-rx queue.
 * @next_rb_is_fragment: indicates that the previous RB that we handled set
 *	the fragmented flag, so the next one is still another fragment
 *
 * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	int id;
	void *bd;
	dma_addr_t bd_dma;
	union {
		void *used_bd;
		__le32 *bd_32;
		struct iwl_rx_completion_desc *cd;
	};
	dma_addr_t used_bd_dma;
	__le16 *tr_tail;
	dma_addr_t tr_tail_dma;
	__le16 *cr_tail;
	dma_addr_t cr_tail_dma;
	u32 read;
	u32 write;
	u32 free_count;
	u32 used_count;
	u32 write_actual;
	u32 queue_size;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update, next_rb_is_fragment;
	void *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
	struct napi_struct napi;
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
};

/**
 * struct iwl_rb_allocator - Rx allocator
 * @req_pending: number of requests the allocator has not processed yet
 * @req_ready: number of requests honored and ready for claiming
 * @rbd_allocated: RBDs with pages allocated and ready to be handed to
 *	the queue. This is a list of &struct iwl_rx_mem_buffer
 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
 *	of &struct iwl_rx_mem_buffer
 * @lock: protects the rbd_allocated and rbd_empty lists
 * @alloc_wq: work queue for background calls
 * @rx_alloc: work struct for background calls
 */
struct iwl_rb_allocator {
	atomic_t req_pending;
	atomic_t req_ready;
	struct list_head rbd_allocated;
	struct list_head rbd_empty;
	spinlock_t lock;
	struct workqueue_struct *alloc_wq;
	struct work_struct rx_alloc;
};
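
/*
 * Illustrative sketch (not taken from this header): the queue/allocator
 * handshake implied by the counters above, assuming the RX_CLAIM_REQ_ALLOC
 * constant defined earlier in this file.
 *
 *	RX queue:  once RX_CLAIM_REQ_ALLOC used RBDs pile up, move them to
 *	           rba->rbd_empty, atomic_inc(&rba->req_pending) and queue
 *	           rba->rx_alloc on rba->alloc_wq.
 *	Allocator: attach fresh pages to those RBDs, move them to
 *	           rba->rbd_allocated, then atomic_dec(&rba->req_pending)
 *	           and atomic_inc(&rba->req_ready).
 *	RX queue:  when req_ready > 0, claim the allocated RBDs back into
 *	           rx_free and atomic_dec(&rba->req_ready).
 */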

/**
 * iwl_get_closed_rb_stts - get closed rb stts from different structs
 * @trans: the transport
 * @rxq: the rxq to get the rb stts from
 */
static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
					    struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		__le16 *rb_stts = rxq->rb_stts;

		return READ_ONCE(*rb_stts);
	} else {
		struct iwl_rb_status *rb_stts = rxq->rb_stts;

		return READ_ONCE(rb_stts->closed_rb_num);
	}
}
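
/*
 * Illustrative sketch (not taken from this header): a caller in the RX path
 * would typically read the closed RB count like this, masking it down to the
 * 12-bit hardware field before comparing it with rxq->read:
 *
 *	u16 closed_rb_num =
 *		le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 */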

#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
 * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
 * debugfs file
 *
 * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed.
 * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open.
 * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is
 *	set the file can no longer be used.
 */
enum iwl_fw_mon_dbgfs_state {
	IWL_FW_MON_DBGFS_STATE_CLOSED,
	IWL_FW_MON_DBGFS_STATE_OPEN,
	IWL_FW_MON_DBGFS_STATE_DISABLED,
};
#endif

/**
 * enum iwl_shared_irq_flags - level of sharing for irq
 * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
 * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
 */
enum iwl_shared_irq_flags {
	IWL_SHARED_IRQ_NON_RX		= BIT(0),
	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
};

/**
 * enum iwl_image_response_code - image response values
 * @IWL_IMAGE_RESP_DEF: the default value of the register
 * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
 * @IWL_IMAGE_RESP_FAIL: iml reading failed
 */
enum iwl_image_response_code {
	IWL_IMAGE_RESP_DEF		= 0,
	IWL_IMAGE_RESP_SUCCESS		= 1,
	IWL_IMAGE_RESP_FAIL		= 2,
};

/**
 * struct cont_rec - continuous recording data structure
 * @prev_wr_ptr: the last address that was read in monitor_data
 *	debugfs file
 * @prev_wrap_cnt: the wrap count that was used during the last read in
 *	monitor_data debugfs file
 * @state: the state of monitor_data debugfs file as described
 *	in &iwl_fw_mon_dbgfs_state enum
 * @mutex: locked while reading from monitor_data debugfs file
 */
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct cont_rec {
	u32 prev_wr_ptr;
	u32 prev_wrap_cnt;
	u8  state;
	/* Used to sync monitor_data debugfs file with driver unload flow */
	struct mutex mutex;
};
#endif

/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
 * @global_table: table mapping received VID from hw to rxb
 * @rba: allocator for RX replenishing
 * @ctxt_info: context information for FW self init
 * @ctxt_info_gen3: context information for gen3 devices
 * @prph_info: prph info for self init
 * @prph_scratch: prph scratch for self init
 * @ctxt_info_dma_addr: dma addr of context information
 * @prph_info_dma_addr: dma addr of prph info
 * @prph_scratch_dma_addr: dma addr of prph scratch
 * @init_dram: DRAM data of firmware image (including paging).
 *	Context information addresses will be taken from here.
 *	This is driver's local copy for keeping track of size and
 *	count for allocating and freeing the memory.
 * @iml: image loader image virtual address
 * @iml_dma_addr: image loader image DMA address
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler sram base address in SRAM
 * @kw: keep warm address
 * @pnvm_dram: DRAM area that contains the PNVM data
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @def_rx_queue: default rx queue number
 * @rx_buf_size: Rx buffer size
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
 *	frame.
 * @rx_page_order: page order for receive buffer size
 * @rx_buf_bytes: RX buffer (RB) size in bytes
 * @reg_lock: protect hw register access
 * @mutex: to protect stop_device / start_fw / start_hw
 * @cmd_in_flight: true when we have a host command in flight
#ifdef CONFIG_IWLWIFI_DEBUGFS
 * @fw_mon_data: fw continuous recording data
#endif
 * @msix_entries: array of MSI-X entries
 * @msix_enabled: true if managed to enable MSI-X
 * @shared_vec_mask: the type of causes the shared vector handles
 *	(see iwl_shared_irq_flags).
 * @alloc_vecs: the number of interrupt vectors allocated by the OS
 * @def_irq: default irq for non rx causes
 * @fh_init_mask: initial unmasked fh causes
 * @hw_init_mask: initial unmasked hw causes
 * @fh_mask: current unmasked fh causes
 * @hw_mask: current unmasked hw causes
 * @in_rescan: true if we have triggered a device rescan
 * @base_rb_stts: base virtual address of receive buffer status for all queues
 * @base_rb_stts_dma: base physical address of receive buffer status
 * @supported_dma_mask: DMA mask to validate the actual address against,
 *	will be DMA_BIT_MASK(11) or DMA_BIT_MASK(12) depending on the device
 * @alloc_page_lock: spinlock for the page allocator
 * @alloc_page: allocated page to still use parts of
 * @alloc_page_used: how much of the allocated page was already used (bytes)
 */
struct iwl_trans_pcie {
	struct iwl_rxq *rxq;
	struct iwl_rx_mem_buffer *rx_pool;
	struct iwl_rx_mem_buffer **global_table;
	struct iwl_rb_allocator rba;
	union {
		struct iwl_context_info *ctxt_info;
		struct iwl_context_info_gen3 *ctxt_info_gen3;
	};
	struct iwl_prph_info *prph_info;
	struct iwl_prph_scratch *prph_scratch;
	void *iml;
	dma_addr_t ctxt_info_dma_addr;
	dma_addr_t prph_info_dma_addr;
	dma_addr_t prph_scratch_dma_addr;
	dma_addr_t iml_dma_addr;
	struct iwl_trans *trans;

	struct net_device napi_dev;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	bool is_down, opmode_down;
	s8 debug_rfkill;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	struct mutex mutex;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr kw;

	struct iwl_dram_data pnvm_dram;

	struct iwl_txq *txq_memory;

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	bool sx_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;
	wait_queue_head_t sx_waitq;

	u8 def_rx_queue;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
	u16 num_rx_bufs;

	enum iwl_amsdu_size rx_buf_size;
	bool scd_set_active;
	bool sw_csum_tx;
	bool pcie_dbg_dumped_once;
	u32 rx_page_order;
	u32 rx_buf_bytes;
	u32 supported_dma_mask;

	/* allocator lock for the two values below */
	spinlock_t alloc_page_lock;
	struct page *alloc_page;
	u32 alloc_page_used;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_hold_nic_awake;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	struct cont_rec fw_mon_data;
#endif

	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
	bool msix_enabled;
	u8 shared_vec_mask;
	u32 alloc_vecs;
	u32 def_irq;
	u32 fh_init_mask;
	u32 hw_init_mask;
	u32 fh_mask;
	u32 hw_mask;
	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
	u16 tx_cmd_queue_size;
	bool in_rescan;

	void *base_rb_stts;
	dma_addr_t base_rb_stts_dma;
};

static inline struct iwl_trans_pcie *
IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
{
	return (void *)trans->trans_specific;
}

static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
				      struct msix_entry *entry)
{
	/*
	 * Before sending the interrupt the HW disables it to prevent
	 * a nested interrupt. This is done by writing 1 to the corresponding
	 * bit in the mask register. After handling the interrupt, it should be
	 * re-enabled by clearing this bit. This register is defined as
	 * write 1 clear (W1C), meaning that a bit is cleared by writing 1
	 * to it.
	 */
	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
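
/*
 * Illustrative sketch (not taken from this header): an MSI-X interrupt
 * handler would typically call this right before returning, so that the
 * auto-masked vector is unmasked again:
 *
 *	iwl_pcie_clear_irq(trans, entry);
 *	return IRQ_HANDLED;
 */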

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}

/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans
*iwl_trans_pcie_alloc(struct pci_dev *pdev,
		      const struct pci_device_id *ent,
		      const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	clear_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		/* disable interrupts from uCode/NIC to host */
		iwl_write32(trans, CSR_INT_MASK, 0x00000000);

		/* acknowledge/clear/reset any interrupts still pending
		 * from uCode or flow handler (Rx/Tx DMA) */
		iwl_write32(trans, CSR_INT, 0xffffffff);
		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	} else {
		/* disable all the interrupts we might use */
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
	}
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

#define IWL_NUM_OF_COMPLETION_RINGS	31
#define IWL_NUM_OF_TRANSFER_RINGS	512

static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
					    int start)
{
	int i = 0;

	while (start < fw->num_sec &&
	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
		start++;
		i++;
	}

	return i;
}
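
/*
 * Illustrative sketch (not taken from this header): counting the sections
 * belonging to each CPU image, assuming the separator convention used above.
 * The "+ 1" skips over the separator entry itself.
 *
 *	int cpu1_cnt = iwl_pcie_get_num_sections(fw, 0);
 *	int cpu2_cnt = iwl_pcie_get_num_sections(fw, cpu1_cnt + 1);
 */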

static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
	struct iwl_self_init_dram *dram = &trans->init_dram;
	int i;

	if (!dram->fw) {
		WARN_ON(dram->fw_cnt);
		return;
	}

	for (i = 0; i < dram->fw_cnt; i++)
		dma_free_coherent(trans->dev, dram->fw[i].size,
				  dram->fw[i].block, dram->fw[i].physical);

	kfree(dram->fw);
	dram->fw_cnt = 0;
	dram->fw = NULL;
}

static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INI_SET_MASK;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		/*
		 * fh/hw_mask keep all the unmasked causes.
		 * Unlike MSI, with MSI-X a cause is enabled when its bit is
		 * unset.
		 */
		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    ~trans_pcie->fh_mask);
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    ~trans_pcie->hw_mask);
	}
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock(&trans_pcie->irq_lock);
	_iwl_enable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);
}

static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
	trans_pcie->hw_mask = msk;
}

static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
	trans_pcie->fh_mask = msk;
}

static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
			    trans_pcie->hw_init_mask);
		iwl_enable_fh_int_msk_msix(trans,
					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
	}
}

static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling ALIVE interrupt only\n");

	if (!trans_pcie->msix_enabled) {
		/*
		 * When the ALIVE interrupt is received, the ISR will call
		 * iwl_enable_fw_load_int_ctx_info again to set the ALIVE
		 * interrupt (no longer strictly needed) and also the RX
		 * interrupt, which allows us to receive the ALIVE
		 * notification (delivered via Rx) and continue the flow.
		 */
		trans_pcie->inta_mask = CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_ALIVE);
		/*
		 * Leave all the FH causes enabled to get the ALIVE
		 * notification.
		 */
		iwl_enable_fh_int_msk_msix(trans, trans_pcie->fh_init_mask);
	}
}

static inline const char *queue_name(struct device *dev,
				     struct iwl_trans_pcie *trans_p, int i)
{
	if (trans_p->shared_vec_mask) {
		int vec = trans_p->shared_vec_mask &
			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;

		if (i == 0)
			return DRV_NAME ": shared IRQ";

		return devm_kasprintf(dev, GFP_KERNEL,
				      DRV_NAME ": queue %d", i + vec);
	}
	if (i == 0)
		return DRV_NAME ": default queue";

	if (i == trans_p->alloc_vecs - 1)
		return DRV_NAME ": exception";

	return devm_kasprintf(dev, GFP_KERNEL,
			      DRV_NAME ": queue %d", i);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	if (!trans_pcie->msix_enabled) {
		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
	} else {
		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
			    trans_pcie->fh_init_mask);
		iwl_enable_hw_int_msk_msix(trans,
					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) {
		/*
		 * On 9000-series devices this bit isn't enabled by default, so
		 * when we power down the device we need to set the bit to allow it
		 * to wake up the PCI-E bus for RF-kill interrupts.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
	}
}

void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->debug_rfkill == 1)
		return true;

	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
{
	return (trans->dbg.dest_tlv || iwl_trans_dbg_ini_valid(trans));
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);

#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif

void iwl_pcie_rx_allocator_work(struct work_struct *data);

/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);

/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);

/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
				  struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */