/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#ifndef __BFA_IOC_H__
#define __BFA_IOC_H__

#include "bfa_cs.h"
#include "bfi.h"
#include "cna.h"

#define BFA_IOC_TOV		3000	/* msecs */
#define BFA_IOC_HWSEM_TOV	500	/* msecs */
#define BFA_IOC_HB_TOV		500	/* msecs */
#define BFA_IOC_POLL_TOV	200	/* msecs */
#define BNA_DBG_FWTRC_LEN      (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
				BFI_IOC_TRC_HDR_SZ)

/* PCI device information required by IOC */
struct bfa_pcidev {
	int	pci_slot;
	u8	pci_func;
	u16	device_id;
	u16	ssid;
	void	__iomem *pci_bar_kva;
};

/* Structure used to remember the DMA-able memory block's KVA and Physical
 * Address
 */
struct bfa_dma {
	void	*kva;	/* ! Kernel virtual address	*/
	u64	pa;	/* ! Physical address		*/
};

#define BFA_DMA_ALIGN_SZ	256

/* smem size for Crossbow and Catapult */
#define BFI_SMEM_CB_SIZE	0x200000U	/* ! 2MB for crossbow	*/
#define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult	*/

/* BFA dma address assignment macro. (big endian format) */
#define bfa_dma_be_addr_set(dma_addr, pa)	\
		__bfa_dma_be_addr_set(&dma_addr, (u64)pa)
static inline void
__bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
{
	dma_addr->a32.addr_lo = (u32) htonl(pa);
	dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
}

#define bfa_alen_set(__alen, __len, __pa)	\
	__bfa_alen_set(__alen, __len, (u64)__pa)

static inline void
__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
{
	alen->al_len = cpu_to_be32(len);
	bfa_dma_be_addr_set(alen->al_addr, pa);
}
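
/*
 * Usage sketch (illustrative only; "req_len" and "req_pa" are placeholder
 * names for a request buffer owned by the caller): filling a firmware
 * address/length descriptor with these helpers looks like
 *
 *	struct bfi_alen alen;
 *
 *	bfa_alen_set(&alen, req_len, req_pa);
 *
 * after which alen.al_len holds the length in big-endian byte order and
 * alen.al_addr carries the 64-bit DMA address split into big-endian
 * hi/lo 32-bit words, as the firmware expects.
 */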

struct bfa_ioc_regs {
	void __iomem *hfn_mbox_cmd;
	void __iomem *hfn_mbox;
	void __iomem *lpu_mbox_cmd;
	void __iomem *lpu_mbox;
	void __iomem *lpu_read_stat;
	void __iomem *pss_ctl_reg;
	void __iomem *pss_err_status_reg;
	void __iomem *app_pll_fast_ctl_reg;
	void __iomem *app_pll_slow_ctl_reg;
	void __iomem *ioc_sem_reg;
	void __iomem *ioc_usage_sem_reg;
	void __iomem *ioc_init_sem_reg;
	void __iomem *ioc_usage_reg;
	void __iomem *host_page_num_fn;
	void __iomem *heartbeat;
	void __iomem *ioc_fwstate;
	void __iomem *alt_ioc_fwstate;
	void __iomem *ll_halt;
	void __iomem *alt_ll_halt;
	void __iomem *err_set;
	void __iomem *ioc_fail_sync;
	void __iomem *shirq_isr_next;
	void __iomem *shirq_msk_next;
	void __iomem *smem_page_start;
	u32	smem_pg0;
};

/* IOC Mailbox structures */
typedef void (*bfa_mbox_cmd_cbfn_t)(void *cbarg);
struct bfa_mbox_cmd {
	struct list_head	qe;
	bfa_mbox_cmd_cbfn_t     cbfn;
	void		    *cbarg;
	u32     msg[BFI_IOC_MSGSZ];
};

/* IOC mailbox module */
typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg *m);
struct bfa_ioc_mbox_mod {
	struct list_head	cmd_q;		/*!< pending mbox queue	*/
	int			nmclass;	/*!< number of handlers */
	struct {
		bfa_ioc_mbox_mcfunc_t	cbfn;	/*!< message handlers	*/
		void			*cbarg;
	} mbhdlr[BFI_MC_MAX];
};

/* IOC callback function interfaces */
typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
typedef void (*bfa_ioc_disable_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_hbfail_cbfn_t)(void *bfa);
typedef void (*bfa_ioc_reset_cbfn_t)(void *bfa);
struct bfa_ioc_cbfn {
	bfa_ioc_enable_cbfn_t	enable_cbfn;
	bfa_ioc_disable_cbfn_t	disable_cbfn;
	bfa_ioc_hbfail_cbfn_t	hbfail_cbfn;
	bfa_ioc_reset_cbfn_t	reset_cbfn;
};

/* IOC event notification mechanism. */
enum bfa_ioc_event {
	BFA_IOC_E_ENABLED	= 1,
	BFA_IOC_E_DISABLED	= 2,
	BFA_IOC_E_FAILED	= 3,
};

typedef void (*bfa_ioc_notify_cbfn_t)(void *, enum bfa_ioc_event);

struct bfa_ioc_notify {
	struct list_head	qe;
	bfa_ioc_notify_cbfn_t	cbfn;
	void			*cbarg;
};

/* Initialize an IOC event notification structure */
#define bfa_ioc_notify_init(__notify, __cbfn, __cbarg) do {	\
	(__notify)->cbfn = (__cbfn);				\
	(__notify)->cbarg = (__cbarg);				\
} while (0)
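
/*
 * Usage sketch (illustrative; the handler name and "mod" argument are
 * hypothetical): a sub-module that wants IOC state-change events embeds a
 * struct bfa_ioc_notify, initializes it, and registers it with
 * bfa_nw_ioc_notify_register() (declared below):
 *
 *	static void my_ioc_event_cb(void *cbarg, enum bfa_ioc_event event)
 *	{
 *		// react to BFA_IOC_E_ENABLED / _DISABLED / _FAILED
 *	}
 *
 *	bfa_ioc_notify_init(&mod->ioc_notify, my_ioc_event_cb, mod);
 *	bfa_nw_ioc_notify_register(ioc, &mod->ioc_notify);
 */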

struct bfa_iocpf {
	bfa_fsm_t		fsm;
	struct bfa_ioc		*ioc;
	bool			fw_mismatch_notified;
	bool			auto_recover;
	u32			poll_time;
};

struct bfa_ioc {
	bfa_fsm_t		fsm;
	struct bfa		*bfa;
	struct bfa_pcidev	pcidev;
	struct timer_list	ioc_timer;
	struct timer_list	iocpf_timer;
	struct timer_list	sem_timer;
	struct timer_list	hb_timer;
	u32			hb_count;
	struct list_head	notify_q;
	void			*dbg_fwsave;
	int			dbg_fwsave_len;
	bool			dbg_fwsave_once;
	enum bfi_pcifn_class	clscode;
	struct bfa_ioc_regs	ioc_regs;
	struct bfa_ioc_drv_stats stats;
	bool			fcmode;
	bool			pllinit;
	bool			stats_busy;	/*!< outstanding stats */
	u8			port_id;

	struct bfa_dma		attr_dma;
	struct bfi_ioc_attr	*attr;
	struct bfa_ioc_cbfn	*cbfn;
	struct bfa_ioc_mbox_mod	mbox_mod;
	const struct bfa_ioc_hwif *ioc_hwif;
	struct bfa_iocpf	iocpf;
	enum bfi_asic_gen	asic_gen;
	enum bfi_asic_mode	asic_mode;
	enum bfi_port_mode	port0_mode;
	enum bfi_port_mode	port1_mode;
	enum bfa_mode		port_mode;
	u8			ad_cap_bm;	/*!< adapter cap bit mask */
	u8			port_mode_cfg;	/*!< config port mode */
};

struct bfa_ioc_hwif {
	enum bfa_status (*ioc_pll_init) (void __iomem *rb,
						enum bfi_asic_mode m);
	bool		(*ioc_firmware_lock)	(struct bfa_ioc *ioc);
	void		(*ioc_firmware_unlock)	(struct bfa_ioc *ioc);
	void		(*ioc_reg_init)	(struct bfa_ioc *ioc);
	void		(*ioc_map_port)	(struct bfa_ioc *ioc);
	void		(*ioc_isr_mode_set)	(struct bfa_ioc *ioc,
					bool msix);
	void		(*ioc_notify_fail)	(struct bfa_ioc *ioc);
	void		(*ioc_ownership_reset)	(struct bfa_ioc *ioc);
	bool		(*ioc_sync_start)       (struct bfa_ioc *ioc);
	void		(*ioc_sync_join)	(struct bfa_ioc *ioc);
	void		(*ioc_sync_leave)	(struct bfa_ioc *ioc);
	void		(*ioc_sync_ack)		(struct bfa_ioc *ioc);
	bool		(*ioc_sync_complete)	(struct bfa_ioc *ioc);
	bool		(*ioc_lpu_read_stat)	(struct bfa_ioc *ioc);
	void		(*ioc_set_fwstate)	(struct bfa_ioc *ioc,
					enum bfi_ioc_state fwstate);
	enum bfi_ioc_state (*ioc_get_fwstate) (struct bfa_ioc *ioc);
	void		(*ioc_set_alt_fwstate)	(struct bfa_ioc *ioc,
					enum bfi_ioc_state fwstate);
	enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc *ioc);
};

#define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
#define bfa_ioc_devid(__ioc)		((__ioc)->pcidev.device_id)
#define bfa_ioc_bar0(__ioc)		((__ioc)->pcidev.pci_bar_kva)
#define bfa_ioc_portid(__ioc)		((__ioc)->port_id)
#define bfa_ioc_asic_gen(__ioc)		((__ioc)->asic_gen)
#define bfa_ioc_is_default(__ioc)	\
	(bfa_ioc_pcifn(__ioc) == bfa_ioc_portid(__ioc))
#define bfa_ioc_speed_sup(__ioc)	\
	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
#define bfa_ioc_get_nports(__ioc)	\
	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)

#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
#define bfa_ioc_stats_hb_count(_ioc, _hb_count)	\
	((_ioc)->stats.hb_count = (_hb_count))
#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
#define BFA_IOC_FW_SMEM_SIZE(__ioc)					\
	((bfa_ioc_asic_gen(__ioc) == BFI_ASIC_GEN_CB)			\
	? BFI_SMEM_CB_SIZE : BFI_SMEM_CT_SIZE)
#define BFA_IOC_FLASH_CHUNK_NO(off)		(off / BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	(off % BFI_FLASH_CHUNK_SZ_WORDS)
#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
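
/*
 * Worked example (illustrative; the real chunk size comes from
 * BFI_FLASH_CHUNK_SZ_WORDS in bfi.h): if a chunk were 64 words long,
 * word offset 1300 would fall in chunk BFA_IOC_FLASH_CHUNK_NO(1300) == 20,
 * at word BFA_IOC_FLASH_OFFSET_IN_CHUNK(1300) == 20 within that chunk,
 * and the chunk itself would start at BFA_IOC_FLASH_CHUNK_ADDR(20) == 1280
 * words.
 */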

/* IOC mailbox interface */
bool bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc,
			struct bfa_mbox_cmd *cmd,
			bfa_mbox_cmd_cbfn_t cbfn, void *cbarg);
void bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc);
void bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
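
/*
 * Usage sketch (illustrative; the handler name and "mod" are placeholders,
 * and BFI_MC_FLASH merely stands in for the caller's own message class):
 * a sub-module registers a receive handler for its message class once,
 * then posts outgoing commands through the shared mailbox:
 *
 *	static void my_mbox_isr(void *cbarg, struct bfi_mbmsg *msg)
 *	{
 *		// dispatch on the message id carried in msg->mh
 *	}
 *
 *	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_FLASH, my_mbox_isr, mod);
 *
 *	// build the request in cmd.msg[] first, then post it; the optional
 *	// cbfn is called when a deferred command is finally posted.
 *	bfa_nw_ioc_mbox_queue(ioc, &mod->cmd, NULL, NULL);
 */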

/* IOC interfaces */

#define bfa_ioc_pll_init_asic(__ioc) \
	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
			   (__ioc)->asic_mode))

#define bfa_ioc_lpu_read_stat(__ioc) do {				\
		if ((__ioc)->ioc_hwif->ioc_lpu_read_stat)		\
			((__ioc)->ioc_hwif->ioc_lpu_read_stat(__ioc));	\
} while (0)

void bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc);
void bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc);
void bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc);

void bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa,
		struct bfa_ioc_cbfn *cbfn);
void bfa_nw_ioc_auto_recover(bool auto_recover);
void bfa_nw_ioc_detach(struct bfa_ioc *ioc);
void bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		enum bfi_pcifn_class clscode);
u32 bfa_nw_ioc_meminfo(void);
void bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa);
void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
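
/*
 * Typical bring-up order (sketch only; DMA allocation and error handling
 * are omitted, and "bna", "pcidev" and "ioc_cbfn" are the caller's own
 * objects):
 *
 *	bfa_nw_ioc_attach(ioc, bna, &ioc_cbfn);	// register completion callbacks
 *	bfa_nw_ioc_pci_init(ioc, &pcidev, BFI_PCIFN_CLASS_ETH);
 *	// allocate a coherent DMA block of bfa_nw_ioc_meminfo() bytes,
 *	// then hand it to the IOC:
 *	bfa_nw_ioc_mem_claim(ioc, dma_kva, dma_pa);
 *	bfa_nw_ioc_enable(ioc);			// enable_cbfn fires on completion
 */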

void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
enum bfa_status bfa_nw_ioc_fwsig_invalidate(struct bfa_ioc *ioc);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
	struct bfa_ioc_notify *notify);
bool bfa_nw_ioc_sem_get(void __iomem *sem_reg);
void bfa_nw_ioc_sem_release(void __iomem *sem_reg);
void bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc);
void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *fwhdr);
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *fwhdr);
void bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac);
void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);

/*
 * Timeout APIs
 */
void bfa_nw_ioc_timeout(struct bfa_ioc *ioc);
void bfa_nw_ioc_hb_check(struct bfa_ioc *ioc);
void bfa_nw_iocpf_timeout(struct bfa_ioc *ioc);
void bfa_nw_iocpf_sem_timeout(struct bfa_ioc *ioc);

/*
 * F/W Image Size & Chunk
 */
u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);

/*
 *	Flash module specific
 */
typedef void	(*bfa_cb_flash) (void *cbarg, enum bfa_status status);

struct bfa_flash {
	struct bfa_ioc *ioc;		/* back pointer to ioc */
	u32		type;		/* partition type */
	u8		instance;	/* partition instance */
	u8		rsv[3];
	u32		op_busy;	/* operation busy flag */
	u32		residue;	/* residual length */
	u32		offset;		/* offset */
	enum bfa_status	status;		/* status */
	u8		*dbuf_kva;	/* dma buf virtual address */
	u64		dbuf_pa;	/* dma buf physical address */
	bfa_cb_flash	cbfn;		/* user callback function */
	void		*cbarg;		/* user callback arg */
	u8		*ubuf;		/* user supplied buffer */
	u32		addr_off;	/* partition address offset */
	struct bfa_mbox_cmd mb;		/* mailbox */
	struct bfa_ioc_notify ioc_notify; /* ioc event notify */
};

enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
			struct bfa_flash_attr *attr,
			bfa_cb_flash cbfn, void *cbarg);
enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
			u32 type, u8 instance, void *buf, u32 len, u32 offset,
			bfa_cb_flash cbfn, void *cbarg);
enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
			u32 type, u8 instance, void *buf, u32 len, u32 offset,
			bfa_cb_flash cbfn, void *cbarg);
u32	bfa_nw_flash_meminfo(void);
void	bfa_nw_flash_attach(struct bfa_flash *flash,
			    struct bfa_ioc *ioc, void *dev);
void	bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
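
/*
 * Usage sketch (illustrative; "part_type", "inst", "buf", "len", "offset"
 * and the callback are placeholders, and error handling is omitted):
 *
 *	bfa_nw_flash_attach(flash, ioc, dev);
 *	// claim a coherent DMA block of bfa_nw_flash_meminfo() bytes
 *	bfa_nw_flash_memclaim(flash, dma_kva, dma_pa);
 *
 *	// asynchronous read of len bytes at offset from the given partition;
 *	// my_flash_cb(cbarg, status) runs when the transfer completes.
 *	status = bfa_nw_flash_read_part(flash, part_type, inst,
 *					buf, len, offset, my_flash_cb, cbarg);
 */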

#endif /* __BFA_IOC_H__ */