1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  Linux MegaRAID driver for SAS based RAID controllers
4 *
5 *  Copyright (c) 2003-2013  LSI Corporation
6 *  Copyright (c) 2013-2016  Avago Technologies
7 *  Copyright (c) 2016-2018  Broadcom Inc.
8 *
9 *  Authors: Broadcom Inc.
10 *           Sreenivas Bagalkote
11 *           Sumant Patro
12 *           Bo Yang
13 *           Adam Radford
14 *           Kashyap Desai <kashyap.desai@broadcom.com>
15 *           Sumit Saxena <sumit.saxena@broadcom.com>
16 *
17 *  Send feedback to: megaraidlinux.pdl@broadcom.com
18 */
19
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/list.h>
24#include <linux/moduleparam.h>
25#include <linux/module.h>
26#include <linux/spinlock.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/uio.h>
30#include <linux/slab.h>
31#include <linux/uaccess.h>
32#include <asm/unaligned.h>
33#include <linux/fs.h>
34#include <linux/compat.h>
35#include <linux/blkdev.h>
36#include <linux/mutex.h>
37#include <linux/poll.h>
38#include <linux/vmalloc.h>
39#include <linux/irq_poll.h>
40#include <linux/blk-mq-pci.h>
41
42#include <scsi/scsi.h>
43#include <scsi/scsi_cmnd.h>
44#include <scsi/scsi_device.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_tcq.h>
47#include <scsi/scsi_dbg.h>
48#include "megaraid_sas_fusion.h"
49#include "megaraid_sas.h"
50
51/*
52 * Number of sectors per IO command
53 * Will be set in megasas_init_mfi if user does not provide
54 */
55static unsigned int max_sectors;
56module_param_named(max_sectors, max_sectors, int, 0444);
57MODULE_PARM_DESC(max_sectors,
58	"Maximum number of sectors per IO command");
59
60static int msix_disable;
61module_param(msix_disable, int, 0444);
62MODULE_PARM_DESC(msix_disable, "Disable MSI-X interrupt handling. Default: 0");
63
64static unsigned int msix_vectors;
65module_param(msix_vectors, int, 0444);
66MODULE_PARM_DESC(msix_vectors, "MSI-X max vector count. Default: Set by FW");
67
68static int allow_vf_ioctls;
69module_param(allow_vf_ioctls, int, 0444);
70MODULE_PARM_DESC(allow_vf_ioctls, "Allow ioctls in SR-IOV VF mode. Default: 0");
71
72static unsigned int throttlequeuedepth = MEGASAS_THROTTLE_QUEUE_DEPTH;
73module_param(throttlequeuedepth, int, 0444);
74MODULE_PARM_DESC(throttlequeuedepth,
75	"Adapter queue depth when throttled due to I/O timeout. Default: 16");
76
77unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
78module_param(resetwaittime, int, 0444);
79MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
80
81static int smp_affinity_enable = 1;
82module_param(smp_affinity_enable, int, 0444);
83MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
84
85static int rdpq_enable = 1;
86module_param(rdpq_enable, int, 0444);
87MODULE_PARM_DESC(rdpq_enable, "Allocate reply queue in chunks for large queue depth enable/disable Default: enable(1)");
88
89unsigned int dual_qdepth_disable;
90module_param(dual_qdepth_disable, int, 0444);
91MODULE_PARM_DESC(dual_qdepth_disable, "Disable dual queue depth feature. Default: 0");
92
93static unsigned int scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
94module_param(scmd_timeout, int, 0444);
95MODULE_PARM_DESC(scmd_timeout, "scsi command timeout (10-90s), default 90s. See megasas_reset_timer.");
96
97int perf_mode = -1;
98module_param(perf_mode, int, 0444);
99MODULE_PARM_DESC(perf_mode, "Performance mode (only for Aero adapters), options:\n\t\t"
100		"0 - balanced: High iops and low latency queues are allocated &\n\t\t"
101		"interrupt coalescing is enabled only on high iops queues\n\t\t"
102		"1 - iops: High iops queues are not allocated &\n\t\t"
103		"interrupt coalescing is enabled on all queues\n\t\t"
104		"2 - latency: High iops queues are not allocated &\n\t\t"
105		"interrupt coalescing is disabled on all queues\n\t\t"
106		"default mode is 'balanced'"
107		);
108
109int event_log_level = MFI_EVT_CLASS_CRITICAL;
110module_param(event_log_level, int, 0644);
111MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)");
112
113unsigned int enable_sdev_max_qd;
114module_param(enable_sdev_max_qd, int, 0444);
115MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
116
117int host_tagset_enable = 1;
118module_param(host_tagset_enable, int, 0444);
119MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
120
121MODULE_LICENSE("GPL");
122MODULE_VERSION(MEGASAS_VERSION);
123MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
124MODULE_DESCRIPTION("Broadcom MegaRAID SAS Driver");
125
126int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
127static int megasas_get_pd_list(struct megasas_instance *instance);
128static int megasas_ld_list_query(struct megasas_instance *instance,
129				 u8 query_type);
130static int megasas_issue_init_mfi(struct megasas_instance *instance);
131static int megasas_register_aen(struct megasas_instance *instance,
132				u32 seq_num, u32 class_locale_word);
133static void megasas_get_pd_info(struct megasas_instance *instance,
134				struct scsi_device *sdev);
135static void
136megasas_set_ld_removed_by_fw(struct megasas_instance *instance);
137
138/*
139 * PCI ID table for all supported controllers
140 */
141static struct pci_device_id megasas_pci_table[] = {
142
143	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1064R)},
144	/* xscale IOP */
145	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078R)},
146	/* ppc IOP */
147	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078DE)},
148	/* ppc IOP */
149	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS1078GEN2)},
150	/* gen2*/
151	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0079GEN2)},
152	/* gen2*/
153	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0073SKINNY)},
154	/* skinny*/
155	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_SAS0071SKINNY)},
156	/* skinny*/
157	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VERDE_ZCR)},
158	/* xscale IOP, vega */
159	{PCI_DEVICE(PCI_VENDOR_ID_DELL, PCI_DEVICE_ID_DELL_PERC5)},
160	/* xscale IOP */
161	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)},
162	/* Fusion */
163	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_PLASMA)},
164	/* Plasma */
165	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)},
166	/* Invader */
167	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FURY)},
168	/* Fury */
169	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER)},
170	/* Intruder */
171	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INTRUDER_24)},
172	/* Intruder 24 port*/
173	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_52)},
174	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CUTLASS_53)},
175	/* VENTURA */
176	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA)},
177	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER)},
178	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_HARPOON)},
179	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_TOMCAT)},
180	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_VENTURA_4PORT)},
181	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_CRUSADER_4PORT)},
182	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E1)},
183	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E2)},
184	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E5)},
185	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E6)},
186	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E0)},
187	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E3)},
188	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E4)},
189	{PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_AERO_10E7)},
190	{}
191};
192
193MODULE_DEVICE_TABLE(pci, megasas_pci_table);
194
195static int megasas_mgmt_majorno;
196struct megasas_mgmt_info megasas_mgmt_info;
197static struct fasync_struct *megasas_async_queue;
198static DEFINE_MUTEX(megasas_async_queue_mutex);
199
200static int megasas_poll_wait_aen;
201static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
202static u32 support_poll_for_event;
203u32 megasas_dbg_lvl;
204static u32 support_device_change;
205static bool support_nvme_encapsulation;
206static bool support_pci_lane_margining;
207
208/* define lock for aen poll */
209static spinlock_t poll_aen_lock;
210
211extern struct dentry *megasas_debugfs_root;
212
213void
214megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
215		     u8 alt_status);
216static u32
217megasas_read_fw_status_reg_gen2(struct megasas_instance *instance);
218static int
219megasas_adp_reset_gen2(struct megasas_instance *instance,
220		       struct megasas_register_set __iomem *reg_set);
221static irqreturn_t megasas_isr(int irq, void *devp);
222static u32
223megasas_init_adapter_mfi(struct megasas_instance *instance);
224u32
225megasas_build_and_issue_cmd(struct megasas_instance *instance,
226			    struct scsi_cmnd *scmd);
227static void megasas_complete_cmd_dpc(unsigned long instance_addr);
228int
229wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
230	int seconds);
231void megasas_fusion_ocr_wq(struct work_struct *work);
232static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
233					 int initial);
234static int
235megasas_set_dma_mask(struct megasas_instance *instance);
236static int
237megasas_alloc_ctrl_mem(struct megasas_instance *instance);
238static inline void
239megasas_free_ctrl_mem(struct megasas_instance *instance);
240static inline int
241megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance);
242static inline void
243megasas_free_ctrl_dma_buffers(struct megasas_instance *instance);
244static inline void
245megasas_init_ctrl_params(struct megasas_instance *instance);
246
247u32 megasas_readl(struct megasas_instance *instance,
248		  const volatile void __iomem *addr)
249{
250	u32 i = 0, ret_val;
251	/*
252	 * Due to a HW errata in Aero controllers, reads to certain
253	 * Fusion registers could intermittently return all zeroes.
254	 * This behavior is transient in nature and subsequent reads will
255	 * return valid value. As a workaround in driver, retry readl for
256	 * up to thirty times until a non-zero value is read.
257	 */
258	if (instance->adapter_type == AERO_SERIES) {
259		do {
260			ret_val = readl(addr);
261			i++;
262		} while (ret_val == 0 && i < 30);
263		return ret_val;
264	} else {
265		return readl(addr);
266	}
267}
268
269/**
270 * megasas_set_dma_settings -	Populate DMA address, length and flags for DCMDs
271 * @instance:			Adapter soft state
272 * @dcmd:			DCMD frame inside MFI command
273 * @dma_addr:			DMA address of buffer to be passed to FW
274 * @dma_len:			Length of DMA buffer to be passed to FW
275 * @return:			void
276 */
277void megasas_set_dma_settings(struct megasas_instance *instance,
278			      struct megasas_dcmd_frame *dcmd,
279			      dma_addr_t dma_addr, u32 dma_len)
280{
281	if (instance->consistent_mask_64bit) {
282		dcmd->sgl.sge64[0].phys_addr = cpu_to_le64(dma_addr);
283		dcmd->sgl.sge64[0].length = cpu_to_le32(dma_len);
284		dcmd->flags = cpu_to_le16(dcmd->flags | MFI_FRAME_SGL64);
285
286	} else {
287		dcmd->sgl.sge32[0].phys_addr =
288				cpu_to_le32(lower_32_bits(dma_addr));
289		dcmd->sgl.sge32[0].length = cpu_to_le32(dma_len);
290		dcmd->flags = cpu_to_le16(dcmd->flags);
291	}
292}
293
294static void
295megasas_issue_dcmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
296{
297	instance->instancet->fire_cmd(instance,
298		cmd->frame_phys_addr, 0, instance->reg_set);
299	return;
300}
301
302/**
303 * megasas_get_cmd -	Get a command from the free pool
304 * @instance:		Adapter soft state
305 *
306 * Returns a free command from the pool
307 */
308struct megasas_cmd *megasas_get_cmd(struct megasas_instance
309						  *instance)
310{
311	unsigned long flags;
312	struct megasas_cmd *cmd = NULL;
313
314	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
315
316	if (!list_empty(&instance->cmd_pool)) {
317		cmd = list_entry((&instance->cmd_pool)->next,
318				 struct megasas_cmd, list);
319		list_del_init(&cmd->list);
320	} else {
321		dev_err(&instance->pdev->dev, "Command pool empty!\n");
322	}
323
324	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
325	return cmd;
326}
327
328/**
329 * megasas_return_cmd -	Return a cmd to free command pool
330 * @instance:		Adapter soft state
331 * @cmd:		Command packet to be returned to free command pool
332 */
333void
334megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
335{
336	unsigned long flags;
337	u32 blk_tags;
338	struct megasas_cmd_fusion *cmd_fusion;
339	struct fusion_context *fusion = instance->ctrl_context;
340
341	/* This flag is used only for fusion adapter.
342	 * Wait for Interrupt for Polled mode DCMD
343	 */
344	if (cmd->flags & DRV_DCMD_POLLED_MODE)
345		return;
346
347	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
348
349	if (fusion) {
350		blk_tags = instance->max_scsi_cmds + cmd->index;
351		cmd_fusion = fusion->cmd_list[blk_tags];
352		megasas_return_cmd_fusion(instance, cmd_fusion);
353	}
354	cmd->scmd = NULL;
355	cmd->frame_count = 0;
356	cmd->flags = 0;
357	memset(cmd->frame, 0, instance->mfi_frame_size);
358	cmd->frame->io.context = cpu_to_le32(cmd->index);
359	if (!fusion && reset_devices)
360		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
361	list_add(&cmd->list, (&instance->cmd_pool)->next);
362
363	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
364
365}
366
367static const char *
368format_timestamp(uint32_t timestamp)
369{
370	static char buffer[32];
371
372	if ((timestamp & 0xff000000) == 0xff000000)
373		snprintf(buffer, sizeof(buffer), "boot + %us", timestamp &
374		0x00ffffff);
375	else
376		snprintf(buffer, sizeof(buffer), "%us", timestamp);
377	return buffer;
378}
379
380static const char *
381format_class(int8_t class)
382{
383	static char buffer[6];
384
385	switch (class) {
386	case MFI_EVT_CLASS_DEBUG:
387		return "debug";
388	case MFI_EVT_CLASS_PROGRESS:
389		return "progress";
390	case MFI_EVT_CLASS_INFO:
391		return "info";
392	case MFI_EVT_CLASS_WARNING:
393		return "WARN";
394	case MFI_EVT_CLASS_CRITICAL:
395		return "CRIT";
396	case MFI_EVT_CLASS_FATAL:
397		return "FATAL";
398	case MFI_EVT_CLASS_DEAD:
399		return "DEAD";
400	default:
401		snprintf(buffer, sizeof(buffer), "%d", class);
402		return buffer;
403	}
404}
405
406/**
407  * megasas_decode_evt: Decode FW AEN event and print critical event
408  * for information.
409  * @instance:			Adapter soft state
410  */
411static void
412megasas_decode_evt(struct megasas_instance *instance)
413{
414	struct megasas_evt_detail *evt_detail = instance->evt_detail;
415	union megasas_evt_class_locale class_locale;
416	class_locale.word = le32_to_cpu(evt_detail->cl.word);
417
418	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
419	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
420		printk(KERN_WARNING "megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
421		event_log_level = MFI_EVT_CLASS_CRITICAL;
422	}
423
424	if (class_locale.members.class >= event_log_level)
425		dev_info(&instance->pdev->dev, "%d (%s/0x%04x/%s) - %s\n",
426			le32_to_cpu(evt_detail->seq_num),
427			format_timestamp(le32_to_cpu(evt_detail->time_stamp)),
428			(class_locale.members.locale),
429			format_class(class_locale.members.class),
430			evt_detail->description);
431
432	if (megasas_dbg_lvl & LD_PD_DEBUG)
433		dev_info(&instance->pdev->dev,
434			 "evt_detail.args.ld.target_id/index %d/%d\n",
435			 evt_detail->args.ld.target_id, evt_detail->args.ld.ld_index);
436
437}
438
439/*
440 * The following functions are defined for xscale
441 * (deviceid : 1064R, PERC5) controllers
442 */
443
444/**
445 * megasas_enable_intr_xscale -	Enables interrupts
446 * @instance:	Adapter soft state
447 */
448static inline void
449megasas_enable_intr_xscale(struct megasas_instance *instance)
450{
451	struct megasas_register_set __iomem *regs;
452
453	regs = instance->reg_set;
454	writel(0, &(regs)->outbound_intr_mask);
455
456	/* Dummy readl to force pci flush */
457	readl(&regs->outbound_intr_mask);
458}
459
460/**
461 * megasas_disable_intr_xscale -Disables interrupt
462 * @instance:	Adapter soft state
463 */
464static inline void
465megasas_disable_intr_xscale(struct megasas_instance *instance)
466{
467	struct megasas_register_set __iomem *regs;
468	u32 mask = 0x1f;
469
470	regs = instance->reg_set;
471	writel(mask, &regs->outbound_intr_mask);
472	/* Dummy readl to force pci flush */
473	readl(&regs->outbound_intr_mask);
474}
475
476/**
477 * megasas_read_fw_status_reg_xscale - returns the current FW status value
478 * @instance:	Adapter soft state
479 */
480static u32
481megasas_read_fw_status_reg_xscale(struct megasas_instance *instance)
482{
483	return readl(&instance->reg_set->outbound_msg_0);
484}
485/**
486 * megasas_clear_interrupt_xscale -	Check & clear interrupt
487 * @instance:	Adapter soft state
488 */
489static int
490megasas_clear_intr_xscale(struct megasas_instance *instance)
491{
492	u32 status;
493	u32 mfiStatus = 0;
494	struct megasas_register_set __iomem *regs;
495	regs = instance->reg_set;
496
497	/*
498	 * Check if it is our interrupt
499	 */
500	status = readl(&regs->outbound_intr_status);
501
502	if (status & MFI_OB_INTR_STATUS_MASK)
503		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
504	if (status & MFI_XSCALE_OMR0_CHANGE_INTERRUPT)
505		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
506
507	/*
508	 * Clear the interrupt by writing back the same value
509	 */
510	if (mfiStatus)
511		writel(status, &regs->outbound_intr_status);
512
513	/* Dummy readl to force pci flush */
514	readl(&regs->outbound_intr_status);
515
516	return mfiStatus;
517}
518
519/**
520 * megasas_fire_cmd_xscale -	Sends command to the FW
521 * @instance:		Adapter soft state
522 * @frame_phys_addr :	Physical address of cmd
523 * @frame_count :	Number of frames for the command
524 * @regs :		MFI register set
525 */
526static inline void
527megasas_fire_cmd_xscale(struct megasas_instance *instance,
528		dma_addr_t frame_phys_addr,
529		u32 frame_count,
530		struct megasas_register_set __iomem *regs)
531{
532	unsigned long flags;
533
534	spin_lock_irqsave(&instance->hba_lock, flags);
535	writel((frame_phys_addr >> 3)|(frame_count),
536	       &(regs)->inbound_queue_port);
537	spin_unlock_irqrestore(&instance->hba_lock, flags);
538}
539
540/**
541 * megasas_adp_reset_xscale -  For controller reset
542 * @instance:	Adapter soft state
543 * @regs:	MFI register set
544 */
545static int
546megasas_adp_reset_xscale(struct megasas_instance *instance,
547	struct megasas_register_set __iomem *regs)
548{
549	u32 i;
550	u32 pcidata;
551
552	writel(MFI_ADP_RESET, &regs->inbound_doorbell);
553
554	for (i = 0; i < 3; i++)
555		msleep(1000); /* sleep for 3 secs */
556	pcidata  = 0;
557	pci_read_config_dword(instance->pdev, MFI_1068_PCSR_OFFSET, &pcidata);
558	dev_notice(&instance->pdev->dev, "pcidata = %x\n", pcidata);
559	if (pcidata & 0x2) {
560		dev_notice(&instance->pdev->dev, "mfi 1068 offset read=%x\n", pcidata);
561		pcidata &= ~0x2;
562		pci_write_config_dword(instance->pdev,
563				MFI_1068_PCSR_OFFSET, pcidata);
564
565		for (i = 0; i < 2; i++)
566			msleep(1000); /* need to wait 2 secs again */
567
568		pcidata  = 0;
569		pci_read_config_dword(instance->pdev,
570				MFI_1068_FW_HANDSHAKE_OFFSET, &pcidata);
571		dev_notice(&instance->pdev->dev, "1068 offset handshake read=%x\n", pcidata);
572		if ((pcidata & 0xffff0000) == MFI_1068_FW_READY) {
573			dev_notice(&instance->pdev->dev, "1068 offset pcidt=%x\n", pcidata);
574			pcidata = 0;
575			pci_write_config_dword(instance->pdev,
576				MFI_1068_FW_HANDSHAKE_OFFSET, pcidata);
577		}
578	}
579	return 0;
580}
581
582/**
583 * megasas_check_reset_xscale -	For controller reset check
584 * @instance:	Adapter soft state
585 * @regs:	MFI register set
586 */
587static int
588megasas_check_reset_xscale(struct megasas_instance *instance,
589		struct megasas_register_set __iomem *regs)
590{
591	if ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
592	    (le32_to_cpu(*instance->consumer) ==
593		MEGASAS_ADPRESET_INPROG_SIGN))
594		return 1;
595	return 0;
596}
597
598static struct megasas_instance_template megasas_instance_template_xscale = {
599
600	.fire_cmd = megasas_fire_cmd_xscale,
601	.enable_intr = megasas_enable_intr_xscale,
602	.disable_intr = megasas_disable_intr_xscale,
603	.clear_intr = megasas_clear_intr_xscale,
604	.read_fw_status_reg = megasas_read_fw_status_reg_xscale,
605	.adp_reset = megasas_adp_reset_xscale,
606	.check_reset = megasas_check_reset_xscale,
607	.service_isr = megasas_isr,
608	.tasklet = megasas_complete_cmd_dpc,
609	.init_adapter = megasas_init_adapter_mfi,
610	.build_and_issue_cmd = megasas_build_and_issue_cmd,
611	.issue_dcmd = megasas_issue_dcmd,
612};
613
614/*
615 * This is the end of set of functions & definitions specific
616 * to xscale (deviceid : 1064R, PERC5) controllers
617 */
618
619/*
620 * The following functions are defined for ppc (deviceid : 0x60)
621 * controllers
622 */
623
624/**
625 * megasas_enable_intr_ppc -	Enables interrupts
626 * @instance:	Adapter soft state
627 */
628static inline void
629megasas_enable_intr_ppc(struct megasas_instance *instance)
630{
631	struct megasas_register_set __iomem *regs;
632
633	regs = instance->reg_set;
634	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
635
636	writel(~0x80000000, &(regs)->outbound_intr_mask);
637
638	/* Dummy readl to force pci flush */
639	readl(&regs->outbound_intr_mask);
640}
641
642/**
643 * megasas_disable_intr_ppc -	Disable interrupt
644 * @instance:	Adapter soft state
645 */
646static inline void
647megasas_disable_intr_ppc(struct megasas_instance *instance)
648{
649	struct megasas_register_set __iomem *regs;
650	u32 mask = 0xFFFFFFFF;
651
652	regs = instance->reg_set;
653	writel(mask, &regs->outbound_intr_mask);
654	/* Dummy readl to force pci flush */
655	readl(&regs->outbound_intr_mask);
656}
657
658/**
659 * megasas_read_fw_status_reg_ppc - returns the current FW status value
660 * @instance:	Adapter soft state
661 */
662static u32
663megasas_read_fw_status_reg_ppc(struct megasas_instance *instance)
664{
665	return readl(&instance->reg_set->outbound_scratch_pad_0);
666}
667
668/**
669 * megasas_clear_interrupt_ppc -	Check & clear interrupt
670 * @instance:	Adapter soft state
671 */
672static int
673megasas_clear_intr_ppc(struct megasas_instance *instance)
674{
675	u32 status, mfiStatus = 0;
676	struct megasas_register_set __iomem *regs;
677	regs = instance->reg_set;
678
679	/*
680	 * Check if it is our interrupt
681	 */
682	status = readl(&regs->outbound_intr_status);
683
684	if (status & MFI_REPLY_1078_MESSAGE_INTERRUPT)
685		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
686
687	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT)
688		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
689
690	/*
691	 * Clear the interrupt by writing back the same value
692	 */
693	writel(status, &regs->outbound_doorbell_clear);
694
695	/* Dummy readl to force pci flush */
696	readl(&regs->outbound_doorbell_clear);
697
698	return mfiStatus;
699}
700
701/**
702 * megasas_fire_cmd_ppc -	Sends command to the FW
703 * @instance:		Adapter soft state
704 * @frame_phys_addr:	Physical address of cmd
705 * @frame_count:	Number of frames for the command
706 * @regs:		MFI register set
707 */
708static inline void
709megasas_fire_cmd_ppc(struct megasas_instance *instance,
710		dma_addr_t frame_phys_addr,
711		u32 frame_count,
712		struct megasas_register_set __iomem *regs)
713{
714	unsigned long flags;
715
716	spin_lock_irqsave(&instance->hba_lock, flags);
717	writel((frame_phys_addr | (frame_count<<1))|1,
718			&(regs)->inbound_queue_port);
719	spin_unlock_irqrestore(&instance->hba_lock, flags);
720}
721
722/**
723 * megasas_check_reset_ppc -	For controller reset check
724 * @instance:	Adapter soft state
725 * @regs:	MFI register set
726 */
727static int
728megasas_check_reset_ppc(struct megasas_instance *instance,
729			struct megasas_register_set __iomem *regs)
730{
731	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
732		return 1;
733
734	return 0;
735}
736
737static struct megasas_instance_template megasas_instance_template_ppc = {
738
739	.fire_cmd = megasas_fire_cmd_ppc,
740	.enable_intr = megasas_enable_intr_ppc,
741	.disable_intr = megasas_disable_intr_ppc,
742	.clear_intr = megasas_clear_intr_ppc,
743	.read_fw_status_reg = megasas_read_fw_status_reg_ppc,
744	.adp_reset = megasas_adp_reset_xscale,
745	.check_reset = megasas_check_reset_ppc,
746	.service_isr = megasas_isr,
747	.tasklet = megasas_complete_cmd_dpc,
748	.init_adapter = megasas_init_adapter_mfi,
749	.build_and_issue_cmd = megasas_build_and_issue_cmd,
750	.issue_dcmd = megasas_issue_dcmd,
751};
752
753/**
754 * megasas_enable_intr_skinny -	Enables interrupts
755 * @instance:	Adapter soft state
756 */
757static inline void
758megasas_enable_intr_skinny(struct megasas_instance *instance)
759{
760	struct megasas_register_set __iomem *regs;
761
762	regs = instance->reg_set;
763	writel(0xFFFFFFFF, &(regs)->outbound_intr_mask);
764
765	writel(~MFI_SKINNY_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
766
767	/* Dummy readl to force pci flush */
768	readl(&regs->outbound_intr_mask);
769}
770
771/**
772 * megasas_disable_intr_skinny -	Disables interrupt
773 * @instance:	Adapter soft state
774 */
775static inline void
776megasas_disable_intr_skinny(struct megasas_instance *instance)
777{
778	struct megasas_register_set __iomem *regs;
779	u32 mask = 0xFFFFFFFF;
780
781	regs = instance->reg_set;
782	writel(mask, &regs->outbound_intr_mask);
783	/* Dummy readl to force pci flush */
784	readl(&regs->outbound_intr_mask);
785}
786
787/**
788 * megasas_read_fw_status_reg_skinny - returns the current FW status value
789 * @instance:	Adapter soft state
790 */
791static u32
792megasas_read_fw_status_reg_skinny(struct megasas_instance *instance)
793{
794	return readl(&instance->reg_set->outbound_scratch_pad_0);
795}
796
797/**
798 * megasas_clear_interrupt_skinny -	Check & clear interrupt
799 * @instance:	Adapter soft state
800 */
801static int
802megasas_clear_intr_skinny(struct megasas_instance *instance)
803{
804	u32 status;
805	u32 mfiStatus = 0;
806	struct megasas_register_set __iomem *regs;
807	regs = instance->reg_set;
808
809	/*
810	 * Check if it is our interrupt
811	 */
812	status = readl(&regs->outbound_intr_status);
813
814	if (!(status & MFI_SKINNY_ENABLE_INTERRUPT_MASK)) {
815		return 0;
816	}
817
818	/*
819	 * Check if it is our interrupt
820	 */
821	if ((megasas_read_fw_status_reg_skinny(instance) & MFI_STATE_MASK) ==
822	    MFI_STATE_FAULT) {
823		mfiStatus = MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
824	} else
825		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
826
827	/*
828	 * Clear the interrupt by writing back the same value
829	 */
830	writel(status, &regs->outbound_intr_status);
831
832	/*
833	 * dummy read to flush PCI
834	 */
835	readl(&regs->outbound_intr_status);
836
837	return mfiStatus;
838}
839
840/**
841 * megasas_fire_cmd_skinny -	Sends command to the FW
842 * @instance:		Adapter soft state
843 * @frame_phys_addr:	Physical address of cmd
844 * @frame_count:	Number of frames for the command
845 * @regs:		MFI register set
846 */
847static inline void
848megasas_fire_cmd_skinny(struct megasas_instance *instance,
849			dma_addr_t frame_phys_addr,
850			u32 frame_count,
851			struct megasas_register_set __iomem *regs)
852{
853	unsigned long flags;
854
855	spin_lock_irqsave(&instance->hba_lock, flags);
856	writel(upper_32_bits(frame_phys_addr),
857	       &(regs)->inbound_high_queue_port);
858	writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1,
859	       &(regs)->inbound_low_queue_port);
860	spin_unlock_irqrestore(&instance->hba_lock, flags);
861}
862
863/**
864 * megasas_check_reset_skinny -	For controller reset check
865 * @instance:	Adapter soft state
866 * @regs:	MFI register set
867 */
868static int
869megasas_check_reset_skinny(struct megasas_instance *instance,
870				struct megasas_register_set __iomem *regs)
871{
872	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
873		return 1;
874
875	return 0;
876}
877
878static struct megasas_instance_template megasas_instance_template_skinny = {
879
880	.fire_cmd = megasas_fire_cmd_skinny,
881	.enable_intr = megasas_enable_intr_skinny,
882	.disable_intr = megasas_disable_intr_skinny,
883	.clear_intr = megasas_clear_intr_skinny,
884	.read_fw_status_reg = megasas_read_fw_status_reg_skinny,
885	.adp_reset = megasas_adp_reset_gen2,
886	.check_reset = megasas_check_reset_skinny,
887	.service_isr = megasas_isr,
888	.tasklet = megasas_complete_cmd_dpc,
889	.init_adapter = megasas_init_adapter_mfi,
890	.build_and_issue_cmd = megasas_build_and_issue_cmd,
891	.issue_dcmd = megasas_issue_dcmd,
892};
893
894
895/*
896 * The following functions are defined for gen2 (deviceid : 0x78 0x79)
897 * controllers
898 */
899
900/**
901 * megasas_enable_intr_gen2 -  Enables interrupts
902 * @instance:	Adapter soft state
903 */
904static inline void
905megasas_enable_intr_gen2(struct megasas_instance *instance)
906{
907	struct megasas_register_set __iomem *regs;
908
909	regs = instance->reg_set;
910	writel(0xFFFFFFFF, &(regs)->outbound_doorbell_clear);
911
912	/* write ~0x00000005 (4 & 1) to the intr mask*/
913	writel(~MFI_GEN2_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
914
915	/* Dummy readl to force pci flush */
916	readl(&regs->outbound_intr_mask);
917}
918
919/**
920 * megasas_disable_intr_gen2 - Disables interrupt
921 * @instance:	Adapter soft state
922 */
923static inline void
924megasas_disable_intr_gen2(struct megasas_instance *instance)
925{
926	struct megasas_register_set __iomem *regs;
927	u32 mask = 0xFFFFFFFF;
928
929	regs = instance->reg_set;
930	writel(mask, &regs->outbound_intr_mask);
931	/* Dummy readl to force pci flush */
932	readl(&regs->outbound_intr_mask);
933}
934
935/**
936 * megasas_read_fw_status_reg_gen2 - returns the current FW status value
937 * @instance:	Adapter soft state
938 */
939static u32
940megasas_read_fw_status_reg_gen2(struct megasas_instance *instance)
941{
942	return readl(&instance->reg_set->outbound_scratch_pad_0);
943}
944
945/**
946 * megasas_clear_interrupt_gen2 -      Check & clear interrupt
947 * @instance:	Adapter soft state
948 */
949static int
950megasas_clear_intr_gen2(struct megasas_instance *instance)
951{
952	u32 status;
953	u32 mfiStatus = 0;
954	struct megasas_register_set __iomem *regs;
955	regs = instance->reg_set;
956
957	/*
958	 * Check if it is our interrupt
959	 */
960	status = readl(&regs->outbound_intr_status);
961
962	if (status & MFI_INTR_FLAG_REPLY_MESSAGE) {
963		mfiStatus = MFI_INTR_FLAG_REPLY_MESSAGE;
964	}
965	if (status & MFI_G2_OUTBOUND_DOORBELL_CHANGE_INTERRUPT) {
966		mfiStatus |= MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE;
967	}
968
969	/*
970	 * Clear the interrupt by writing back the same value
971	 */
972	if (mfiStatus)
973		writel(status, &regs->outbound_doorbell_clear);
974
975	/* Dummy readl to force pci flush */
976	readl(&regs->outbound_intr_status);
977
978	return mfiStatus;
979}
980
981/**
982 * megasas_fire_cmd_gen2 -     Sends command to the FW
983 * @instance:		Adapter soft state
984 * @frame_phys_addr:	Physical address of cmd
985 * @frame_count:	Number of frames for the command
986 * @regs:		MFI register set
987 */
988static inline void
989megasas_fire_cmd_gen2(struct megasas_instance *instance,
990			dma_addr_t frame_phys_addr,
991			u32 frame_count,
992			struct megasas_register_set __iomem *regs)
993{
994	unsigned long flags;
995
996	spin_lock_irqsave(&instance->hba_lock, flags);
997	writel((frame_phys_addr | (frame_count<<1))|1,
998			&(regs)->inbound_queue_port);
999	spin_unlock_irqrestore(&instance->hba_lock, flags);
1000}
1001
1002/**
1003 * megasas_adp_reset_gen2 -	For controller reset
1004 * @instance:	Adapter soft state
1005 * @reg_set:	MFI register set
1006 */
1007static int
1008megasas_adp_reset_gen2(struct megasas_instance *instance,
1009			struct megasas_register_set __iomem *reg_set)
1010{
1011	u32 retry = 0 ;
1012	u32 HostDiag;
1013	u32 __iomem *seq_offset = &reg_set->seq_offset;
1014	u32 __iomem *hostdiag_offset = &reg_set->host_diag;
1015
1016	if (instance->instancet == &megasas_instance_template_skinny) {
1017		seq_offset = &reg_set->fusion_seq_offset;
1018		hostdiag_offset = &reg_set->fusion_host_diag;
1019	}
1020
1021	writel(0, seq_offset);
1022	writel(4, seq_offset);
1023	writel(0xb, seq_offset);
1024	writel(2, seq_offset);
1025	writel(7, seq_offset);
1026	writel(0xd, seq_offset);
1027
1028	msleep(1000);
1029
1030	HostDiag = (u32)readl(hostdiag_offset);
1031
1032	while (!(HostDiag & DIAG_WRITE_ENABLE)) {
1033		msleep(100);
1034		HostDiag = (u32)readl(hostdiag_offset);
1035		dev_notice(&instance->pdev->dev, "RESETGEN2: retry=%x, hostdiag=%x\n",
1036					retry, HostDiag);
1037
1038		if (retry++ >= 100)
1039			return 1;
1040
1041	}
1042
1043	dev_notice(&instance->pdev->dev, "ADP_RESET_GEN2: HostDiag=%x\n", HostDiag);
1044
1045	writel((HostDiag | DIAG_RESET_ADAPTER), hostdiag_offset);
1046
1047	ssleep(10);
1048
1049	HostDiag = (u32)readl(hostdiag_offset);
1050	while (HostDiag & DIAG_RESET_ADAPTER) {
1051		msleep(100);
1052		HostDiag = (u32)readl(hostdiag_offset);
1053		dev_notice(&instance->pdev->dev, "RESET_GEN2: retry=%x, hostdiag=%x\n",
1054				retry, HostDiag);
1055
1056		if (retry++ >= 1000)
1057			return 1;
1058
1059	}
1060	return 0;
1061}
1062
1063/**
1064 * megasas_check_reset_gen2 -	For controller reset check
1065 * @instance:	Adapter soft state
1066 * @regs:	MFI register set
1067 */
1068static int
1069megasas_check_reset_gen2(struct megasas_instance *instance,
1070		struct megasas_register_set __iomem *regs)
1071{
1072	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1073		return 1;
1074
1075	return 0;
1076}
1077
1078static struct megasas_instance_template megasas_instance_template_gen2 = {
1079
1080	.fire_cmd = megasas_fire_cmd_gen2,
1081	.enable_intr = megasas_enable_intr_gen2,
1082	.disable_intr = megasas_disable_intr_gen2,
1083	.clear_intr = megasas_clear_intr_gen2,
1084	.read_fw_status_reg = megasas_read_fw_status_reg_gen2,
1085	.adp_reset = megasas_adp_reset_gen2,
1086	.check_reset = megasas_check_reset_gen2,
1087	.service_isr = megasas_isr,
1088	.tasklet = megasas_complete_cmd_dpc,
1089	.init_adapter = megasas_init_adapter_mfi,
1090	.build_and_issue_cmd = megasas_build_and_issue_cmd,
1091	.issue_dcmd = megasas_issue_dcmd,
1092};
1093
1094/*
1095 * This is the end of set of functions & definitions
1096 * specific to gen2 (deviceid : 0x78, 0x79) controllers
1097 */
1098
1099/*
1100 * Template added for TB (Fusion)
1101 */
1102extern struct megasas_instance_template megasas_instance_template_fusion;
1103
1104/**
1105 * megasas_issue_polled -	Issues a polling command
1106 * @instance:			Adapter soft state
1107 * @cmd:			Command packet to be issued
1108 *
1109 * For polling, MFI requires the cmd_status to be set to MFI_STAT_INVALID_STATUS before posting.
1110 */
1111int
1112megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd)
1113{
1114	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1115
1116	frame_hdr->cmd_status = MFI_STAT_INVALID_STATUS;
1117	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1118
1119	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1120		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1121			__func__, __LINE__);
1122		return DCMD_INIT;
1123	}
1124
1125	instance->instancet->issue_dcmd(instance, cmd);
1126
1127	return wait_and_poll(instance, cmd, instance->requestorId ?
1128			MEGASAS_ROUTINE_WAIT_TIME_VF : MFI_IO_TIMEOUT_SECS);
1129}
1130
1131/**
1132 * megasas_issue_blocked_cmd -	Synchronous wrapper around regular FW cmds
1133 * @instance:			Adapter soft state
1134 * @cmd:			Command to be issued
1135 * @timeout:			Timeout in seconds
1136 *
1137 * This function waits on an event for the command to be returned from ISR.
1138 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1139 * Used to issue ioctl commands.
1140 */
1141int
1142megasas_issue_blocked_cmd(struct megasas_instance *instance,
1143			  struct megasas_cmd *cmd, int timeout)
1144{
1145	int ret = 0;
1146	cmd->cmd_status_drv = DCMD_INIT;
1147
1148	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1149		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1150			__func__, __LINE__);
1151		return DCMD_INIT;
1152	}
1153
1154	instance->instancet->issue_dcmd(instance, cmd);
1155
1156	if (timeout) {
1157		ret = wait_event_timeout(instance->int_cmd_wait_q,
1158		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1159		if (!ret) {
1160			dev_err(&instance->pdev->dev,
1161				"DCMD(opcode: 0x%x) is timed out, func:%s\n",
1162				cmd->frame->dcmd.opcode, __func__);
1163			return DCMD_TIMEOUT;
1164		}
1165	} else
1166		wait_event(instance->int_cmd_wait_q,
1167				cmd->cmd_status_drv != DCMD_INIT);
1168
1169	return cmd->cmd_status_drv;
1170}
1171
1172/**
1173 * megasas_issue_blocked_abort_cmd -	Aborts previously issued cmd
1174 * @instance:				Adapter soft state
1175 * @cmd_to_abort:			Previously issued cmd to be aborted
1176 * @timeout:				Timeout in seconds
1177 *
1178 * MFI firmware can abort previously issued AEN comamnd (automatic event
1179 * notification). The megasas_issue_blocked_abort_cmd() issues such abort
1180 * cmd and waits for return status.
1181 * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
1182 */
1183static int
1184megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
1185				struct megasas_cmd *cmd_to_abort, int timeout)
1186{
1187	struct megasas_cmd *cmd;
1188	struct megasas_abort_frame *abort_fr;
1189	int ret = 0;
1190	u32 opcode;
1191
1192	cmd = megasas_get_cmd(instance);
1193
1194	if (!cmd)
1195		return -1;
1196
1197	abort_fr = &cmd->frame->abort;
1198
1199	/*
1200	 * Prepare and issue the abort frame
1201	 */
1202	abort_fr->cmd = MFI_CMD_ABORT;
1203	abort_fr->cmd_status = MFI_STAT_INVALID_STATUS;
1204	abort_fr->flags = cpu_to_le16(0);
1205	abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index);
1206	abort_fr->abort_mfi_phys_addr_lo =
1207		cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr));
1208	abort_fr->abort_mfi_phys_addr_hi =
1209		cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr));
1210
1211	cmd->sync_cmd = 1;
1212	cmd->cmd_status_drv = DCMD_INIT;
1213
1214	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
1215		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1216			__func__, __LINE__);
1217		return DCMD_INIT;
1218	}
1219
1220	instance->instancet->issue_dcmd(instance, cmd);
1221
1222	if (timeout) {
1223		ret = wait_event_timeout(instance->abort_cmd_wait_q,
1224		cmd->cmd_status_drv != DCMD_INIT, timeout * HZ);
1225		if (!ret) {
1226			opcode = cmd_to_abort->frame->dcmd.opcode;
1227			dev_err(&instance->pdev->dev,
1228				"Abort(to be aborted DCMD opcode: 0x%x) is timed out func:%s\n",
1229				opcode,  __func__);
1230			return DCMD_TIMEOUT;
1231		}
1232	} else
1233		wait_event(instance->abort_cmd_wait_q,
1234		cmd->cmd_status_drv != DCMD_INIT);
1235
1236	cmd->sync_cmd = 0;
1237
1238	megasas_return_cmd(instance, cmd);
1239	return cmd->cmd_status_drv;
1240}
1241
1242/**
1243 * megasas_make_sgl32 -	Prepares 32-bit SGL
1244 * @instance:		Adapter soft state
1245 * @scp:		SCSI command from the mid-layer
1246 * @mfi_sgl:		SGL to be filled in
1247 *
1248 * If successful, this function returns the number of SG elements. Otherwise,
1249 * it returnes -1.
1250 */
1251static int
1252megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp,
1253		   union megasas_sgl *mfi_sgl)
1254{
1255	int i;
1256	int sge_count;
1257	struct scatterlist *os_sgl;
1258
1259	sge_count = scsi_dma_map(scp);
1260	BUG_ON(sge_count < 0);
1261
1262	if (sge_count) {
1263		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1264			mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1265			mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl));
1266		}
1267	}
1268	return sge_count;
1269}
1270
1271/**
1272 * megasas_make_sgl64 -	Prepares 64-bit SGL
1273 * @instance:		Adapter soft state
1274 * @scp:		SCSI command from the mid-layer
1275 * @mfi_sgl:		SGL to be filled in
1276 *
1277 * If successful, this function returns the number of SG elements. Otherwise,
1278 * it returnes -1.
1279 */
1280static int
1281megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp,
1282		   union megasas_sgl *mfi_sgl)
1283{
1284	int i;
1285	int sge_count;
1286	struct scatterlist *os_sgl;
1287
1288	sge_count = scsi_dma_map(scp);
1289	BUG_ON(sge_count < 0);
1290
1291	if (sge_count) {
1292		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1293			mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl));
1294			mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl));
1295		}
1296	}
1297	return sge_count;
1298}
1299
1300/**
1301 * megasas_make_sgl_skinny - Prepares IEEE SGL
1302 * @instance:           Adapter soft state
1303 * @scp:                SCSI command from the mid-layer
1304 * @mfi_sgl:            SGL to be filled in
1305 *
1306 * If successful, this function returns the number of SG elements. Otherwise,
1307 * it returnes -1.
1308 */
1309static int
1310megasas_make_sgl_skinny(struct megasas_instance *instance,
1311		struct scsi_cmnd *scp, union megasas_sgl *mfi_sgl)
1312{
1313	int i;
1314	int sge_count;
1315	struct scatterlist *os_sgl;
1316
1317	sge_count = scsi_dma_map(scp);
1318
1319	if (sge_count) {
1320		scsi_for_each_sg(scp, os_sgl, sge_count, i) {
1321			mfi_sgl->sge_skinny[i].length =
1322				cpu_to_le32(sg_dma_len(os_sgl));
1323			mfi_sgl->sge_skinny[i].phys_addr =
1324				cpu_to_le64(sg_dma_address(os_sgl));
1325			mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0);
1326		}
1327	}
1328	return sge_count;
1329}
1330
1331 /**
1332 * megasas_get_frame_count - Computes the number of frames
1333 * @frame_type		: type of frame- io or pthru frame
1334 * @sge_count		: number of sg elements
1335 *
1336 * Returns the number of frames required for numnber of sge's (sge_count)
1337 */
1338
1339static u32 megasas_get_frame_count(struct megasas_instance *instance,
1340			u8 sge_count, u8 frame_type)
1341{
1342	int num_cnt;
1343	int sge_bytes;
1344	u32 sge_sz;
1345	u32 frame_count = 0;
1346
1347	sge_sz = (IS_DMA64) ? sizeof(struct megasas_sge64) :
1348	    sizeof(struct megasas_sge32);
1349
1350	if (instance->flag_ieee) {
1351		sge_sz = sizeof(struct megasas_sge_skinny);
1352	}
1353
1354	/*
1355	 * Main frame can contain 2 SGEs for 64-bit SGLs and
1356	 * 3 SGEs for 32-bit SGLs for ldio &
1357	 * 1 SGEs for 64-bit SGLs and
1358	 * 2 SGEs for 32-bit SGLs for pthru frame
1359	 */
1360	if (unlikely(frame_type == PTHRU_FRAME)) {
1361		if (instance->flag_ieee == 1) {
1362			num_cnt = sge_count - 1;
1363		} else if (IS_DMA64)
1364			num_cnt = sge_count - 1;
1365		else
1366			num_cnt = sge_count - 2;
1367	} else {
1368		if (instance->flag_ieee == 1) {
1369			num_cnt = sge_count - 1;
1370		} else if (IS_DMA64)
1371			num_cnt = sge_count - 2;
1372		else
1373			num_cnt = sge_count - 3;
1374	}
1375
1376	if (num_cnt > 0) {
1377		sge_bytes = sge_sz * num_cnt;
1378
1379		frame_count = (sge_bytes / MEGAMFI_FRAME_SIZE) +
1380		    ((sge_bytes % MEGAMFI_FRAME_SIZE) ? 1 : 0) ;
1381	}
1382	/* Main frame */
1383	frame_count += 1;
1384
1385	if (frame_count > 7)
1386		frame_count = 8;
1387	return frame_count;
1388}
1389
1390/**
1391 * megasas_build_dcdb -	Prepares a direct cdb (DCDB) command
1392 * @instance:		Adapter soft state
1393 * @scp:		SCSI command
1394 * @cmd:		Command to be prepared in
1395 *
1396 * This function prepares CDB commands. These are typcially pass-through
1397 * commands to the devices.
1398 */
1399static int
1400megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
1401		   struct megasas_cmd *cmd)
1402{
1403	u32 is_logical;
1404	u32 device_id;
1405	u16 flags = 0;
1406	struct megasas_pthru_frame *pthru;
1407
1408	is_logical = MEGASAS_IS_LOGICAL(scp->device);
1409	device_id = MEGASAS_DEV_INDEX(scp);
1410	pthru = (struct megasas_pthru_frame *)cmd->frame;
1411
1412	if (scp->sc_data_direction == DMA_TO_DEVICE)
1413		flags = MFI_FRAME_DIR_WRITE;
1414	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1415		flags = MFI_FRAME_DIR_READ;
1416	else if (scp->sc_data_direction == DMA_NONE)
1417		flags = MFI_FRAME_DIR_NONE;
1418
1419	if (instance->flag_ieee == 1) {
1420		flags |= MFI_FRAME_IEEE;
1421	}
1422
1423	/*
1424	 * Prepare the DCDB frame
1425	 */
1426	pthru->cmd = (is_logical) ? MFI_CMD_LD_SCSI_IO : MFI_CMD_PD_SCSI_IO;
1427	pthru->cmd_status = 0x0;
1428	pthru->scsi_status = 0x0;
1429	pthru->target_id = device_id;
1430	pthru->lun = scp->device->lun;
1431	pthru->cdb_len = scp->cmd_len;
1432	pthru->timeout = 0;
1433	pthru->pad_0 = 0;
1434	pthru->flags = cpu_to_le16(flags);
1435	pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp));
1436
1437	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1438
1439	/*
1440	 * If the command is for the tape device, set the
1441	 * pthru timeout to the os layer timeout value.
1442	 */
1443	if (scp->device->type == TYPE_TAPE) {
1444		if ((scp->request->timeout / HZ) > 0xFFFF)
1445			pthru->timeout = cpu_to_le16(0xFFFF);
1446		else
1447			pthru->timeout = cpu_to_le16(scp->request->timeout / HZ);
1448	}
1449
1450	/*
1451	 * Construct SGL
1452	 */
1453	if (instance->flag_ieee == 1) {
1454		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1455		pthru->sge_count = megasas_make_sgl_skinny(instance, scp,
1456						      &pthru->sgl);
1457	} else if (IS_DMA64) {
1458		pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1459		pthru->sge_count = megasas_make_sgl64(instance, scp,
1460						      &pthru->sgl);
1461	} else
1462		pthru->sge_count = megasas_make_sgl32(instance, scp,
1463						      &pthru->sgl);
1464
1465	if (pthru->sge_count > instance->max_num_sge) {
1466		dev_err(&instance->pdev->dev, "DCDB too many SGE NUM=%x\n",
1467			pthru->sge_count);
1468		return 0;
1469	}
1470
1471	/*
1472	 * Sense info specific
1473	 */
1474	pthru->sense_len = SCSI_SENSE_BUFFERSIZE;
1475	pthru->sense_buf_phys_addr_hi =
1476		cpu_to_le32(upper_32_bits(cmd->sense_phys_addr));
1477	pthru->sense_buf_phys_addr_lo =
1478		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
1479
1480	/*
1481	 * Compute the total number of frames this command consumes. FW uses
1482	 * this number to pull sufficient number of frames from host memory.
1483	 */
1484	cmd->frame_count = megasas_get_frame_count(instance, pthru->sge_count,
1485							PTHRU_FRAME);
1486
1487	return cmd->frame_count;
1488}
1489
1490/**
1491 * megasas_build_ldio -	Prepares IOs to logical devices
1492 * @instance:		Adapter soft state
1493 * @scp:		SCSI command
1494 * @cmd:		Command to be prepared
1495 *
1496 * Frames (and accompanying SGLs) for regular SCSI IOs use this function.
1497 */
1498static int
1499megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
1500		   struct megasas_cmd *cmd)
1501{
1502	u32 device_id;
1503	u8 sc = scp->cmnd[0];
1504	u16 flags = 0;
1505	struct megasas_io_frame *ldio;
1506
1507	device_id = MEGASAS_DEV_INDEX(scp);
1508	ldio = (struct megasas_io_frame *)cmd->frame;
1509
1510	if (scp->sc_data_direction == DMA_TO_DEVICE)
1511		flags = MFI_FRAME_DIR_WRITE;
1512	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
1513		flags = MFI_FRAME_DIR_READ;
1514
1515	if (instance->flag_ieee == 1) {
1516		flags |= MFI_FRAME_IEEE;
1517	}
1518
1519	/*
1520	 * Prepare the Logical IO frame: 2nd bit is zero for all read cmds
1521	 */
1522	ldio->cmd = (sc & 0x02) ? MFI_CMD_LD_WRITE : MFI_CMD_LD_READ;
1523	ldio->cmd_status = 0x0;
1524	ldio->scsi_status = 0x0;
1525	ldio->target_id = device_id;
1526	ldio->timeout = 0;
1527	ldio->reserved_0 = 0;
1528	ldio->pad_0 = 0;
1529	ldio->flags = cpu_to_le16(flags);
1530	ldio->start_lba_hi = 0;
1531	ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0;
1532
1533	/*
1534	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1535	 */
1536	if (scp->cmd_len == 6) {
1537		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]);
1538		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) |
1539						 ((u32) scp->cmnd[2] << 8) |
1540						 (u32) scp->cmnd[3]);
1541
1542		ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF);
1543	}
1544
1545	/*
1546	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1547	 */
1548	else if (scp->cmd_len == 10) {
1549		ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] |
1550					      ((u32) scp->cmnd[7] << 8));
1551		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1552						 ((u32) scp->cmnd[3] << 16) |
1553						 ((u32) scp->cmnd[4] << 8) |
1554						 (u32) scp->cmnd[5]);
1555	}
1556
1557	/*
1558	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1559	 */
1560	else if (scp->cmd_len == 12) {
1561		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1562					      ((u32) scp->cmnd[7] << 16) |
1563					      ((u32) scp->cmnd[8] << 8) |
1564					      (u32) scp->cmnd[9]);
1565
1566		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1567						 ((u32) scp->cmnd[3] << 16) |
1568						 ((u32) scp->cmnd[4] << 8) |
1569						 (u32) scp->cmnd[5]);
1570	}
1571
1572	/*
1573	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
1574	 */
1575	else if (scp->cmd_len == 16) {
1576		ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) |
1577					      ((u32) scp->cmnd[11] << 16) |
1578					      ((u32) scp->cmnd[12] << 8) |
1579					      (u32) scp->cmnd[13]);
1580
1581		ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) |
1582						 ((u32) scp->cmnd[7] << 16) |
1583						 ((u32) scp->cmnd[8] << 8) |
1584						 (u32) scp->cmnd[9]);
1585
1586		ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) |
1587						 ((u32) scp->cmnd[3] << 16) |
1588						 ((u32) scp->cmnd[4] << 8) |
1589						 (u32) scp->cmnd[5]);
1590
1591	}
1592
1593	/*
1594	 * Construct SGL
1595	 */
1596	if (instance->flag_ieee) {
1597		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1598		ldio->sge_count = megasas_make_sgl_skinny(instance, scp,
1599					      &ldio->sgl);
1600	} else if (IS_DMA64) {
1601		ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64);
1602		ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl);
1603	} else
1604		ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl);
1605
1606	if (ldio->sge_count > instance->max_num_sge) {
1607		dev_err(&instance->pdev->dev, "build_ld_io: sge_count = %x\n",
1608			ldio->sge_count);
1609		return 0;
1610	}
1611
1612	/*
1613	 * Sense info specific
1614	 */
1615	ldio->sense_len = SCSI_SENSE_BUFFERSIZE;
1616	ldio->sense_buf_phys_addr_hi = 0;
1617	ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr);
1618
1619	/*
1620	 * Compute the total number of frames this command consumes. FW uses
1621	 * this number to pull sufficient number of frames from host memory.
1622	 */
1623	cmd->frame_count = megasas_get_frame_count(instance,
1624			ldio->sge_count, IO_FRAME);
1625
1626	return cmd->frame_count;
1627}
1628
1629/**
1630 * megasas_cmd_type -		Checks if the cmd is for logical drive/sysPD
1631 *				and whether it's RW or non RW
1632 * @cmd:			SCSI command
1633 *
1634 */
1635inline int megasas_cmd_type(struct scsi_cmnd *cmd)
1636{
1637	int ret;
1638
1639	switch (cmd->cmnd[0]) {
1640	case READ_10:
1641	case WRITE_10:
1642	case READ_12:
1643	case WRITE_12:
1644	case READ_6:
1645	case WRITE_6:
1646	case READ_16:
1647	case WRITE_16:
1648		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1649			READ_WRITE_LDIO : READ_WRITE_SYSPDIO;
1650		break;
1651	default:
1652		ret = (MEGASAS_IS_LOGICAL(cmd->device)) ?
1653			NON_READ_WRITE_LDIO : NON_READ_WRITE_SYSPDIO;
1654	}
1655	return ret;
1656}
1657
1658 /**
1659 * megasas_dump_pending_frames -	Dumps the frame address of all pending cmds
1660 *					in FW
1661 * @instance:				Adapter soft state
1662 */
1663static inline void
1664megasas_dump_pending_frames(struct megasas_instance *instance)
1665{
1666	struct megasas_cmd *cmd;
1667	int i,n;
1668	union megasas_sgl *mfi_sgl;
1669	struct megasas_io_frame *ldio;
1670	struct megasas_pthru_frame *pthru;
1671	u32 sgcount;
1672	u16 max_cmd = instance->max_fw_cmds;
1673
1674	dev_err(&instance->pdev->dev, "[%d]: Dumping Frame Phys Address of all pending cmds in FW\n",instance->host->host_no);
1675	dev_err(&instance->pdev->dev, "[%d]: Total OS Pending cmds : %d\n",instance->host->host_no,atomic_read(&instance->fw_outstanding));
1676	if (IS_DMA64)
1677		dev_err(&instance->pdev->dev, "[%d]: 64 bit SGLs were sent to FW\n",instance->host->host_no);
1678	else
1679		dev_err(&instance->pdev->dev, "[%d]: 32 bit SGLs were sent to FW\n",instance->host->host_no);
1680
1681	dev_err(&instance->pdev->dev, "[%d]: Pending OS cmds in FW : \n",instance->host->host_no);
1682	for (i = 0; i < max_cmd; i++) {
1683		cmd = instance->cmd_list[i];
1684		if (!cmd->scmd)
1685			continue;
1686		dev_err(&instance->pdev->dev, "[%d]: Frame addr :0x%08lx : ",instance->host->host_no,(unsigned long)cmd->frame_phys_addr);
1687		if (megasas_cmd_type(cmd->scmd) == READ_WRITE_LDIO) {
1688			ldio = (struct megasas_io_frame *)cmd->frame;
1689			mfi_sgl = &ldio->sgl;
1690			sgcount = ldio->sge_count;
1691			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x,"
1692			" lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1693			instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id,
1694			le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi),
1695			le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount);
1696		} else {
1697			pthru = (struct megasas_pthru_frame *) cmd->frame;
1698			mfi_sgl = &pthru->sgl;
1699			sgcount = pthru->sge_count;
1700			dev_err(&instance->pdev->dev, "[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, "
1701			"lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",
1702			instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id,
1703			pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len),
1704			le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount);
1705		}
1706		if (megasas_dbg_lvl & MEGASAS_DBG_LVL) {
1707			for (n = 0; n < sgcount; n++) {
1708				if (IS_DMA64)
1709					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%llx\n",
1710						le32_to_cpu(mfi_sgl->sge64[n].length),
1711						le64_to_cpu(mfi_sgl->sge64[n].phys_addr));
1712				else
1713					dev_err(&instance->pdev->dev, "sgl len : 0x%x, sgl addr : 0x%x\n",
1714						le32_to_cpu(mfi_sgl->sge32[n].length),
1715						le32_to_cpu(mfi_sgl->sge32[n].phys_addr));
1716			}
1717		}
1718	} /*for max_cmd*/
1719	dev_err(&instance->pdev->dev, "[%d]: Pending Internal cmds in FW : \n",instance->host->host_no);
1720	for (i = 0; i < max_cmd; i++) {
1721
1722		cmd = instance->cmd_list[i];
1723
1724		if (cmd->sync_cmd == 1)
1725			dev_err(&instance->pdev->dev, "0x%08lx : ", (unsigned long)cmd->frame_phys_addr);
1726	}
1727	dev_err(&instance->pdev->dev, "[%d]: Dumping Done\n\n",instance->host->host_no);
1728}
1729
1730u32
1731megasas_build_and_issue_cmd(struct megasas_instance *instance,
1732			    struct scsi_cmnd *scmd)
1733{
1734	struct megasas_cmd *cmd;
1735	u32 frame_count;
1736
1737	cmd = megasas_get_cmd(instance);
1738	if (!cmd)
1739		return SCSI_MLQUEUE_HOST_BUSY;
1740
1741	/*
1742	 * Logical drive command
1743	 */
1744	if (megasas_cmd_type(scmd) == READ_WRITE_LDIO)
1745		frame_count = megasas_build_ldio(instance, scmd, cmd);
1746	else
1747		frame_count = megasas_build_dcdb(instance, scmd, cmd);
1748
1749	if (!frame_count)
1750		goto out_return_cmd;
1751
1752	cmd->scmd = scmd;
1753	scmd->SCp.ptr = (char *)cmd;
1754
1755	/*
1756	 * Issue the command to the FW
1757	 */
1758	atomic_inc(&instance->fw_outstanding);
1759
1760	instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
1761				cmd->frame_count-1, instance->reg_set);
1762
1763	return 0;
1764out_return_cmd:
1765	megasas_return_cmd(instance, cmd);
1766	return SCSI_MLQUEUE_HOST_BUSY;
1767}
1768
1769
1770/**
1771 * megasas_queue_command -	Queue entry point
1772 * @shost:			adapter SCSI host
1773 * @scmd:			SCSI command to be queued
1774 */
1775static int
1776megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
1777{
1778	struct megasas_instance *instance;
1779	struct MR_PRIV_DEVICE *mr_device_priv_data;
1780	u32 ld_tgt_id;
1781
1782	instance = (struct megasas_instance *)
1783	    scmd->device->host->hostdata;
1784
1785	if (instance->unload == 1) {
1786		scmd->result = DID_NO_CONNECT << 16;
1787		scmd->scsi_done(scmd);
1788		return 0;
1789	}
1790
1791	if (instance->issuepend_done == 0)
1792		return SCSI_MLQUEUE_HOST_BUSY;
1793
1794
1795	/* Check for an mpio path and adjust behavior */
1796	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
1797		if (megasas_check_mpio_paths(instance, scmd) ==
1798		    (DID_REQUEUE << 16)) {
1799			return SCSI_MLQUEUE_HOST_BUSY;
1800		} else {
1801			scmd->result = DID_NO_CONNECT << 16;
1802			scmd->scsi_done(scmd);
1803			return 0;
1804		}
1805	}
1806
1807	mr_device_priv_data = scmd->device->hostdata;
1808	if (!mr_device_priv_data ||
1809	    (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)) {
1810		scmd->result = DID_NO_CONNECT << 16;
1811		scmd->scsi_done(scmd);
1812		return 0;
1813	}
1814
1815	if (MEGASAS_IS_LOGICAL(scmd->device)) {
1816		ld_tgt_id = MEGASAS_TARGET_ID(scmd->device);
1817		if (instance->ld_tgtid_status[ld_tgt_id] == LD_TARGET_ID_DELETED) {
1818			scmd->result = DID_NO_CONNECT << 16;
1819			scmd->scsi_done(scmd);
1820			return 0;
1821		}
1822	}
1823
1824	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL)
1825		return SCSI_MLQUEUE_HOST_BUSY;
1826
1827	if (mr_device_priv_data->tm_busy)
1828		return SCSI_MLQUEUE_DEVICE_BUSY;
1829
1830
1831	scmd->result = 0;
1832
1833	if (MEGASAS_IS_LOGICAL(scmd->device) &&
1834	    (scmd->device->id >= instance->fw_supported_vd_count ||
1835		scmd->device->lun)) {
1836		scmd->result = DID_BAD_TARGET << 16;
1837		goto out_done;
1838	}
1839
1840	if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) &&
1841	    MEGASAS_IS_LOGICAL(scmd->device) &&
1842	    (!instance->fw_sync_cache_support)) {
1843		scmd->result = DID_OK << 16;
1844		goto out_done;
1845	}
1846
1847	return instance->instancet->build_and_issue_cmd(instance, scmd);
1848
1849 out_done:
1850	scmd->scsi_done(scmd);
1851	return 0;
1852}
1853
1854static struct megasas_instance *megasas_lookup_instance(u16 host_no)
1855{
1856	int i;
1857
1858	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
1859
1860		if ((megasas_mgmt_info.instance[i]) &&
1861		    (megasas_mgmt_info.instance[i]->host->host_no == host_no))
1862			return megasas_mgmt_info.instance[i];
1863	}
1864
1865	return NULL;
1866}
1867
1868/*
1869* megasas_set_dynamic_target_properties -
1870* Device property set by driver may not be static and it is required to be
1871* updated after OCR
1872*
1873* set tm_capable.
1874* set dma alignment (only for eedp protection enable vd).
1875*
1876* @sdev: OS provided scsi device
1877*
1878* Returns void
1879*/
1880void megasas_set_dynamic_target_properties(struct scsi_device *sdev,
1881					   bool is_target_prop)
1882{
1883	u16 pd_index = 0, ld;
1884	u32 device_id;
1885	struct megasas_instance *instance;
1886	struct fusion_context *fusion;
1887	struct MR_PRIV_DEVICE *mr_device_priv_data;
1888	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1889	struct MR_LD_RAID *raid;
1890	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
1891
1892	instance = megasas_lookup_instance(sdev->host->host_no);
1893	fusion = instance->ctrl_context;
1894	mr_device_priv_data = sdev->hostdata;
1895
1896	if (!fusion || !mr_device_priv_data)
1897		return;
1898
1899	if (MEGASAS_IS_LOGICAL(sdev)) {
1900		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
1901					+ sdev->id;
1902		local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
1903		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
1904		if (ld >= instance->fw_supported_vd_count)
1905			return;
1906		raid = MR_LdRaidGet(ld, local_map_ptr);
1907
1908		if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER)
1909		blk_queue_update_dma_alignment(sdev->request_queue, 0x7);
1910
1911		mr_device_priv_data->is_tm_capable =
1912			raid->capability.tmCapable;
1913
1914		if (!raid->flags.isEPD)
1915			sdev->no_write_same = 1;
1916
1917	} else if (instance->use_seqnum_jbod_fp) {
1918		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
1919			sdev->id;
1920		pd_sync = (void *)fusion->pd_seq_sync
1921				[(instance->pd_seq_map_id - 1) & 1];
1922		mr_device_priv_data->is_tm_capable =
1923			pd_sync->seq[pd_index].capability.tmCapable;
1924	}
1925
1926	if (is_target_prop && instance->tgt_prop->reset_tmo) {
1927		/*
1928		 * If FW provides a target reset timeout value, driver will use
1929		 * it. If not set, fallback to default values.
1930		 */
1931		mr_device_priv_data->target_reset_tmo =
1932			min_t(u8, instance->max_reset_tmo,
1933			      instance->tgt_prop->reset_tmo);
1934		mr_device_priv_data->task_abort_tmo = instance->task_abort_tmo;
1935	} else {
1936		mr_device_priv_data->target_reset_tmo =
1937						MEGASAS_DEFAULT_TM_TIMEOUT;
1938		mr_device_priv_data->task_abort_tmo =
1939						MEGASAS_DEFAULT_TM_TIMEOUT;
1940	}
1941}
1942
1943/*
1944 * megasas_set_nvme_device_properties -
1945 * set nomerges=2
1946 * set virtual page boundary = 4K (current mr_nvme_pg_size is 4K).
1947 * set maximum io transfer = MDTS of NVME device provided by MR firmware.
1948 *
1949 * MR firmware provides value in KB. Caller of this function converts
1950 * kb into bytes.
1951 *
1952 * e.a MDTS=5 means 2^5 * nvme page size. (In case of 4K page size,
1953 * MR firmware provides value 128 as (32 * 4K) = 128K.
1954 *
1955 * @sdev:				scsi device
1956 * @max_io_size:				maximum io transfer size
1957 *
1958 */
1959static inline void
1960megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size)
1961{
1962	struct megasas_instance *instance;
1963	u32 mr_nvme_pg_size;
1964
1965	instance = (struct megasas_instance *)sdev->host->hostdata;
1966	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
1967				MR_DEFAULT_NVME_PAGE_SIZE);
1968
1969	blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512));
1970
1971	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue);
1972	blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1);
1973}
1974
1975/*
1976 * megasas_set_fw_assisted_qd -
1977 * set device queue depth to can_queue
1978 * set device queue depth to fw assisted qd
1979 *
1980 * @sdev:				scsi device
1981 * @is_target_prop			true, if fw provided target properties.
1982 */
1983static void megasas_set_fw_assisted_qd(struct scsi_device *sdev,
1984						 bool is_target_prop)
1985{
1986	u8 interface_type;
1987	u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN;
1988	u32 tgt_device_qd;
1989	struct megasas_instance *instance;
1990	struct MR_PRIV_DEVICE *mr_device_priv_data;
1991
1992	instance = megasas_lookup_instance(sdev->host->host_no);
1993	mr_device_priv_data = sdev->hostdata;
1994	interface_type  = mr_device_priv_data->interface_type;
1995
1996	switch (interface_type) {
1997	case SAS_PD:
1998		device_qd = MEGASAS_SAS_QD;
1999		break;
2000	case SATA_PD:
2001		device_qd = MEGASAS_SATA_QD;
2002		break;
2003	case NVME_PD:
2004		device_qd = MEGASAS_NVME_QD;
2005		break;
2006	}
2007
2008	if (is_target_prop) {
2009		tgt_device_qd = le32_to_cpu(instance->tgt_prop->device_qdepth);
2010		if (tgt_device_qd)
2011			device_qd = min(instance->host->can_queue,
2012					(int)tgt_device_qd);
2013	}
2014
2015	if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE)
2016		device_qd = instance->host->can_queue;
2017
2018	scsi_change_queue_depth(sdev, device_qd);
2019}
2020
2021/*
2022 * megasas_set_static_target_properties -
2023 * Device property set by driver are static and it is not required to be
2024 * updated after OCR.
2025 *
2026 * set io timeout
2027 * set device queue depth
2028 * set nvme device properties. see - megasas_set_nvme_device_properties
2029 *
2030 * @sdev:				scsi device
2031 * @is_target_prop			true, if fw provided target properties.
2032 */
2033static void megasas_set_static_target_properties(struct scsi_device *sdev,
2034						 bool is_target_prop)
2035{
2036	u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB;
2037	struct megasas_instance *instance;
2038
2039	instance = megasas_lookup_instance(sdev->host->host_no);
2040
2041	/*
2042	 * The RAID firmware may require extended timeouts.
2043	 */
2044	blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ);
2045
2046	/* max_io_size_kb will be set to non zero for
2047	 * nvme based vd and syspd.
2048	 */
2049	if (is_target_prop)
2050		max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb);
2051
2052	if (instance->nvme_page_size && max_io_size_kb)
2053		megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10));
2054
2055	megasas_set_fw_assisted_qd(sdev, is_target_prop);
2056}
2057
2058
2059static int megasas_slave_configure(struct scsi_device *sdev)
2060{
2061	u16 pd_index = 0;
2062	struct megasas_instance *instance;
2063	int ret_target_prop = DCMD_FAILED;
2064	bool is_target_prop = false;
2065
2066	instance = megasas_lookup_instance(sdev->host->host_no);
2067	if (instance->pd_list_not_supported) {
2068		if (!MEGASAS_IS_LOGICAL(sdev) && sdev->type == TYPE_DISK) {
2069			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2070				sdev->id;
2071			if (instance->pd_list[pd_index].driveState !=
2072				MR_PD_STATE_SYSTEM)
2073				return -ENXIO;
2074		}
2075	}
2076
2077	mutex_lock(&instance->reset_mutex);
2078	/* Send DCMD to Firmware and cache the information */
2079	if ((instance->pd_info) && !MEGASAS_IS_LOGICAL(sdev))
2080		megasas_get_pd_info(instance, sdev);
2081
2082	/* Some ventura firmware may not have instance->nvme_page_size set.
2083	 * Do not send MR_DCMD_DRV_GET_TARGET_PROP
2084	 */
2085	if ((instance->tgt_prop) && (instance->nvme_page_size))
2086		ret_target_prop = megasas_get_target_prop(instance, sdev);
2087
2088	is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
2089	megasas_set_static_target_properties(sdev, is_target_prop);
2090
2091	/* This sdev property may change post OCR */
2092	megasas_set_dynamic_target_properties(sdev, is_target_prop);
2093
2094	mutex_unlock(&instance->reset_mutex);
2095
2096	return 0;
2097}
2098
2099static int megasas_slave_alloc(struct scsi_device *sdev)
2100{
2101	u16 pd_index = 0, ld_tgt_id;
2102	struct megasas_instance *instance ;
2103	struct MR_PRIV_DEVICE *mr_device_priv_data;
2104
2105	instance = megasas_lookup_instance(sdev->host->host_no);
2106	if (!MEGASAS_IS_LOGICAL(sdev)) {
2107		/*
2108		 * Open the OS scan to the SYSTEM PD
2109		 */
2110		pd_index =
2111			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
2112			sdev->id;
2113		if ((instance->pd_list_not_supported ||
2114			instance->pd_list[pd_index].driveState ==
2115			MR_PD_STATE_SYSTEM)) {
2116			goto scan_target;
2117		}
2118		return -ENXIO;
2119	} else if (!MEGASAS_IS_LUN_VALID(sdev)) {
2120		sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
2121		return -ENXIO;
2122	}
2123
2124scan_target:
2125	mr_device_priv_data = kzalloc(sizeof(*mr_device_priv_data),
2126					GFP_KERNEL);
2127	if (!mr_device_priv_data)
2128		return -ENOMEM;
2129
2130	if (MEGASAS_IS_LOGICAL(sdev)) {
2131		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2132		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_ACTIVE;
2133		if (megasas_dbg_lvl & LD_PD_DEBUG)
2134			sdev_printk(KERN_INFO, sdev, "LD target ID %d created.\n", ld_tgt_id);
2135	}
2136
2137	sdev->hostdata = mr_device_priv_data;
2138
2139	atomic_set(&mr_device_priv_data->r1_ldio_hint,
2140		   instance->r1_ldio_hint_default);
2141	return 0;
2142}
2143
2144static void megasas_slave_destroy(struct scsi_device *sdev)
2145{
2146	u16 ld_tgt_id;
2147	struct megasas_instance *instance;
2148
2149	instance = megasas_lookup_instance(sdev->host->host_no);
2150
2151	if (MEGASAS_IS_LOGICAL(sdev)) {
2152		if (!MEGASAS_IS_LUN_VALID(sdev)) {
2153			sdev_printk(KERN_INFO, sdev, "%s: invalid LUN\n", __func__);
2154			return;
2155		}
2156		ld_tgt_id = MEGASAS_TARGET_ID(sdev);
2157		instance->ld_tgtid_status[ld_tgt_id] = LD_TARGET_ID_DELETED;
2158		if (megasas_dbg_lvl & LD_PD_DEBUG)
2159			sdev_printk(KERN_INFO, sdev,
2160				    "LD target ID %d removed from OS stack\n", ld_tgt_id);
2161	}
2162
2163	kfree(sdev->hostdata);
2164	sdev->hostdata = NULL;
2165}
2166
2167/*
2168* megasas_complete_outstanding_ioctls - Complete outstanding ioctls after a
2169*                                       kill adapter
2170* @instance:				Adapter soft state
2171*
2172*/
2173static void megasas_complete_outstanding_ioctls(struct megasas_instance *instance)
2174{
2175	int i;
2176	struct megasas_cmd *cmd_mfi;
2177	struct megasas_cmd_fusion *cmd_fusion;
2178	struct fusion_context *fusion = instance->ctrl_context;
2179
2180	/* Find all outstanding ioctls */
2181	if (fusion) {
2182		for (i = 0; i < instance->max_fw_cmds; i++) {
2183			cmd_fusion = fusion->cmd_list[i];
2184			if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
2185				cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
2186				if (cmd_mfi->sync_cmd &&
2187				    (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
2188					cmd_mfi->frame->hdr.cmd_status =
2189							MFI_STAT_WRONG_STATE;
2190					megasas_complete_cmd(instance,
2191							     cmd_mfi, DID_OK);
2192				}
2193			}
2194		}
2195	} else {
2196		for (i = 0; i < instance->max_fw_cmds; i++) {
2197			cmd_mfi = instance->cmd_list[i];
2198			if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd !=
2199				MFI_CMD_ABORT)
2200				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
2201		}
2202	}
2203}
2204
2205
2206void megaraid_sas_kill_hba(struct megasas_instance *instance)
2207{
2208	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2209		dev_warn(&instance->pdev->dev,
2210			 "Adapter already dead, skipping kill HBA\n");
2211		return;
2212	}
2213
2214	/* Set critical error to block I/O & ioctls in case caller didn't */
2215	atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2216	/* Wait 1 second to ensure IO or ioctls in build have posted */
2217	msleep(1000);
2218	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
2219		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
2220		(instance->adapter_type != MFI_SERIES)) {
2221		if (!instance->requestorId) {
2222			writel(MFI_STOP_ADP, &instance->reg_set->doorbell);
2223			/* Flush */
2224			readl(&instance->reg_set->doorbell);
2225		}
2226		if (instance->requestorId && instance->peerIsPresent)
2227			memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
2228	} else {
2229		writel(MFI_STOP_ADP,
2230			&instance->reg_set->inbound_doorbell);
2231	}
2232	/* Complete outstanding ioctls when adapter is killed */
2233	megasas_complete_outstanding_ioctls(instance);
2234}
2235
2236 /**
2237  * megasas_check_and_restore_queue_depth - Check if queue depth needs to be
2238  *					restored to max value
2239  * @instance:			Adapter soft state
2240  *
2241  */
2242void
2243megasas_check_and_restore_queue_depth(struct megasas_instance *instance)
2244{
2245	unsigned long flags;
2246
2247	if (instance->flag & MEGASAS_FW_BUSY
2248	    && time_after(jiffies, instance->last_time + 5 * HZ)
2249	    && atomic_read(&instance->fw_outstanding) <
2250	    instance->throttlequeuedepth + 1) {
2251
2252		spin_lock_irqsave(instance->host->host_lock, flags);
2253		instance->flag &= ~MEGASAS_FW_BUSY;
2254
2255		instance->host->can_queue = instance->cur_can_queue;
2256		spin_unlock_irqrestore(instance->host->host_lock, flags);
2257	}
2258}
2259
2260/**
2261 * megasas_complete_cmd_dpc	 -	Returns FW's controller structure
2262 * @instance_addr:			Address of adapter soft state
2263 *
2264 * Tasklet to complete cmds
2265 */
2266static void megasas_complete_cmd_dpc(unsigned long instance_addr)
2267{
2268	u32 producer;
2269	u32 consumer;
2270	u32 context;
2271	struct megasas_cmd *cmd;
2272	struct megasas_instance *instance =
2273				(struct megasas_instance *)instance_addr;
2274	unsigned long flags;
2275
2276	/* If we have already declared adapter dead, donot complete cmds */
2277	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
2278		return;
2279
2280	spin_lock_irqsave(&instance->completion_lock, flags);
2281
2282	producer = le32_to_cpu(*instance->producer);
2283	consumer = le32_to_cpu(*instance->consumer);
2284
2285	while (consumer != producer) {
2286		context = le32_to_cpu(instance->reply_queue[consumer]);
2287		if (context >= instance->max_fw_cmds) {
2288			dev_err(&instance->pdev->dev, "Unexpected context value %x\n",
2289				context);
2290			BUG();
2291		}
2292
2293		cmd = instance->cmd_list[context];
2294
2295		megasas_complete_cmd(instance, cmd, DID_OK);
2296
2297		consumer++;
2298		if (consumer == (instance->max_fw_cmds + 1)) {
2299			consumer = 0;
2300		}
2301	}
2302
2303	*instance->consumer = cpu_to_le32(producer);
2304
2305	spin_unlock_irqrestore(&instance->completion_lock, flags);
2306
2307	/*
2308	 * Check if we can restore can_queue
2309	 */
2310	megasas_check_and_restore_queue_depth(instance);
2311}
2312
2313static void megasas_sriov_heartbeat_handler(struct timer_list *t);
2314
2315/**
2316 * megasas_start_timer - Initializes sriov heartbeat timer object
2317 * @instance:		Adapter soft state
2318 *
2319 */
2320void megasas_start_timer(struct megasas_instance *instance)
2321{
2322	struct timer_list *timer = &instance->sriov_heartbeat_timer;
2323
2324	timer_setup(timer, megasas_sriov_heartbeat_handler, 0);
2325	timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF;
2326	add_timer(timer);
2327}
2328
2329static void
2330megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
2331
2332static void
2333process_fw_state_change_wq(struct work_struct *work);
2334
2335static void megasas_do_ocr(struct megasas_instance *instance)
2336{
2337	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
2338	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
2339	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
2340		*instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
2341	}
2342	instance->instancet->disable_intr(instance);
2343	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
2344	instance->issuepend_done = 0;
2345
2346	atomic_set(&instance->fw_outstanding, 0);
2347	megasas_internal_reset_defer_cmds(instance);
2348	process_fw_state_change_wq(&instance->work_init);
2349}
2350
2351static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
2352					    int initial)
2353{
2354	struct megasas_cmd *cmd;
2355	struct megasas_dcmd_frame *dcmd;
2356	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
2357	dma_addr_t new_affiliation_111_h;
2358	int ld, retval = 0;
2359	u8 thisVf;
2360
2361	cmd = megasas_get_cmd(instance);
2362
2363	if (!cmd) {
2364		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation_111:"
2365		       "Failed to get cmd for scsi%d\n",
2366			instance->host->host_no);
2367		return -ENOMEM;
2368	}
2369
2370	dcmd = &cmd->frame->dcmd;
2371
2372	if (!instance->vf_affiliation_111) {
2373		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2374		       "affiliation for scsi%d\n", instance->host->host_no);
2375		megasas_return_cmd(instance, cmd);
2376		return -ENOMEM;
2377	}
2378
2379	if (initial)
2380			memset(instance->vf_affiliation_111, 0,
2381			       sizeof(struct MR_LD_VF_AFFILIATION_111));
2382	else {
2383		new_affiliation_111 =
2384			dma_alloc_coherent(&instance->pdev->dev,
2385					   sizeof(struct MR_LD_VF_AFFILIATION_111),
2386					   &new_affiliation_111_h, GFP_KERNEL);
2387		if (!new_affiliation_111) {
2388			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2389			       "memory for new affiliation for scsi%d\n",
2390			       instance->host->host_no);
2391			megasas_return_cmd(instance, cmd);
2392			return -ENOMEM;
2393		}
2394	}
2395
2396	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2397
2398	dcmd->cmd = MFI_CMD_DCMD;
2399	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2400	dcmd->sge_count = 1;
2401	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2402	dcmd->timeout = 0;
2403	dcmd->pad_0 = 0;
2404	dcmd->data_xfer_len =
2405		cpu_to_le32(sizeof(struct MR_LD_VF_AFFILIATION_111));
2406	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111);
2407
2408	if (initial)
2409		dcmd->sgl.sge32[0].phys_addr =
2410			cpu_to_le32(instance->vf_affiliation_111_h);
2411	else
2412		dcmd->sgl.sge32[0].phys_addr =
2413			cpu_to_le32(new_affiliation_111_h);
2414
2415	dcmd->sgl.sge32[0].length = cpu_to_le32(
2416		sizeof(struct MR_LD_VF_AFFILIATION_111));
2417
2418	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2419	       "scsi%d\n", instance->host->host_no);
2420
2421	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2422		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2423		       " failed with status 0x%x for scsi%d\n",
2424		       dcmd->cmd_status, instance->host->host_no);
2425		retval = 1; /* Do a scan if we couldn't get affiliation */
2426		goto out;
2427	}
2428
2429	if (!initial) {
2430		thisVf = new_affiliation_111->thisVf;
2431		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
2432			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
2433			    new_affiliation_111->map[ld].policy[thisVf]) {
2434				dev_warn(&instance->pdev->dev, "SR-IOV: "
2435				       "Got new LD/VF affiliation for scsi%d\n",
2436				       instance->host->host_no);
2437				memcpy(instance->vf_affiliation_111,
2438				       new_affiliation_111,
2439				       sizeof(struct MR_LD_VF_AFFILIATION_111));
2440				retval = 1;
2441				goto out;
2442			}
2443	}
2444out:
2445	if (new_affiliation_111) {
2446		dma_free_coherent(&instance->pdev->dev,
2447				    sizeof(struct MR_LD_VF_AFFILIATION_111),
2448				    new_affiliation_111,
2449				    new_affiliation_111_h);
2450	}
2451
2452	megasas_return_cmd(instance, cmd);
2453
2454	return retval;
2455}
2456
2457static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
2458					    int initial)
2459{
2460	struct megasas_cmd *cmd;
2461	struct megasas_dcmd_frame *dcmd;
2462	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
2463	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
2464	dma_addr_t new_affiliation_h;
2465	int i, j, retval = 0, found = 0, doscan = 0;
2466	u8 thisVf;
2467
2468	cmd = megasas_get_cmd(instance);
2469
2470	if (!cmd) {
2471		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_vf_affiliation12: "
2472		       "Failed to get cmd for scsi%d\n",
2473		       instance->host->host_no);
2474		return -ENOMEM;
2475	}
2476
2477	dcmd = &cmd->frame->dcmd;
2478
2479	if (!instance->vf_affiliation) {
2480		dev_warn(&instance->pdev->dev, "SR-IOV: Couldn't get LD/VF "
2481		       "affiliation for scsi%d\n", instance->host->host_no);
2482		megasas_return_cmd(instance, cmd);
2483		return -ENOMEM;
2484	}
2485
2486	if (initial)
2487		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
2488		       sizeof(struct MR_LD_VF_AFFILIATION));
2489	else {
2490		new_affiliation =
2491			dma_alloc_coherent(&instance->pdev->dev,
2492					   (MAX_LOGICAL_DRIVES + 1) * sizeof(struct MR_LD_VF_AFFILIATION),
2493					   &new_affiliation_h, GFP_KERNEL);
2494		if (!new_affiliation) {
2495			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
2496			       "memory for new affiliation for scsi%d\n",
2497			       instance->host->host_no);
2498			megasas_return_cmd(instance, cmd);
2499			return -ENOMEM;
2500		}
2501	}
2502
2503	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2504
2505	dcmd->cmd = MFI_CMD_DCMD;
2506	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2507	dcmd->sge_count = 1;
2508	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2509	dcmd->timeout = 0;
2510	dcmd->pad_0 = 0;
2511	dcmd->data_xfer_len = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2512		sizeof(struct MR_LD_VF_AFFILIATION));
2513	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_VF_MAP_GET_ALL_LDS);
2514
2515	if (initial)
2516		dcmd->sgl.sge32[0].phys_addr =
2517			cpu_to_le32(instance->vf_affiliation_h);
2518	else
2519		dcmd->sgl.sge32[0].phys_addr =
2520			cpu_to_le32(new_affiliation_h);
2521
2522	dcmd->sgl.sge32[0].length = cpu_to_le32((MAX_LOGICAL_DRIVES + 1) *
2523		sizeof(struct MR_LD_VF_AFFILIATION));
2524
2525	dev_warn(&instance->pdev->dev, "SR-IOV: Getting LD/VF affiliation for "
2526	       "scsi%d\n", instance->host->host_no);
2527
2528
2529	if (megasas_issue_blocked_cmd(instance, cmd, 0) != DCMD_SUCCESS) {
2530		dev_warn(&instance->pdev->dev, "SR-IOV: LD/VF affiliation DCMD"
2531		       " failed with status 0x%x for scsi%d\n",
2532		       dcmd->cmd_status, instance->host->host_no);
2533		retval = 1; /* Do a scan if we couldn't get affiliation */
2534		goto out;
2535	}
2536
2537	if (!initial) {
2538		if (!new_affiliation->ldCount) {
2539			dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2540			       "affiliation for passive path for scsi%d\n",
2541			       instance->host->host_no);
2542			retval = 1;
2543			goto out;
2544		}
2545		newmap = new_affiliation->map;
2546		savedmap = instance->vf_affiliation->map;
2547		thisVf = new_affiliation->thisVf;
2548		for (i = 0 ; i < new_affiliation->ldCount; i++) {
2549			found = 0;
2550			for (j = 0; j < instance->vf_affiliation->ldCount;
2551			     j++) {
2552				if (newmap->ref.targetId ==
2553				    savedmap->ref.targetId) {
2554					found = 1;
2555					if (newmap->policy[thisVf] !=
2556					    savedmap->policy[thisVf]) {
2557						doscan = 1;
2558						goto out;
2559					}
2560				}
2561				savedmap = (struct MR_LD_VF_MAP *)
2562					((unsigned char *)savedmap +
2563					 savedmap->size);
2564			}
2565			if (!found && newmap->policy[thisVf] !=
2566			    MR_LD_ACCESS_HIDDEN) {
2567				doscan = 1;
2568				goto out;
2569			}
2570			newmap = (struct MR_LD_VF_MAP *)
2571				((unsigned char *)newmap + newmap->size);
2572		}
2573
2574		newmap = new_affiliation->map;
2575		savedmap = instance->vf_affiliation->map;
2576
2577		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
2578			found = 0;
2579			for (j = 0 ; j < new_affiliation->ldCount; j++) {
2580				if (savedmap->ref.targetId ==
2581				    newmap->ref.targetId) {
2582					found = 1;
2583					if (savedmap->policy[thisVf] !=
2584					    newmap->policy[thisVf]) {
2585						doscan = 1;
2586						goto out;
2587					}
2588				}
2589				newmap = (struct MR_LD_VF_MAP *)
2590					((unsigned char *)newmap +
2591					 newmap->size);
2592			}
2593			if (!found && savedmap->policy[thisVf] !=
2594			    MR_LD_ACCESS_HIDDEN) {
2595				doscan = 1;
2596				goto out;
2597			}
2598			savedmap = (struct MR_LD_VF_MAP *)
2599				((unsigned char *)savedmap +
2600				 savedmap->size);
2601		}
2602	}
2603out:
2604	if (doscan) {
2605		dev_warn(&instance->pdev->dev, "SR-IOV: Got new LD/VF "
2606		       "affiliation for scsi%d\n", instance->host->host_no);
2607		memcpy(instance->vf_affiliation, new_affiliation,
2608		       new_affiliation->size);
2609		retval = 1;
2610	}
2611
2612	if (new_affiliation)
2613		dma_free_coherent(&instance->pdev->dev,
2614				    (MAX_LOGICAL_DRIVES + 1) *
2615				    sizeof(struct MR_LD_VF_AFFILIATION),
2616				    new_affiliation, new_affiliation_h);
2617	megasas_return_cmd(instance, cmd);
2618
2619	return retval;
2620}
2621
2622/* This function will get the current SR-IOV LD/VF affiliation */
2623static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
2624	int initial)
2625{
2626	int retval;
2627
2628	if (instance->PlasmaFW111)
2629		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
2630	else
2631		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
2632	return retval;
2633}
2634
2635/* This function will tell FW to start the SR-IOV heartbeat */
2636int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
2637					 int initial)
2638{
2639	struct megasas_cmd *cmd;
2640	struct megasas_dcmd_frame *dcmd;
2641	int retval = 0;
2642
2643	cmd = megasas_get_cmd(instance);
2644
2645	if (!cmd) {
2646		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_sriov_start_heartbeat: "
2647		       "Failed to get cmd for scsi%d\n",
2648		       instance->host->host_no);
2649		return -ENOMEM;
2650	}
2651
2652	dcmd = &cmd->frame->dcmd;
2653
2654	if (initial) {
2655		instance->hb_host_mem =
2656			dma_alloc_coherent(&instance->pdev->dev,
2657					   sizeof(struct MR_CTRL_HB_HOST_MEM),
2658					   &instance->hb_host_mem_h,
2659					   GFP_KERNEL);
2660		if (!instance->hb_host_mem) {
2661			dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
2662			       " memory for heartbeat host memory for scsi%d\n",
2663			       instance->host->host_no);
2664			retval = -ENOMEM;
2665			goto out;
2666		}
2667	}
2668
2669	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
2670
2671	dcmd->mbox.s[0] = cpu_to_le16(sizeof(struct MR_CTRL_HB_HOST_MEM));
2672	dcmd->cmd = MFI_CMD_DCMD;
2673	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
2674	dcmd->sge_count = 1;
2675	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_BOTH);
2676	dcmd->timeout = 0;
2677	dcmd->pad_0 = 0;
2678	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_CTRL_HB_HOST_MEM));
2679	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SHARED_HOST_MEM_ALLOC);
2680
2681	megasas_set_dma_settings(instance, dcmd, instance->hb_host_mem_h,
2682				 sizeof(struct MR_CTRL_HB_HOST_MEM));
2683
2684	dev_warn(&instance->pdev->dev, "SR-IOV: Starting heartbeat for scsi%d\n",
2685	       instance->host->host_no);
2686
2687	if ((instance->adapter_type != MFI_SERIES) &&
2688	    !instance->mask_interrupts)
2689		retval = megasas_issue_blocked_cmd(instance, cmd,
2690			MEGASAS_ROUTINE_WAIT_TIME_VF);
2691	else
2692		retval = megasas_issue_polled(instance, cmd);
2693
2694	if (retval) {
2695		dev_warn(&instance->pdev->dev, "SR-IOV: MR_DCMD_CTRL_SHARED_HOST"
2696			"_MEM_ALLOC DCMD %s for scsi%d\n",
2697			(dcmd->cmd_status == MFI_STAT_INVALID_STATUS) ?
2698			"timed out" : "failed", instance->host->host_no);
2699		retval = 1;
2700	}
2701
2702out:
2703	megasas_return_cmd(instance, cmd);
2704
2705	return retval;
2706}
2707
2708/* Handler for SR-IOV heartbeat */
2709static void megasas_sriov_heartbeat_handler(struct timer_list *t)
2710{
2711	struct megasas_instance *instance =
2712		from_timer(instance, t, sriov_heartbeat_timer);
2713
2714	if (instance->hb_host_mem->HB.fwCounter !=
2715	    instance->hb_host_mem->HB.driverCounter) {
2716		instance->hb_host_mem->HB.driverCounter =
2717			instance->hb_host_mem->HB.fwCounter;
2718		mod_timer(&instance->sriov_heartbeat_timer,
2719			  jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF);
2720	} else {
2721		dev_warn(&instance->pdev->dev, "SR-IOV: Heartbeat never "
2722		       "completed for scsi%d\n", instance->host->host_no);
2723		schedule_work(&instance->work_init);
2724	}
2725}
2726
2727/**
2728 * megasas_wait_for_outstanding -	Wait for all outstanding cmds
2729 * @instance:				Adapter soft state
2730 *
2731 * This function waits for up to MEGASAS_RESET_WAIT_TIME seconds for FW to
2732 * complete all its outstanding commands. Returns error if one or more IOs
2733 * are pending after this time period. It also marks the controller dead.
2734 */
2735static int megasas_wait_for_outstanding(struct megasas_instance *instance)
2736{
2737	int i, sl, outstanding;
2738	u32 reset_index;
2739	u32 wait_time = MEGASAS_RESET_WAIT_TIME;
2740	unsigned long flags;
2741	struct list_head clist_local;
2742	struct megasas_cmd *reset_cmd;
2743	u32 fw_state;
2744
2745	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2746		dev_info(&instance->pdev->dev, "%s:%d HBA is killed.\n",
2747		__func__, __LINE__);
2748		return FAILED;
2749	}
2750
2751	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2752
2753		INIT_LIST_HEAD(&clist_local);
2754		spin_lock_irqsave(&instance->hba_lock, flags);
2755		list_splice_init(&instance->internal_reset_pending_q,
2756				&clist_local);
2757		spin_unlock_irqrestore(&instance->hba_lock, flags);
2758
2759		dev_notice(&instance->pdev->dev, "HBA reset wait ...\n");
2760		for (i = 0; i < wait_time; i++) {
2761			msleep(1000);
2762			if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL)
2763				break;
2764		}
2765
2766		if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
2767			dev_notice(&instance->pdev->dev, "reset: Stopping HBA.\n");
2768			atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR);
2769			return FAILED;
2770		}
2771
2772		reset_index = 0;
2773		while (!list_empty(&clist_local)) {
2774			reset_cmd = list_entry((&clist_local)->next,
2775						struct megasas_cmd, list);
2776			list_del_init(&reset_cmd->list);
2777			if (reset_cmd->scmd) {
2778				reset_cmd->scmd->result = DID_REQUEUE << 16;
2779				dev_notice(&instance->pdev->dev, "%d:%p reset [%02x]\n",
2780					reset_index, reset_cmd,
2781					reset_cmd->scmd->cmnd[0]);
2782
2783				reset_cmd->scmd->scsi_done(reset_cmd->scmd);
2784				megasas_return_cmd(instance, reset_cmd);
2785			} else if (reset_cmd->sync_cmd) {
2786				dev_notice(&instance->pdev->dev, "%p synch cmds"
2787						"reset queue\n",
2788						reset_cmd);
2789
2790				reset_cmd->cmd_status_drv = DCMD_INIT;
2791				instance->instancet->fire_cmd(instance,
2792						reset_cmd->frame_phys_addr,
2793						0, instance->reg_set);
2794			} else {
2795				dev_notice(&instance->pdev->dev, "%p unexpected"
2796					"cmds lst\n",
2797					reset_cmd);
2798			}
2799			reset_index++;
2800		}
2801
2802		return SUCCESS;
2803	}
2804
2805	for (i = 0; i < resetwaittime; i++) {
2806		outstanding = atomic_read(&instance->fw_outstanding);
2807
2808		if (!outstanding)
2809			break;
2810
2811		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
2812			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
2813			       "commands to complete\n",i,outstanding);
2814			/*
2815			 * Call cmd completion routine. Cmd to be
2816			 * be completed directly without depending on isr.
2817			 */
2818			megasas_complete_cmd_dpc((unsigned long)instance);
2819		}
2820
2821		msleep(1000);
2822	}
2823
2824	i = 0;
2825	outstanding = atomic_read(&instance->fw_outstanding);
2826	fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2827
2828	if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2829		goto no_outstanding;
2830
2831	if (instance->disableOnlineCtrlReset)
2832		goto kill_hba_and_failed;
2833	do {
2834		if ((fw_state == MFI_STATE_FAULT) || atomic_read(&instance->fw_outstanding)) {
2835			dev_info(&instance->pdev->dev,
2836				"%s:%d waiting_for_outstanding: before issue OCR. FW state = 0x%x, outstanding 0x%x\n",
2837				__func__, __LINE__, fw_state, atomic_read(&instance->fw_outstanding));
2838			if (i == 3)
2839				goto kill_hba_and_failed;
2840			megasas_do_ocr(instance);
2841
2842			if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2843				dev_info(&instance->pdev->dev, "%s:%d OCR failed and HBA is killed.\n",
2844				__func__, __LINE__);
2845				return FAILED;
2846			}
2847			dev_info(&instance->pdev->dev, "%s:%d waiting_for_outstanding: after issue OCR.\n",
2848				__func__, __LINE__);
2849
2850			for (sl = 0; sl < 10; sl++)
2851				msleep(500);
2852
2853			outstanding = atomic_read(&instance->fw_outstanding);
2854
2855			fw_state = instance->instancet->read_fw_status_reg(instance) & MFI_STATE_MASK;
2856			if ((!outstanding && (fw_state == MFI_STATE_OPERATIONAL)))
2857				goto no_outstanding;
2858		}
2859		i++;
2860	} while (i <= 3);
2861
2862no_outstanding:
2863
2864	dev_info(&instance->pdev->dev, "%s:%d no more pending commands remain after reset handling.\n",
2865		__func__, __LINE__);
2866	return SUCCESS;
2867
2868kill_hba_and_failed:
2869
2870	/* Reset not supported, kill adapter */
2871	dev_info(&instance->pdev->dev, "%s:%d killing adapter scsi%d"
2872		" disableOnlineCtrlReset %d fw_outstanding %d \n",
2873		__func__, __LINE__, instance->host->host_no, instance->disableOnlineCtrlReset,
2874		atomic_read(&instance->fw_outstanding));
2875	megasas_dump_pending_frames(instance);
2876	megaraid_sas_kill_hba(instance);
2877
2878	return FAILED;
2879}
2880
2881/**
2882 * megasas_generic_reset -	Generic reset routine
2883 * @scmd:			Mid-layer SCSI command
2884 *
2885 * This routine implements a generic reset handler for device, bus and host
2886 * reset requests. Device, bus and host specific reset handlers can use this
2887 * function after they do their specific tasks.
2888 */
2889static int megasas_generic_reset(struct scsi_cmnd *scmd)
2890{
2891	int ret_val;
2892	struct megasas_instance *instance;
2893
2894	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2895
2896	scmd_printk(KERN_NOTICE, scmd, "megasas: RESET cmd=%x retries=%x\n",
2897		 scmd->cmnd[0], scmd->retries);
2898
2899	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
2900		dev_err(&instance->pdev->dev, "cannot recover from previous reset failures\n");
2901		return FAILED;
2902	}
2903
2904	ret_val = megasas_wait_for_outstanding(instance);
2905	if (ret_val == SUCCESS)
2906		dev_notice(&instance->pdev->dev, "reset successful\n");
2907	else
2908		dev_err(&instance->pdev->dev, "failed to do reset\n");
2909
2910	return ret_val;
2911}
2912
2913/**
2914 * megasas_reset_timer - quiesce the adapter if required
2915 * @scmd:		scsi cmnd
2916 *
2917 * Sets the FW busy flag and reduces the host->can_queue if the
2918 * cmd has not been completed within the timeout period.
2919 */
2920static enum
2921blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd)
2922{
2923	struct megasas_instance *instance;
2924	unsigned long flags;
2925
2926	if (time_after(jiffies, scmd->jiffies_at_alloc +
2927				(scmd_timeout * 2) * HZ)) {
2928		return BLK_EH_DONE;
2929	}
2930
2931	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2932	if (!(instance->flag & MEGASAS_FW_BUSY)) {
2933		/* FW is busy, throttle IO */
2934		spin_lock_irqsave(instance->host->host_lock, flags);
2935
2936		instance->host->can_queue = instance->throttlequeuedepth;
2937		instance->last_time = jiffies;
2938		instance->flag |= MEGASAS_FW_BUSY;
2939
2940		spin_unlock_irqrestore(instance->host->host_lock, flags);
2941	}
2942	return BLK_EH_RESET_TIMER;
2943}
2944
2945/**
2946 * megasas_dump -	This function will print hexdump of provided buffer.
2947 * @buf:		Buffer to be dumped
2948 * @sz:		Size in bytes
2949 * @format:		Different formats of dumping e.g. format=n will
2950 *			cause only 'n' 32 bit words to be dumped in a single
2951 *			line.
2952 */
2953inline void
2954megasas_dump(void *buf, int sz, int format)
2955{
2956	int i;
2957	__le32 *buf_loc = (__le32 *)buf;
2958
2959	for (i = 0; i < (sz / sizeof(__le32)); i++) {
2960		if ((i % format) == 0) {
2961			if (i != 0)
2962				printk(KERN_CONT "\n");
2963			printk(KERN_CONT "%08x: ", (i * 4));
2964		}
2965		printk(KERN_CONT "%08x ", le32_to_cpu(buf_loc[i]));
2966	}
2967	printk(KERN_CONT "\n");
2968}
2969
2970/**
2971 * megasas_dump_reg_set -	This function will print hexdump of register set
2972 * @reg_set:	Register set to be dumped
2973 */
2974inline void
2975megasas_dump_reg_set(void __iomem *reg_set)
2976{
2977	unsigned int i, sz = 256;
2978	u32 __iomem *reg = (u32 __iomem *)reg_set;
2979
2980	for (i = 0; i < (sz / sizeof(u32)); i++)
2981		printk("%08x: %08x\n", (i * 4), readl(&reg[i]));
2982}
2983
2984/**
2985 * megasas_dump_fusion_io -	This function will print key details
2986 *				of SCSI IO
2987 * @scmd:			SCSI command pointer of SCSI IO
2988 */
2989void
2990megasas_dump_fusion_io(struct scsi_cmnd *scmd)
2991{
2992	struct megasas_cmd_fusion *cmd;
2993	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
2994	struct megasas_instance *instance;
2995
2996	cmd = (struct megasas_cmd_fusion *)scmd->SCp.ptr;
2997	instance = (struct megasas_instance *)scmd->device->host->hostdata;
2998
2999	scmd_printk(KERN_INFO, scmd,
3000		    "scmd: (0x%p)  retries: 0x%x  allowed: 0x%x\n",
3001		    scmd, scmd->retries, scmd->allowed);
3002	scsi_print_command(scmd);
3003
3004	if (cmd) {
3005		req_desc = (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)cmd->request_desc;
3006		scmd_printk(KERN_INFO, scmd, "Request descriptor details:\n");
3007		scmd_printk(KERN_INFO, scmd,
3008			    "RequestFlags:0x%x  MSIxIndex:0x%x  SMID:0x%x  LMID:0x%x  DevHandle:0x%x\n",
3009			    req_desc->SCSIIO.RequestFlags,
3010			    req_desc->SCSIIO.MSIxIndex, req_desc->SCSIIO.SMID,
3011			    req_desc->SCSIIO.LMID, req_desc->SCSIIO.DevHandle);
3012
3013		printk(KERN_INFO "IO request frame:\n");
3014		megasas_dump(cmd->io_request,
3015			     MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, 8);
3016		printk(KERN_INFO "Chain frame:\n");
3017		megasas_dump(cmd->sg_frame,
3018			     instance->max_chain_frame_sz, 8);
3019	}
3020
3021}
3022
3023/*
3024 * megasas_dump_sys_regs - This function will dump system registers through
3025 *			    sysfs.
3026 * @reg_set:		    Pointer to System register set.
3027 * @buf:		    Buffer to which output is to be written.
3028 * @return:		    Number of bytes written to buffer.
3029 */
3030static inline ssize_t
3031megasas_dump_sys_regs(void __iomem *reg_set, char *buf)
3032{
3033	unsigned int i, sz = 256;
3034	int bytes_wrote = 0;
3035	char *loc = (char *)buf;
3036	u32 __iomem *reg = (u32 __iomem *)reg_set;
3037
3038	for (i = 0; i < sz / sizeof(u32); i++) {
3039		bytes_wrote += scnprintf(loc + bytes_wrote,
3040					 PAGE_SIZE - bytes_wrote,
3041					 "%08x: %08x\n", (i * 4),
3042					 readl(&reg[i]));
3043	}
3044	return bytes_wrote;
3045}
3046
3047/**
3048 * megasas_reset_bus_host -	Bus & host reset handler entry point
3049 * @scmd:			Mid-layer SCSI command
3050 */
3051static int megasas_reset_bus_host(struct scsi_cmnd *scmd)
3052{
3053	int ret;
3054	struct megasas_instance *instance;
3055
3056	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3057
3058	scmd_printk(KERN_INFO, scmd,
3059		"OCR is requested due to IO timeout!!\n");
3060
3061	scmd_printk(KERN_INFO, scmd,
3062		"SCSI host state: %d  SCSI host busy: %d  FW outstanding: %d\n",
3063		scmd->device->host->shost_state,
3064		scsi_host_busy(scmd->device->host),
3065		atomic_read(&instance->fw_outstanding));
3066	/*
3067	 * First wait for all commands to complete
3068	 */
3069	if (instance->adapter_type == MFI_SERIES) {
3070		ret = megasas_generic_reset(scmd);
3071	} else {
3072		megasas_dump_fusion_io(scmd);
3073		ret = megasas_reset_fusion(scmd->device->host,
3074				SCSIIO_TIMEOUT_OCR);
3075	}
3076
3077	return ret;
3078}
3079
3080/**
3081 * megasas_task_abort - Issues task abort request to firmware
3082 *			(supported only for fusion adapters)
3083 * @scmd:		SCSI command pointer
3084 */
3085static int megasas_task_abort(struct scsi_cmnd *scmd)
3086{
3087	int ret;
3088	struct megasas_instance *instance;
3089
3090	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3091
3092	if (instance->adapter_type != MFI_SERIES)
3093		ret = megasas_task_abort_fusion(scmd);
3094	else {
3095		sdev_printk(KERN_NOTICE, scmd->device, "TASK ABORT not supported\n");
3096		ret = FAILED;
3097	}
3098
3099	return ret;
3100}
3101
3102/**
3103 * megasas_reset_target:  Issues target reset request to firmware
3104 *                        (supported only for fusion adapters)
3105 * @scmd:                 SCSI command pointer
3106 */
3107static int megasas_reset_target(struct scsi_cmnd *scmd)
3108{
3109	int ret;
3110	struct megasas_instance *instance;
3111
3112	instance = (struct megasas_instance *)scmd->device->host->hostdata;
3113
3114	if (instance->adapter_type != MFI_SERIES)
3115		ret = megasas_reset_target_fusion(scmd);
3116	else {
3117		sdev_printk(KERN_NOTICE, scmd->device, "TARGET RESET not supported\n");
3118		ret = FAILED;
3119	}
3120
3121	return ret;
3122}
3123
3124/**
3125 * megasas_bios_param - Returns disk geometry for a disk
3126 * @sdev:		device handle
3127 * @bdev:		block device
3128 * @capacity:		drive capacity
3129 * @geom:		geometry parameters
3130 */
3131static int
3132megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
3133		 sector_t capacity, int geom[])
3134{
3135	int heads;
3136	int sectors;
3137	sector_t cylinders;
3138	unsigned long tmp;
3139
3140	/* Default heads (64) & sectors (32) */
3141	heads = 64;
3142	sectors = 32;
3143
3144	tmp = heads * sectors;
3145	cylinders = capacity;
3146
3147	sector_div(cylinders, tmp);
3148
3149	/*
3150	 * Handle extended translation size for logical drives > 1Gb
3151	 */
3152
3153	if (capacity >= 0x200000) {
3154		heads = 255;
3155		sectors = 63;
3156		tmp = heads*sectors;
3157		cylinders = capacity;
3158		sector_div(cylinders, tmp);
3159	}
3160
3161	geom[0] = heads;
3162	geom[1] = sectors;
3163	geom[2] = cylinders;
3164
3165	return 0;
3166}
3167
3168static int megasas_map_queues(struct Scsi_Host *shost)
3169{
3170	struct megasas_instance *instance;
3171
3172	instance = (struct megasas_instance *)shost->hostdata;
3173
3174	if (shost->nr_hw_queues == 1)
3175		return 0;
3176
3177	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
3178			instance->pdev, instance->low_latency_index_start);
3179}
3180
3181static void megasas_aen_polling(struct work_struct *work);
3182
3183/**
3184 * megasas_service_aen -	Processes an event notification
3185 * @instance:			Adapter soft state
3186 * @cmd:			AEN command completed by the ISR
3187 *
3188 * For AEN, driver sends a command down to FW that is held by the FW till an
3189 * event occurs. When an event of interest occurs, FW completes the command
3190 * that it was previously holding.
3191 *
3192 * This routines sends SIGIO signal to processes that have registered with the
3193 * driver for AEN.
3194 */
3195static void
3196megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
3197{
3198	unsigned long flags;
3199
3200	/*
3201	 * Don't signal app if it is just an aborted previously registered aen
3202	 */
3203	if ((!cmd->abort_aen) && (instance->unload == 0)) {
3204		spin_lock_irqsave(&poll_aen_lock, flags);
3205		megasas_poll_wait_aen = 1;
3206		spin_unlock_irqrestore(&poll_aen_lock, flags);
3207		wake_up(&megasas_poll_wait);
3208		kill_fasync(&megasas_async_queue, SIGIO, POLL_IN);
3209	}
3210	else
3211		cmd->abort_aen = 0;
3212
3213	instance->aen_cmd = NULL;
3214
3215	megasas_return_cmd(instance, cmd);
3216
3217	if ((instance->unload == 0) &&
3218		((instance->issuepend_done == 1))) {
3219		struct megasas_aen_event *ev;
3220
3221		ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
3222		if (!ev) {
3223			dev_err(&instance->pdev->dev, "megasas_service_aen: out of memory\n");
3224		} else {
3225			ev->instance = instance;
3226			instance->ev = ev;
3227			INIT_DELAYED_WORK(&ev->hotplug_work,
3228					  megasas_aen_polling);
3229			schedule_delayed_work(&ev->hotplug_work, 0);
3230		}
3231	}
3232}
3233
3234static ssize_t
3235fw_crash_buffer_store(struct device *cdev,
3236	struct device_attribute *attr, const char *buf, size_t count)
3237{
3238	struct Scsi_Host *shost = class_to_shost(cdev);
3239	struct megasas_instance *instance =
3240		(struct megasas_instance *) shost->hostdata;
3241	int val = 0;
3242
3243	if (kstrtoint(buf, 0, &val) != 0)
3244		return -EINVAL;
3245
3246	mutex_lock(&instance->crashdump_lock);
3247	instance->fw_crash_buffer_offset = val;
3248	mutex_unlock(&instance->crashdump_lock);
3249	return strlen(buf);
3250}
3251
3252static ssize_t
3253fw_crash_buffer_show(struct device *cdev,
3254	struct device_attribute *attr, char *buf)
3255{
3256	struct Scsi_Host *shost = class_to_shost(cdev);
3257	struct megasas_instance *instance =
3258		(struct megasas_instance *) shost->hostdata;
3259	u32 size;
3260	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
3261	unsigned long chunk_left_bytes;
3262	unsigned long src_addr;
3263	u32 buff_offset;
3264
3265	mutex_lock(&instance->crashdump_lock);
3266	buff_offset = instance->fw_crash_buffer_offset;
3267	if (!instance->crash_dump_buf ||
3268		!((instance->fw_crash_state == AVAILABLE) ||
3269		(instance->fw_crash_state == COPYING))) {
3270		dev_err(&instance->pdev->dev,
3271			"Firmware crash dump is not available\n");
3272		mutex_unlock(&instance->crashdump_lock);
3273		return -EINVAL;
3274	}
3275
3276	if (buff_offset > (instance->fw_crash_buffer_size * dmachunk)) {
3277		dev_err(&instance->pdev->dev,
3278			"Firmware crash dump offset is out of range\n");
3279		mutex_unlock(&instance->crashdump_lock);
3280		return 0;
3281	}
3282
3283	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
3284	chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
3285	size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
3286	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
3287
3288	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
3289		(buff_offset % dmachunk);
3290	memcpy(buf, (void *)src_addr, size);
3291	mutex_unlock(&instance->crashdump_lock);
3292
3293	return size;
3294}
3295
3296static ssize_t
3297fw_crash_buffer_size_show(struct device *cdev,
3298	struct device_attribute *attr, char *buf)
3299{
3300	struct Scsi_Host *shost = class_to_shost(cdev);
3301	struct megasas_instance *instance =
3302		(struct megasas_instance *) shost->hostdata;
3303
3304	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
3305		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
3306}
3307
3308static ssize_t
3309fw_crash_state_store(struct device *cdev,
3310	struct device_attribute *attr, const char *buf, size_t count)
3311{
3312	struct Scsi_Host *shost = class_to_shost(cdev);
3313	struct megasas_instance *instance =
3314		(struct megasas_instance *) shost->hostdata;
3315	int val = 0;
3316
3317	if (kstrtoint(buf, 0, &val) != 0)
3318		return -EINVAL;
3319
3320	if ((val <= AVAILABLE || val > COPY_ERROR)) {
3321		dev_err(&instance->pdev->dev, "application updates invalid "
3322			"firmware crash state\n");
3323		return -EINVAL;
3324	}
3325
3326	instance->fw_crash_state = val;
3327
3328	if ((val == COPIED) || (val == COPY_ERROR)) {
3329		mutex_lock(&instance->crashdump_lock);
3330		megasas_free_host_crash_buffer(instance);
3331		mutex_unlock(&instance->crashdump_lock);
3332		if (val == COPY_ERROR)
3333			dev_info(&instance->pdev->dev, "application failed to "
3334				"copy Firmware crash dump\n");
3335		else
3336			dev_info(&instance->pdev->dev, "Firmware crash dump "
3337				"copied successfully\n");
3338	}
3339	return strlen(buf);
3340}
3341
3342static ssize_t
3343fw_crash_state_show(struct device *cdev,
3344	struct device_attribute *attr, char *buf)
3345{
3346	struct Scsi_Host *shost = class_to_shost(cdev);
3347	struct megasas_instance *instance =
3348		(struct megasas_instance *) shost->hostdata;
3349
3350	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
3351}
3352
3353static ssize_t
3354page_size_show(struct device *cdev,
3355	struct device_attribute *attr, char *buf)
3356{
3357	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
3358}
3359
3360static ssize_t
3361ldio_outstanding_show(struct device *cdev, struct device_attribute *attr,
3362	char *buf)
3363{
3364	struct Scsi_Host *shost = class_to_shost(cdev);
3365	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3366
3367	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->ldio_outstanding));
3368}
3369
3370static ssize_t
3371fw_cmds_outstanding_show(struct device *cdev,
3372				 struct device_attribute *attr, char *buf)
3373{
3374	struct Scsi_Host *shost = class_to_shost(cdev);
3375	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3376
3377	return snprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&instance->fw_outstanding));
3378}
3379
3380static ssize_t
3381enable_sdev_max_qd_show(struct device *cdev,
3382	struct device_attribute *attr, char *buf)
3383{
3384	struct Scsi_Host *shost = class_to_shost(cdev);
3385	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3386
3387	return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd);
3388}
3389
3390static ssize_t
3391enable_sdev_max_qd_store(struct device *cdev,
3392	struct device_attribute *attr, const char *buf, size_t count)
3393{
3394	struct Scsi_Host *shost = class_to_shost(cdev);
3395	struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata;
3396	u32 val = 0;
3397	bool is_target_prop;
3398	int ret_target_prop = DCMD_FAILED;
3399	struct scsi_device *sdev;
3400
3401	if (kstrtou32(buf, 0, &val) != 0) {
3402		pr_err("megasas: could not set enable_sdev_max_qd\n");
3403		return -EINVAL;
3404	}
3405
3406	mutex_lock(&instance->reset_mutex);
3407	if (val)
3408		instance->enable_sdev_max_qd = true;
3409	else
3410		instance->enable_sdev_max_qd = false;
3411
3412	shost_for_each_device(sdev, shost) {
3413		ret_target_prop = megasas_get_target_prop(instance, sdev);
3414		is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
3415		megasas_set_fw_assisted_qd(sdev, is_target_prop);
3416	}
3417	mutex_unlock(&instance->reset_mutex);
3418
3419	return strlen(buf);
3420}
3421
3422static ssize_t
3423dump_system_regs_show(struct device *cdev,
3424			       struct device_attribute *attr, char *buf)
3425{
3426	struct Scsi_Host *shost = class_to_shost(cdev);
3427	struct megasas_instance *instance =
3428			(struct megasas_instance *)shost->hostdata;
3429
3430	return megasas_dump_sys_regs(instance->reg_set, buf);
3431}
3432
3433static ssize_t
3434raid_map_id_show(struct device *cdev, struct device_attribute *attr,
3435			  char *buf)
3436{
3437	struct Scsi_Host *shost = class_to_shost(cdev);
3438	struct megasas_instance *instance =
3439			(struct megasas_instance *)shost->hostdata;
3440
3441	return snprintf(buf, PAGE_SIZE, "%ld\n",
3442			(unsigned long)instance->map_id);
3443}
3444
3445static DEVICE_ATTR_RW(fw_crash_buffer);
3446static DEVICE_ATTR_RO(fw_crash_buffer_size);
3447static DEVICE_ATTR_RW(fw_crash_state);
3448static DEVICE_ATTR_RO(page_size);
3449static DEVICE_ATTR_RO(ldio_outstanding);
3450static DEVICE_ATTR_RO(fw_cmds_outstanding);
3451static DEVICE_ATTR_RW(enable_sdev_max_qd);
3452static DEVICE_ATTR_RO(dump_system_regs);
3453static DEVICE_ATTR_RO(raid_map_id);
3454
3455static struct device_attribute *megaraid_host_attrs[] = {
3456	&dev_attr_fw_crash_buffer_size,
3457	&dev_attr_fw_crash_buffer,
3458	&dev_attr_fw_crash_state,
3459	&dev_attr_page_size,
3460	&dev_attr_ldio_outstanding,
3461	&dev_attr_fw_cmds_outstanding,
3462	&dev_attr_enable_sdev_max_qd,
3463	&dev_attr_dump_system_regs,
3464	&dev_attr_raid_map_id,
3465	NULL,
3466};
3467
3468/*
3469 * Scsi host template for megaraid_sas driver
3470 */
3471static struct scsi_host_template megasas_template = {
3472
3473	.module = THIS_MODULE,
3474	.name = "Avago SAS based MegaRAID driver",
3475	.proc_name = "megaraid_sas",
3476	.slave_configure = megasas_slave_configure,
3477	.slave_alloc = megasas_slave_alloc,
3478	.slave_destroy = megasas_slave_destroy,
3479	.queuecommand = megasas_queue_command,
3480	.eh_target_reset_handler = megasas_reset_target,
3481	.eh_abort_handler = megasas_task_abort,
3482	.eh_host_reset_handler = megasas_reset_bus_host,
3483	.eh_timed_out = megasas_reset_timer,
3484	.shost_attrs = megaraid_host_attrs,
3485	.bios_param = megasas_bios_param,
3486	.map_queues = megasas_map_queues,
3487	.change_queue_depth = scsi_change_queue_depth,
3488	.max_segment_size = 0xffffffff,
3489};
3490
3491/**
3492 * megasas_complete_int_cmd -	Completes an internal command
3493 * @instance:			Adapter soft state
3494 * @cmd:			Command to be completed
3495 *
3496 * The megasas_issue_blocked_cmd() function waits for a command to complete
3497 * after it issues a command. This function wakes up that waiting routine by
3498 * calling wake_up() on the wait queue.
3499 */
3500static void
3501megasas_complete_int_cmd(struct megasas_instance *instance,
3502			 struct megasas_cmd *cmd)
3503{
3504	if (cmd->cmd_status_drv == DCMD_INIT)
3505		cmd->cmd_status_drv =
3506		(cmd->frame->io.cmd_status == MFI_STAT_OK) ?
3507		DCMD_SUCCESS : DCMD_FAILED;
3508
3509	wake_up(&instance->int_cmd_wait_q);
3510}
3511
3512/**
3513 * megasas_complete_abort -	Completes aborting a command
3514 * @instance:			Adapter soft state
3515 * @cmd:			Cmd that was issued to abort another cmd
3516 *
3517 * The megasas_issue_blocked_abort_cmd() function waits on abort_cmd_wait_q
3518 * after it issues an abort on a previously issued command. This function
3519 * wakes up all functions waiting on the same wait queue.
3520 */
3521static void
3522megasas_complete_abort(struct megasas_instance *instance,
3523		       struct megasas_cmd *cmd)
3524{
3525	if (cmd->sync_cmd) {
3526		cmd->sync_cmd = 0;
3527		cmd->cmd_status_drv = DCMD_SUCCESS;
3528		wake_up(&instance->abort_cmd_wait_q);
3529	}
3530}
3531
3532static void
3533megasas_set_ld_removed_by_fw(struct megasas_instance *instance)
3534{
3535	uint i;
3536
3537	for (i = 0; (i < MEGASAS_MAX_LD_IDS); i++) {
3538		if (instance->ld_ids_prev[i] != 0xff &&
3539		    instance->ld_ids_from_raidmap[i] == 0xff) {
3540			if (megasas_dbg_lvl & LD_PD_DEBUG)
3541				dev_info(&instance->pdev->dev,
3542					 "LD target ID %d removed from RAID map\n", i);
3543			instance->ld_tgtid_status[i] = LD_TARGET_ID_DELETED;
3544		}
3545	}
3546}
3547
3548/**
3549 * megasas_complete_cmd -	Completes a command
3550 * @instance:			Adapter soft state
3551 * @cmd:			Command to be completed
3552 * @alt_status:			If non-zero, use this value as status to
3553 *				SCSI mid-layer instead of the value returned
3554 *				by the FW. This should be used if caller wants
3555 *				an alternate status (as in the case of aborted
3556 *				commands)
3557 */
3558void
3559megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
3560		     u8 alt_status)
3561{
3562	int exception = 0;
3563	struct megasas_header *hdr = &cmd->frame->hdr;
3564	unsigned long flags;
3565	struct fusion_context *fusion = instance->ctrl_context;
3566	u32 opcode, status;
3567
3568	/* flag for the retry reset */
3569	cmd->retry_for_fw_reset = 0;
3570
3571	if (cmd->scmd)
3572		cmd->scmd->SCp.ptr = NULL;
3573
3574	switch (hdr->cmd) {
3575	case MFI_CMD_INVALID:
3576		/* Some older 1068 controller FW may keep a pended
3577		   MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel
3578		   when booting the kdump kernel.  Ignore this command to
3579		   prevent a kernel panic on shutdown of the kdump kernel. */
3580		dev_warn(&instance->pdev->dev, "MFI_CMD_INVALID command "
3581		       "completed\n");
3582		dev_warn(&instance->pdev->dev, "If you have a controller "
3583		       "other than PERC5, please upgrade your firmware\n");
3584		break;
3585	case MFI_CMD_PD_SCSI_IO:
3586	case MFI_CMD_LD_SCSI_IO:
3587
3588		/*
3589		 * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
3590		 * issued either through an IO path or an IOCTL path. If it
3591		 * was via IOCTL, we will send it to internal completion.
3592		 */
3593		if (cmd->sync_cmd) {
3594			cmd->sync_cmd = 0;
3595			megasas_complete_int_cmd(instance, cmd);
3596			break;
3597		}
3598		fallthrough;
3599
3600	case MFI_CMD_LD_READ:
3601	case MFI_CMD_LD_WRITE:
3602
3603		if (alt_status) {
3604			cmd->scmd->result = alt_status << 16;
3605			exception = 1;
3606		}
3607
3608		if (exception) {
3609
3610			atomic_dec(&instance->fw_outstanding);
3611
3612			scsi_dma_unmap(cmd->scmd);
3613			cmd->scmd->scsi_done(cmd->scmd);
3614			megasas_return_cmd(instance, cmd);
3615
3616			break;
3617		}
3618
3619		switch (hdr->cmd_status) {
3620
3621		case MFI_STAT_OK:
3622			cmd->scmd->result = DID_OK << 16;
3623			break;
3624
3625		case MFI_STAT_SCSI_IO_FAILED:
3626		case MFI_STAT_LD_INIT_IN_PROGRESS:
3627			cmd->scmd->result =
3628			    (DID_ERROR << 16) | hdr->scsi_status;
3629			break;
3630
3631		case MFI_STAT_SCSI_DONE_WITH_ERROR:
3632
3633			cmd->scmd->result = (DID_OK << 16) | hdr->scsi_status;
3634
3635			if (hdr->scsi_status == SAM_STAT_CHECK_CONDITION) {
3636				memset(cmd->scmd->sense_buffer, 0,
3637				       SCSI_SENSE_BUFFERSIZE);
3638				memcpy(cmd->scmd->sense_buffer, cmd->sense,
3639				       hdr->sense_len);
3640
3641				cmd->scmd->result |= DRIVER_SENSE << 24;
3642			}
3643
3644			break;
3645
3646		case MFI_STAT_LD_OFFLINE:
3647		case MFI_STAT_DEVICE_NOT_FOUND:
3648			cmd->scmd->result = DID_BAD_TARGET << 16;
3649			break;
3650
3651		default:
3652			dev_printk(KERN_DEBUG, &instance->pdev->dev, "MFI FW status %#x\n",
3653			       hdr->cmd_status);
3654			cmd->scmd->result = DID_ERROR << 16;
3655			break;
3656		}
3657
3658		atomic_dec(&instance->fw_outstanding);
3659
3660		scsi_dma_unmap(cmd->scmd);
3661		cmd->scmd->scsi_done(cmd->scmd);
3662		megasas_return_cmd(instance, cmd);
3663
3664		break;
3665
3666	case MFI_CMD_SMP:
3667	case MFI_CMD_STP:
3668	case MFI_CMD_NVME:
3669	case MFI_CMD_TOOLBOX:
3670		megasas_complete_int_cmd(instance, cmd);
3671		break;
3672
3673	case MFI_CMD_DCMD:
3674		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
3675		/* Check for LD map update */
3676		if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
3677			&& (cmd->frame->dcmd.mbox.b[1] == 1)) {
3678			fusion->fast_path_io = 0;
3679			spin_lock_irqsave(instance->host->host_lock, flags);
3680			status = cmd->frame->hdr.cmd_status;
3681			instance->map_update_cmd = NULL;
3682			if (status != MFI_STAT_OK) {
3683				if (status != MFI_STAT_NOT_FOUND)
3684					dev_warn(&instance->pdev->dev, "map syncfailed, status = 0x%x\n",
3685					       cmd->frame->hdr.cmd_status);
3686				else {
3687					megasas_return_cmd(instance, cmd);
3688					spin_unlock_irqrestore(
3689						instance->host->host_lock,
3690						flags);
3691					break;
3692				}
3693			}
3694
3695			megasas_return_cmd(instance, cmd);
3696
3697			/*
3698			 * Set fast path IO to ZERO.
3699			 * Validate Map will set proper value.
3700			 * Meanwhile all IOs will go as LD IO.
3701			 */
3702			if (status == MFI_STAT_OK &&
3703			    (MR_ValidateMapInfo(instance, (instance->map_id + 1)))) {
3704				instance->map_id++;
3705				fusion->fast_path_io = 1;
3706			} else {
3707				fusion->fast_path_io = 0;
3708			}
3709
3710			if (instance->adapter_type >= INVADER_SERIES)
3711				megasas_set_ld_removed_by_fw(instance);
3712
3713			megasas_sync_map_info(instance);
3714			spin_unlock_irqrestore(instance->host->host_lock,
3715					       flags);
3716
3717			break;
3718		}
3719		if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
3720		    opcode == MR_DCMD_CTRL_EVENT_GET) {
3721			spin_lock_irqsave(&poll_aen_lock, flags);
3722			megasas_poll_wait_aen = 0;
3723			spin_unlock_irqrestore(&poll_aen_lock, flags);
3724		}
3725
3726		/* FW has an updated PD sequence */
3727		if ((opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
3728			(cmd->frame->dcmd.mbox.b[0] == 1)) {
3729
3730			spin_lock_irqsave(instance->host->host_lock, flags);
3731			status = cmd->frame->hdr.cmd_status;
3732			instance->jbod_seq_cmd = NULL;
3733			megasas_return_cmd(instance, cmd);
3734
3735			if (status == MFI_STAT_OK) {
3736				instance->pd_seq_map_id++;
3737				/* Re-register a pd sync seq num cmd */
3738				if (megasas_sync_pd_seq_num(instance, true))
3739					instance->use_seqnum_jbod_fp = false;
3740			} else
3741				instance->use_seqnum_jbod_fp = false;
3742
3743			spin_unlock_irqrestore(instance->host->host_lock, flags);
3744			break;
3745		}
3746
3747		/*
3748		 * See if got an event notification
3749		 */
3750		if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
3751			megasas_service_aen(instance, cmd);
3752		else
3753			megasas_complete_int_cmd(instance, cmd);
3754
3755		break;
3756
3757	case MFI_CMD_ABORT:
3758		/*
3759		 * Cmd issued to abort another cmd returned
3760		 */
3761		megasas_complete_abort(instance, cmd);
3762		break;
3763
3764	default:
3765		dev_info(&instance->pdev->dev, "Unknown command completed! [0x%X]\n",
3766		       hdr->cmd);
3767		megasas_complete_int_cmd(instance, cmd);
3768		break;
3769	}
3770}
3771
3772/**
3773 * megasas_issue_pending_cmds_again -	issue all pending cmds
3774 *					in FW again because of the fw reset
3775 * @instance:				Adapter soft state
3776 */
3777static inline void
3778megasas_issue_pending_cmds_again(struct megasas_instance *instance)
3779{
3780	struct megasas_cmd *cmd;
3781	struct list_head clist_local;
3782	union megasas_evt_class_locale class_locale;
3783	unsigned long flags;
3784	u32 seq_num;
3785
3786	INIT_LIST_HEAD(&clist_local);
3787	spin_lock_irqsave(&instance->hba_lock, flags);
3788	list_splice_init(&instance->internal_reset_pending_q, &clist_local);
3789	spin_unlock_irqrestore(&instance->hba_lock, flags);
3790
3791	while (!list_empty(&clist_local)) {
3792		cmd = list_entry((&clist_local)->next,
3793					struct megasas_cmd, list);
3794		list_del_init(&cmd->list);
3795
3796		if (cmd->sync_cmd || cmd->scmd) {
3797			dev_notice(&instance->pdev->dev, "command %p, %p:%d"
3798				"detected to be pending while HBA reset\n",
3799					cmd, cmd->scmd, cmd->sync_cmd);
3800
3801			cmd->retry_for_fw_reset++;
3802
3803			if (cmd->retry_for_fw_reset == 3) {
3804				dev_notice(&instance->pdev->dev, "cmd %p, %p:%d"
3805					"was tried multiple times during reset."
3806					"Shutting down the HBA\n",
3807					cmd, cmd->scmd, cmd->sync_cmd);
3808				instance->instancet->disable_intr(instance);
3809				atomic_set(&instance->fw_reset_no_pci_access, 1);
3810				megaraid_sas_kill_hba(instance);
3811				return;
3812			}
3813		}
3814
3815		if (cmd->sync_cmd == 1) {
3816			if (cmd->scmd) {
3817				dev_notice(&instance->pdev->dev, "unexpected"
3818					"cmd attached to internal command!\n");
3819			}
3820			dev_notice(&instance->pdev->dev, "%p synchronous cmd"
3821						"on the internal reset queue,"
3822						"issue it again.\n", cmd);
3823			cmd->cmd_status_drv = DCMD_INIT;
3824			instance->instancet->fire_cmd(instance,
3825							cmd->frame_phys_addr,
3826							0, instance->reg_set);
3827		} else if (cmd->scmd) {
3828			dev_notice(&instance->pdev->dev, "%p scsi cmd [%02x]"
3829			"detected on the internal queue, issue again.\n",
3830			cmd, cmd->scmd->cmnd[0]);
3831
3832			atomic_inc(&instance->fw_outstanding);
3833			instance->instancet->fire_cmd(instance,
3834					cmd->frame_phys_addr,
3835					cmd->frame_count-1, instance->reg_set);
3836		} else {
3837			dev_notice(&instance->pdev->dev, "%p unexpected cmd on the"
3838				"internal reset defer list while re-issue!!\n",
3839				cmd);
3840		}
3841	}
3842
3843	if (instance->aen_cmd) {
3844		dev_notice(&instance->pdev->dev, "aen_cmd in def process\n");
3845		megasas_return_cmd(instance, instance->aen_cmd);
3846
3847		instance->aen_cmd = NULL;
3848	}
3849
3850	/*
3851	 * Initiate AEN (Asynchronous Event Notification)
3852	 */
3853	seq_num = instance->last_seq_num;
3854	class_locale.members.reserved = 0;
3855	class_locale.members.locale = MR_EVT_LOCALE_ALL;
3856	class_locale.members.class = MR_EVT_CLASS_DEBUG;
3857
3858	megasas_register_aen(instance, seq_num, class_locale.word);
3859}
3860
3861/*
3862 * Move the internal reset pending commands to a deferred queue.
3863 *
3864 * We move the commands pending at internal reset time to a
3865 * pending queue. This queue would be flushed after successful
3866 * completion of the internal reset sequence. if the internal reset
3867 * did not complete in time, the kernel reset handler would flush
3868 * these commands.
3869 */
3870static void
3871megasas_internal_reset_defer_cmds(struct megasas_instance *instance)
3872{
3873	struct megasas_cmd *cmd;
3874	int i;
3875	u16 max_cmd = instance->max_fw_cmds;
3876	u32 defer_index;
3877	unsigned long flags;
3878
3879	defer_index = 0;
3880	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
3881	for (i = 0; i < max_cmd; i++) {
3882		cmd = instance->cmd_list[i];
3883		if (cmd->sync_cmd == 1 || cmd->scmd) {
3884			dev_notice(&instance->pdev->dev, "moving cmd[%d]:%p:%d:%p"
3885					"on the defer queue as internal\n",
3886				defer_index, cmd, cmd->sync_cmd, cmd->scmd);
3887
3888			if (!list_empty(&cmd->list)) {
3889				dev_notice(&instance->pdev->dev, "ERROR while"
3890					" moving this cmd:%p, %d %p, it was"
3891					"discovered on some list?\n",
3892					cmd, cmd->sync_cmd, cmd->scmd);
3893
3894				list_del_init(&cmd->list);
3895			}
3896			defer_index++;
3897			list_add_tail(&cmd->list,
3898				&instance->internal_reset_pending_q);
3899		}
3900	}
3901	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
3902}
3903
3904
3905static void
3906process_fw_state_change_wq(struct work_struct *work)
3907{
3908	struct megasas_instance *instance =
3909		container_of(work, struct megasas_instance, work_init);
3910	u32 wait;
3911	unsigned long flags;
3912
3913    if (atomic_read(&instance->adprecovery) != MEGASAS_ADPRESET_SM_INFAULT) {
3914		dev_notice(&instance->pdev->dev, "error, recovery st %x\n",
3915				atomic_read(&instance->adprecovery));
3916		return ;
3917	}
3918
3919	if (atomic_read(&instance->adprecovery) == MEGASAS_ADPRESET_SM_INFAULT) {
3920		dev_notice(&instance->pdev->dev, "FW detected to be in fault"
3921					"state, restarting it...\n");
3922
3923		instance->instancet->disable_intr(instance);
3924		atomic_set(&instance->fw_outstanding, 0);
3925
3926		atomic_set(&instance->fw_reset_no_pci_access, 1);
3927		instance->instancet->adp_reset(instance, instance->reg_set);
3928		atomic_set(&instance->fw_reset_no_pci_access, 0);
3929
3930		dev_notice(&instance->pdev->dev, "FW restarted successfully,"
3931					"initiating next stage...\n");
3932
3933		dev_notice(&instance->pdev->dev, "HBA recovery state machine,"
3934					"state 2 starting...\n");
3935
3936		/* waiting for about 20 second before start the second init */
3937		for (wait = 0; wait < 30; wait++) {
3938			msleep(1000);
3939		}
3940
3941		if (megasas_transition_to_ready(instance, 1)) {
3942			dev_notice(&instance->pdev->dev, "adapter not ready\n");
3943
3944			atomic_set(&instance->fw_reset_no_pci_access, 1);
3945			megaraid_sas_kill_hba(instance);
3946			return ;
3947		}
3948
3949		if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
3950			(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
3951			(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)
3952			) {
3953			*instance->consumer = *instance->producer;
3954		} else {
3955			*instance->consumer = 0;
3956			*instance->producer = 0;
3957		}
3958
3959		megasas_issue_init_mfi(instance);
3960
3961		spin_lock_irqsave(&instance->hba_lock, flags);
3962		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
3963		spin_unlock_irqrestore(&instance->hba_lock, flags);
3964		instance->instancet->enable_intr(instance);
3965
3966		megasas_issue_pending_cmds_again(instance);
3967		instance->issuepend_done = 1;
3968	}
3969}
3970
3971/**
3972 * megasas_deplete_reply_queue -	Processes all completed commands
3973 * @instance:				Adapter soft state
3974 * @alt_status:				Alternate status to be returned to
3975 *					SCSI mid-layer instead of the status
3976 *					returned by the FW
3977 * Note: this must be called with hba lock held
3978 */
3979static int
3980megasas_deplete_reply_queue(struct megasas_instance *instance,
3981					u8 alt_status)
3982{
3983	u32 mfiStatus;
3984	u32 fw_state;
3985
3986	if ((mfiStatus = instance->instancet->check_reset(instance,
3987					instance->reg_set)) == 1) {
3988		return IRQ_HANDLED;
3989	}
3990
3991	mfiStatus = instance->instancet->clear_intr(instance);
3992	if (mfiStatus == 0) {
3993		/* Hardware may not set outbound_intr_status in MSI-X mode */
3994		if (!instance->msix_vectors)
3995			return IRQ_NONE;
3996	}
3997
3998	instance->mfiStatus = mfiStatus;
3999
4000	if ((mfiStatus & MFI_INTR_FLAG_FIRMWARE_STATE_CHANGE)) {
4001		fw_state = instance->instancet->read_fw_status_reg(
4002				instance) & MFI_STATE_MASK;
4003
4004		if (fw_state != MFI_STATE_FAULT) {
4005			dev_notice(&instance->pdev->dev, "fw state:%x\n",
4006						fw_state);
4007		}
4008
4009		if ((fw_state == MFI_STATE_FAULT) &&
4010				(instance->disableOnlineCtrlReset == 0)) {
4011			dev_notice(&instance->pdev->dev, "wait adp restart\n");
4012
4013			if ((instance->pdev->device ==
4014					PCI_DEVICE_ID_LSI_SAS1064R) ||
4015				(instance->pdev->device ==
4016					PCI_DEVICE_ID_DELL_PERC5) ||
4017				(instance->pdev->device ==
4018					PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
4019
4020				*instance->consumer =
4021					cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN);
4022			}
4023
4024
4025			instance->instancet->disable_intr(instance);
4026			atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4027			instance->issuepend_done = 0;
4028
4029			atomic_set(&instance->fw_outstanding, 0);
4030			megasas_internal_reset_defer_cmds(instance);
4031
4032			dev_notice(&instance->pdev->dev, "fwState=%x, stage:%d\n",
4033					fw_state, atomic_read(&instance->adprecovery));
4034
4035			schedule_work(&instance->work_init);
4036			return IRQ_HANDLED;
4037
4038		} else {
4039			dev_notice(&instance->pdev->dev, "fwstate:%x, dis_OCR=%x\n",
4040				fw_state, instance->disableOnlineCtrlReset);
4041		}
4042	}
4043
4044	tasklet_schedule(&instance->isr_tasklet);
4045	return IRQ_HANDLED;
4046}
4047
4048/**
4049 * megasas_isr - isr entry point
4050 * @irq:	IRQ number
4051 * @devp:	IRQ context address
4052 */
4053static irqreturn_t megasas_isr(int irq, void *devp)
4054{
4055	struct megasas_irq_context *irq_context = devp;
4056	struct megasas_instance *instance = irq_context->instance;
4057	unsigned long flags;
4058	irqreturn_t rc;
4059
4060	if (atomic_read(&instance->fw_reset_no_pci_access))
4061		return IRQ_HANDLED;
4062
4063	spin_lock_irqsave(&instance->hba_lock, flags);
4064	rc = megasas_deplete_reply_queue(instance, DID_OK);
4065	spin_unlock_irqrestore(&instance->hba_lock, flags);
4066
4067	return rc;
4068}
4069
4070/**
4071 * megasas_transition_to_ready -	Move the FW to READY state
4072 * @instance:				Adapter soft state
4073 * @ocr:				Adapter reset state
4074 *
4075 * During the initialization, FW passes can potentially be in any one of
4076 * several possible states. If the FW in operational, waiting-for-handshake
4077 * states, driver must take steps to bring it to ready state. Otherwise, it
4078 * has to wait for the ready state.
4079 */
4080int
4081megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
4082{
4083	int i;
4084	u8 max_wait;
4085	u32 fw_state;
4086	u32 abs_state, curr_abs_state;
4087
4088	abs_state = instance->instancet->read_fw_status_reg(instance);
4089	fw_state = abs_state & MFI_STATE_MASK;
4090
4091	if (fw_state != MFI_STATE_READY)
4092		dev_info(&instance->pdev->dev, "Waiting for FW to come to ready"
4093		       " state\n");
4094
4095	while (fw_state != MFI_STATE_READY) {
4096
4097		switch (fw_state) {
4098
4099		case MFI_STATE_FAULT:
4100			dev_printk(KERN_ERR, &instance->pdev->dev,
4101				   "FW in FAULT state, Fault code:0x%x subcode:0x%x func:%s\n",
4102				   abs_state & MFI_STATE_FAULT_CODE,
4103				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4104			if (ocr) {
4105				max_wait = MEGASAS_RESET_WAIT_TIME;
4106				break;
4107			} else {
4108				dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4109				megasas_dump_reg_set(instance->reg_set);
4110				return -ENODEV;
4111			}
4112
4113		case MFI_STATE_WAIT_HANDSHAKE:
4114			/*
4115			 * Set the CLR bit in inbound doorbell
4116			 */
4117			if ((instance->pdev->device ==
4118				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4119				(instance->pdev->device ==
4120				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4121				(instance->adapter_type != MFI_SERIES))
4122				writel(
4123				  MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4124				  &instance->reg_set->doorbell);
4125			else
4126				writel(
4127				    MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG,
4128					&instance->reg_set->inbound_doorbell);
4129
4130			max_wait = MEGASAS_RESET_WAIT_TIME;
4131			break;
4132
4133		case MFI_STATE_BOOT_MESSAGE_PENDING:
4134			if ((instance->pdev->device ==
4135			     PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4136				(instance->pdev->device ==
4137				 PCI_DEVICE_ID_LSI_SAS0071SKINNY) ||
4138				(instance->adapter_type != MFI_SERIES))
4139				writel(MFI_INIT_HOTPLUG,
4140				       &instance->reg_set->doorbell);
4141			else
4142				writel(MFI_INIT_HOTPLUG,
4143					&instance->reg_set->inbound_doorbell);
4144
4145			max_wait = MEGASAS_RESET_WAIT_TIME;
4146			break;
4147
4148		case MFI_STATE_OPERATIONAL:
4149			/*
4150			 * Bring it to READY state; assuming max wait 10 secs
4151			 */
4152			instance->instancet->disable_intr(instance);
4153			if ((instance->pdev->device ==
4154				PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
4155				(instance->pdev->device ==
4156				PCI_DEVICE_ID_LSI_SAS0071SKINNY)  ||
4157				(instance->adapter_type != MFI_SERIES)) {
4158				writel(MFI_RESET_FLAGS,
4159					&instance->reg_set->doorbell);
4160
4161				if (instance->adapter_type != MFI_SERIES) {
4162					for (i = 0; i < (10 * 1000); i += 20) {
4163						if (megasas_readl(
4164							    instance,
4165							    &instance->
4166							    reg_set->
4167							    doorbell) & 1)
4168							msleep(20);
4169						else
4170							break;
4171					}
4172				}
4173			} else
4174				writel(MFI_RESET_FLAGS,
4175					&instance->reg_set->inbound_doorbell);
4176
4177			max_wait = MEGASAS_RESET_WAIT_TIME;
4178			break;
4179
4180		case MFI_STATE_UNDEFINED:
4181			/*
4182			 * This state should not last for more than 2 seconds
4183			 */
4184			max_wait = MEGASAS_RESET_WAIT_TIME;
4185			break;
4186
4187		case MFI_STATE_BB_INIT:
4188			max_wait = MEGASAS_RESET_WAIT_TIME;
4189			break;
4190
4191		case MFI_STATE_FW_INIT:
4192			max_wait = MEGASAS_RESET_WAIT_TIME;
4193			break;
4194
4195		case MFI_STATE_FW_INIT_2:
4196			max_wait = MEGASAS_RESET_WAIT_TIME;
4197			break;
4198
4199		case MFI_STATE_DEVICE_SCAN:
4200			max_wait = MEGASAS_RESET_WAIT_TIME;
4201			break;
4202
4203		case MFI_STATE_FLUSH_CACHE:
4204			max_wait = MEGASAS_RESET_WAIT_TIME;
4205			break;
4206
4207		default:
4208			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Unknown state 0x%x\n",
4209			       fw_state);
4210			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4211			megasas_dump_reg_set(instance->reg_set);
4212			return -ENODEV;
4213		}
4214
4215		/*
4216		 * The cur_state should not last for more than max_wait secs
4217		 */
4218		for (i = 0; i < max_wait * 50; i++) {
4219			curr_abs_state = instance->instancet->
4220				read_fw_status_reg(instance);
4221
4222			if (abs_state == curr_abs_state) {
4223				msleep(20);
4224			} else
4225				break;
4226		}
4227
4228		/*
4229		 * Return error if fw_state hasn't changed after max_wait
4230		 */
4231		if (curr_abs_state == abs_state) {
4232			dev_printk(KERN_DEBUG, &instance->pdev->dev, "FW state [%d] hasn't changed "
4233			       "in %d secs\n", fw_state, max_wait);
4234			dev_printk(KERN_DEBUG, &instance->pdev->dev, "System Register set:\n");
4235			megasas_dump_reg_set(instance->reg_set);
4236			return -ENODEV;
4237		}
4238
4239		abs_state = curr_abs_state;
4240		fw_state = curr_abs_state & MFI_STATE_MASK;
4241	}
4242	dev_info(&instance->pdev->dev, "FW now in Ready state\n");
4243
4244	return 0;
4245}
4246
4247/**
4248 * megasas_teardown_frame_pool -	Destroy the cmd frame DMA pool
4249 * @instance:				Adapter soft state
4250 */
4251static void megasas_teardown_frame_pool(struct megasas_instance *instance)
4252{
4253	int i;
4254	u16 max_cmd = instance->max_mfi_cmds;
4255	struct megasas_cmd *cmd;
4256
4257	if (!instance->frame_dma_pool)
4258		return;
4259
4260	/*
4261	 * Return all frames to pool
4262	 */
4263	for (i = 0; i < max_cmd; i++) {
4264
4265		cmd = instance->cmd_list[i];
4266
4267		if (cmd->frame)
4268			dma_pool_free(instance->frame_dma_pool, cmd->frame,
4269				      cmd->frame_phys_addr);
4270
4271		if (cmd->sense)
4272			dma_pool_free(instance->sense_dma_pool, cmd->sense,
4273				      cmd->sense_phys_addr);
4274	}
4275
4276	/*
4277	 * Now destroy the pool itself
4278	 */
4279	dma_pool_destroy(instance->frame_dma_pool);
4280	dma_pool_destroy(instance->sense_dma_pool);
4281
4282	instance->frame_dma_pool = NULL;
4283	instance->sense_dma_pool = NULL;
4284}
4285
4286/**
4287 * megasas_create_frame_pool -	Creates DMA pool for cmd frames
4288 * @instance:			Adapter soft state
4289 *
4290 * Each command packet has an embedded DMA memory buffer that is used for
4291 * filling MFI frame and the SG list that immediately follows the frame. This
4292 * function creates those DMA memory buffers for each command packet by using
4293 * PCI pool facility.
4294 */
4295static int megasas_create_frame_pool(struct megasas_instance *instance)
4296{
4297	int i;
4298	u16 max_cmd;
4299	u32 frame_count;
4300	struct megasas_cmd *cmd;
4301
4302	max_cmd = instance->max_mfi_cmds;
4303
4304	/*
4305	 * For MFI controllers.
4306	 * max_num_sge = 60
4307	 * max_sge_sz  = 16 byte (sizeof megasas_sge_skinny)
4308	 * Total 960 byte (15 MFI frame of 64 byte)
4309	 *
4310	 * Fusion adapter require only 3 extra frame.
4311	 * max_num_sge = 16 (defined as MAX_IOCTL_SGE)
4312	 * max_sge_sz  = 12 byte (sizeof  megasas_sge64)
4313	 * Total 192 byte (3 MFI frame of 64 byte)
4314	 */
4315	frame_count = (instance->adapter_type == MFI_SERIES) ?
4316			(15 + 1) : (3 + 1);
4317	instance->mfi_frame_size = MEGAMFI_FRAME_SIZE * frame_count;
4318	/*
4319	 * Use DMA pool facility provided by PCI layer
4320	 */
4321	instance->frame_dma_pool = dma_pool_create("megasas frame pool",
4322					&instance->pdev->dev,
4323					instance->mfi_frame_size, 256, 0);
4324
4325	if (!instance->frame_dma_pool) {
4326		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup frame pool\n");
4327		return -ENOMEM;
4328	}
4329
4330	instance->sense_dma_pool = dma_pool_create("megasas sense pool",
4331						   &instance->pdev->dev, 128,
4332						   4, 0);
4333
4334	if (!instance->sense_dma_pool) {
4335		dev_printk(KERN_DEBUG, &instance->pdev->dev, "failed to setup sense pool\n");
4336
4337		dma_pool_destroy(instance->frame_dma_pool);
4338		instance->frame_dma_pool = NULL;
4339
4340		return -ENOMEM;
4341	}
4342
4343	/*
4344	 * Allocate and attach a frame to each of the commands in cmd_list.
4345	 * By making cmd->index as the context instead of the &cmd, we can
4346	 * always use 32bit context regardless of the architecture
4347	 */
4348	for (i = 0; i < max_cmd; i++) {
4349
4350		cmd = instance->cmd_list[i];
4351
4352		cmd->frame = dma_pool_zalloc(instance->frame_dma_pool,
4353					    GFP_KERNEL, &cmd->frame_phys_addr);
4354
4355		cmd->sense = dma_pool_alloc(instance->sense_dma_pool,
4356					    GFP_KERNEL, &cmd->sense_phys_addr);
4357
4358		/*
4359		 * megasas_teardown_frame_pool() takes care of freeing
4360		 * whatever has been allocated
4361		 */
4362		if (!cmd->frame || !cmd->sense) {
4363			dev_printk(KERN_DEBUG, &instance->pdev->dev, "dma_pool_alloc failed\n");
4364			megasas_teardown_frame_pool(instance);
4365			return -ENOMEM;
4366		}
4367
4368		cmd->frame->io.context = cpu_to_le32(cmd->index);
4369		cmd->frame->io.pad_0 = 0;
4370		if ((instance->adapter_type == MFI_SERIES) && reset_devices)
4371			cmd->frame->hdr.cmd = MFI_CMD_INVALID;
4372	}
4373
4374	return 0;
4375}
4376
4377/**
4378 * megasas_free_cmds -	Free all the cmds in the free cmd pool
4379 * @instance:		Adapter soft state
4380 */
4381void megasas_free_cmds(struct megasas_instance *instance)
4382{
4383	int i;
4384
4385	/* First free the MFI frame pool */
4386	megasas_teardown_frame_pool(instance);
4387
4388	/* Free all the commands in the cmd_list */
4389	for (i = 0; i < instance->max_mfi_cmds; i++)
4390
4391		kfree(instance->cmd_list[i]);
4392
4393	/* Free the cmd_list buffer itself */
4394	kfree(instance->cmd_list);
4395	instance->cmd_list = NULL;
4396
4397	INIT_LIST_HEAD(&instance->cmd_pool);
4398}
4399
4400/**
4401 * megasas_alloc_cmds -	Allocates the command packets
4402 * @instance:		Adapter soft state
4403 *
4404 * Each command that is issued to the FW, whether IO commands from the OS or
4405 * internal commands like IOCTLs, are wrapped in local data structure called
4406 * megasas_cmd. The frame embedded in this megasas_cmd is actually issued to
4407 * the FW.
4408 *
4409 * Each frame has a 32-bit field called context (tag). This context is used
4410 * to get back the megasas_cmd from the frame when a frame gets completed in
4411 * the ISR. Typically the address of the megasas_cmd itself would be used as
4412 * the context. But we wanted to keep the differences between 32 and 64 bit
4413 * systems to the mininum. We always use 32 bit integers for the context. In
4414 * this driver, the 32 bit values are the indices into an array cmd_list.
4415 * This array is used only to look up the megasas_cmd given the context. The
4416 * free commands themselves are maintained in a linked list called cmd_pool.
4417 */
4418int megasas_alloc_cmds(struct megasas_instance *instance)
4419{
4420	int i;
4421	int j;
4422	u16 max_cmd;
4423	struct megasas_cmd *cmd;
4424
4425	max_cmd = instance->max_mfi_cmds;
4426
4427	/*
4428	 * instance->cmd_list is an array of struct megasas_cmd pointers.
4429	 * Allocate the dynamic array first and then allocate individual
4430	 * commands.
4431	 */
4432	instance->cmd_list = kcalloc(max_cmd, sizeof(struct megasas_cmd*), GFP_KERNEL);
4433
4434	if (!instance->cmd_list) {
4435		dev_printk(KERN_DEBUG, &instance->pdev->dev, "out of memory\n");
4436		return -ENOMEM;
4437	}
4438
4439	memset(instance->cmd_list, 0, sizeof(struct megasas_cmd *) *max_cmd);
4440
4441	for (i = 0; i < max_cmd; i++) {
4442		instance->cmd_list[i] = kmalloc(sizeof(struct megasas_cmd),
4443						GFP_KERNEL);
4444
4445		if (!instance->cmd_list[i]) {
4446
4447			for (j = 0; j < i; j++)
4448				kfree(instance->cmd_list[j]);
4449
4450			kfree(instance->cmd_list);
4451			instance->cmd_list = NULL;
4452
4453			return -ENOMEM;
4454		}
4455	}
4456
4457	for (i = 0; i < max_cmd; i++) {
4458		cmd = instance->cmd_list[i];
4459		memset(cmd, 0, sizeof(struct megasas_cmd));
4460		cmd->index = i;
4461		cmd->scmd = NULL;
4462		cmd->instance = instance;
4463
4464		list_add_tail(&cmd->list, &instance->cmd_pool);
4465	}
4466
4467	/*
4468	 * Create a frame pool and assign one frame to each cmd
4469	 */
4470	if (megasas_create_frame_pool(instance)) {
4471		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
4472		megasas_free_cmds(instance);
4473		return -ENOMEM;
4474	}
4475
4476	return 0;
4477}
4478
4479/*
4480 * dcmd_timeout_ocr_possible -	Check if OCR is possible based on Driver/FW state.
4481 * @instance:				Adapter soft state
4482 *
4483 * Return 0 for only Fusion adapter, if driver load/unload is not in progress
4484 * or FW is not under OCR.
4485 */
4486inline int
4487dcmd_timeout_ocr_possible(struct megasas_instance *instance) {
4488
4489	if (instance->adapter_type == MFI_SERIES)
4490		return KILL_ADAPTER;
4491	else if (instance->unload ||
4492			test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE,
4493				 &instance->reset_flags))
4494		return IGNORE_TIMEOUT;
4495	else
4496		return INITIATE_OCR;
4497}
4498
4499static void
4500megasas_get_pd_info(struct megasas_instance *instance, struct scsi_device *sdev)
4501{
4502	int ret;
4503	struct megasas_cmd *cmd;
4504	struct megasas_dcmd_frame *dcmd;
4505
4506	struct MR_PRIV_DEVICE *mr_device_priv_data;
4507	u16 device_id = 0;
4508
4509	device_id = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id;
4510	cmd = megasas_get_cmd(instance);
4511
4512	if (!cmd) {
4513		dev_err(&instance->pdev->dev, "Failed to get cmd %s\n", __func__);
4514		return;
4515	}
4516
4517	dcmd = &cmd->frame->dcmd;
4518
4519	memset(instance->pd_info, 0, sizeof(*instance->pd_info));
4520	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4521
4522	dcmd->mbox.s[0] = cpu_to_le16(device_id);
4523	dcmd->cmd = MFI_CMD_DCMD;
4524	dcmd->cmd_status = 0xFF;
4525	dcmd->sge_count = 1;
4526	dcmd->flags = MFI_FRAME_DIR_READ;
4527	dcmd->timeout = 0;
4528	dcmd->pad_0 = 0;
4529	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_PD_INFO));
4530	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_GET_INFO);
4531
4532	megasas_set_dma_settings(instance, dcmd, instance->pd_info_h,
4533				 sizeof(struct MR_PD_INFO));
4534
4535	if ((instance->adapter_type != MFI_SERIES) &&
4536	    !instance->mask_interrupts)
4537		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4538	else
4539		ret = megasas_issue_polled(instance, cmd);
4540
4541	switch (ret) {
4542	case DCMD_SUCCESS:
4543		mr_device_priv_data = sdev->hostdata;
4544		le16_to_cpus((u16 *)&instance->pd_info->state.ddf.pdType);
4545		mr_device_priv_data->interface_type =
4546				instance->pd_info->state.ddf.pdType.intf;
4547		break;
4548
4549	case DCMD_TIMEOUT:
4550
4551		switch (dcmd_timeout_ocr_possible(instance)) {
4552		case INITIATE_OCR:
4553			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4554			mutex_unlock(&instance->reset_mutex);
4555			megasas_reset_fusion(instance->host,
4556				MFI_IO_TIMEOUT_OCR);
4557			mutex_lock(&instance->reset_mutex);
4558			break;
4559		case KILL_ADAPTER:
4560			megaraid_sas_kill_hba(instance);
4561			break;
4562		case IGNORE_TIMEOUT:
4563			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4564				__func__, __LINE__);
4565			break;
4566		}
4567
4568		break;
4569	}
4570
4571	if (ret != DCMD_TIMEOUT)
4572		megasas_return_cmd(instance, cmd);
4573
4574	return;
4575}
4576/*
4577 * megasas_get_pd_list_info -	Returns FW's pd_list structure
4578 * @instance:				Adapter soft state
4579 * @pd_list:				pd_list structure
4580 *
4581 * Issues an internal command (DCMD) to get the FW's controller PD
4582 * list structure.  This information is mainly used to find out SYSTEM
4583 * supported by the FW.
4584 */
4585static int
4586megasas_get_pd_list(struct megasas_instance *instance)
4587{
4588	int ret = 0, pd_index = 0;
4589	struct megasas_cmd *cmd;
4590	struct megasas_dcmd_frame *dcmd;
4591	struct MR_PD_LIST *ci;
4592	struct MR_PD_ADDRESS *pd_addr;
4593
4594	if (instance->pd_list_not_supported) {
4595		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4596		"not supported by firmware\n");
4597		return ret;
4598	}
4599
4600	ci = instance->pd_list_buf;
4601
4602	cmd = megasas_get_cmd(instance);
4603
4604	if (!cmd) {
4605		dev_printk(KERN_DEBUG, &instance->pdev->dev, "(get_pd_list): Failed to get cmd\n");
4606		return -ENOMEM;
4607	}
4608
4609	dcmd = &cmd->frame->dcmd;
4610
4611	memset(ci, 0, sizeof(*ci));
4612	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4613
4614	dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST;
4615	dcmd->mbox.b[1] = 0;
4616	dcmd->cmd = MFI_CMD_DCMD;
4617	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4618	dcmd->sge_count = 1;
4619	dcmd->flags = MFI_FRAME_DIR_READ;
4620	dcmd->timeout = 0;
4621	dcmd->pad_0 = 0;
4622	dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
4623	dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY);
4624
4625	megasas_set_dma_settings(instance, dcmd, instance->pd_list_buf_h,
4626				 (MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)));
4627
4628	if ((instance->adapter_type != MFI_SERIES) &&
4629	    !instance->mask_interrupts)
4630		ret = megasas_issue_blocked_cmd(instance, cmd,
4631			MFI_IO_TIMEOUT_SECS);
4632	else
4633		ret = megasas_issue_polled(instance, cmd);
4634
4635	switch (ret) {
4636	case DCMD_FAILED:
4637		dev_info(&instance->pdev->dev, "MR_DCMD_PD_LIST_QUERY "
4638			"failed/not supported by firmware\n");
4639
4640		if (instance->adapter_type != MFI_SERIES)
4641			megaraid_sas_kill_hba(instance);
4642		else
4643			instance->pd_list_not_supported = 1;
4644		break;
4645	case DCMD_TIMEOUT:
4646
4647		switch (dcmd_timeout_ocr_possible(instance)) {
4648		case INITIATE_OCR:
4649			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4650			/*
4651			 * DCMD failed from AEN path.
4652			 * AEN path already hold reset_mutex to avoid PCI access
4653			 * while OCR is in progress.
4654			 */
4655			mutex_unlock(&instance->reset_mutex);
4656			megasas_reset_fusion(instance->host,
4657						MFI_IO_TIMEOUT_OCR);
4658			mutex_lock(&instance->reset_mutex);
4659			break;
4660		case KILL_ADAPTER:
4661			megaraid_sas_kill_hba(instance);
4662			break;
4663		case IGNORE_TIMEOUT:
4664			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d \n",
4665				__func__, __LINE__);
4666			break;
4667		}
4668
4669		break;
4670
4671	case DCMD_SUCCESS:
4672		pd_addr = ci->addr;
4673		if (megasas_dbg_lvl & LD_PD_DEBUG)
4674			dev_info(&instance->pdev->dev, "%s, sysPD count: 0x%x\n",
4675				 __func__, le32_to_cpu(ci->count));
4676
4677		if ((le32_to_cpu(ci->count) >
4678			(MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL)))
4679			break;
4680
4681		memset(instance->local_pd_list, 0,
4682				MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
4683
4684		for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) {
4685			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].tid	=
4686					le16_to_cpu(pd_addr->deviceId);
4687			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveType	=
4688					pd_addr->scsiDevType;
4689			instance->local_pd_list[le16_to_cpu(pd_addr->deviceId)].driveState	=
4690					MR_PD_STATE_SYSTEM;
4691			if (megasas_dbg_lvl & LD_PD_DEBUG)
4692				dev_info(&instance->pdev->dev,
4693					 "PD%d: targetID: 0x%03x deviceType:0x%x\n",
4694					 pd_index, le16_to_cpu(pd_addr->deviceId),
4695					 pd_addr->scsiDevType);
4696			pd_addr++;
4697		}
4698
4699		memcpy(instance->pd_list, instance->local_pd_list,
4700			sizeof(instance->pd_list));
4701		break;
4702
4703	}
4704
4705	if (ret != DCMD_TIMEOUT)
4706		megasas_return_cmd(instance, cmd);
4707
4708	return ret;
4709}
4710
4711/*
4712 * megasas_get_ld_list_info -	Returns FW's ld_list structure
4713 * @instance:				Adapter soft state
4714 * @ld_list:				ld_list structure
4715 *
4716 * Issues an internal command (DCMD) to get the FW's controller PD
4717 * list structure.  This information is mainly used to find out SYSTEM
4718 * supported by the FW.
4719 */
4720static int
4721megasas_get_ld_list(struct megasas_instance *instance)
4722{
4723	int ret = 0, ld_index = 0, ids = 0;
4724	struct megasas_cmd *cmd;
4725	struct megasas_dcmd_frame *dcmd;
4726	struct MR_LD_LIST *ci;
4727	dma_addr_t ci_h = 0;
4728	u32 ld_count;
4729
4730	ci = instance->ld_list_buf;
4731	ci_h = instance->ld_list_buf_h;
4732
4733	cmd = megasas_get_cmd(instance);
4734
4735	if (!cmd) {
4736		dev_printk(KERN_DEBUG, &instance->pdev->dev, "megasas_get_ld_list: Failed to get cmd\n");
4737		return -ENOMEM;
4738	}
4739
4740	dcmd = &cmd->frame->dcmd;
4741
4742	memset(ci, 0, sizeof(*ci));
4743	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4744
4745	if (instance->supportmax256vd)
4746		dcmd->mbox.b[0] = 1;
4747	dcmd->cmd = MFI_CMD_DCMD;
4748	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4749	dcmd->sge_count = 1;
4750	dcmd->flags = MFI_FRAME_DIR_READ;
4751	dcmd->timeout = 0;
4752	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST));
4753	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST);
4754	dcmd->pad_0  = 0;
4755
4756	megasas_set_dma_settings(instance, dcmd, ci_h,
4757				 sizeof(struct MR_LD_LIST));
4758
4759	if ((instance->adapter_type != MFI_SERIES) &&
4760	    !instance->mask_interrupts)
4761		ret = megasas_issue_blocked_cmd(instance, cmd,
4762			MFI_IO_TIMEOUT_SECS);
4763	else
4764		ret = megasas_issue_polled(instance, cmd);
4765
4766	ld_count = le32_to_cpu(ci->ldCount);
4767
4768	switch (ret) {
4769	case DCMD_FAILED:
4770		megaraid_sas_kill_hba(instance);
4771		break;
4772	case DCMD_TIMEOUT:
4773
4774		switch (dcmd_timeout_ocr_possible(instance)) {
4775		case INITIATE_OCR:
4776			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4777			/*
4778			 * DCMD failed from AEN path.
4779			 * AEN path already hold reset_mutex to avoid PCI access
4780			 * while OCR is in progress.
4781			 */
4782			mutex_unlock(&instance->reset_mutex);
4783			megasas_reset_fusion(instance->host,
4784						MFI_IO_TIMEOUT_OCR);
4785			mutex_lock(&instance->reset_mutex);
4786			break;
4787		case KILL_ADAPTER:
4788			megaraid_sas_kill_hba(instance);
4789			break;
4790		case IGNORE_TIMEOUT:
4791			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4792				__func__, __LINE__);
4793			break;
4794		}
4795
4796		break;
4797
4798	case DCMD_SUCCESS:
4799		if (megasas_dbg_lvl & LD_PD_DEBUG)
4800			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4801				 __func__, ld_count);
4802
4803		if (ld_count > instance->fw_supported_vd_count)
4804			break;
4805
4806		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
4807
4808		for (ld_index = 0; ld_index < ld_count; ld_index++) {
4809			if (ci->ldList[ld_index].state != 0) {
4810				ids = ci->ldList[ld_index].ref.targetId;
4811				instance->ld_ids[ids] = ci->ldList[ld_index].ref.targetId;
4812				if (megasas_dbg_lvl & LD_PD_DEBUG)
4813					dev_info(&instance->pdev->dev,
4814						 "LD%d: targetID: 0x%03x\n",
4815						 ld_index, ids);
4816			}
4817		}
4818
4819		break;
4820	}
4821
4822	if (ret != DCMD_TIMEOUT)
4823		megasas_return_cmd(instance, cmd);
4824
4825	return ret;
4826}
4827
4828/**
4829 * megasas_ld_list_query -	Returns FW's ld_list structure
4830 * @instance:				Adapter soft state
4831 * @query_type:				ld_list structure type
4832 *
4833 * Issues an internal command (DCMD) to get the FW's controller PD
4834 * list structure.  This information is mainly used to find out SYSTEM
4835 * supported by the FW.
4836 */
4837static int
4838megasas_ld_list_query(struct megasas_instance *instance, u8 query_type)
4839{
4840	int ret = 0, ld_index = 0, ids = 0;
4841	struct megasas_cmd *cmd;
4842	struct megasas_dcmd_frame *dcmd;
4843	struct MR_LD_TARGETID_LIST *ci;
4844	dma_addr_t ci_h = 0;
4845	u32 tgtid_count;
4846
4847	ci = instance->ld_targetid_list_buf;
4848	ci_h = instance->ld_targetid_list_buf_h;
4849
4850	cmd = megasas_get_cmd(instance);
4851
4852	if (!cmd) {
4853		dev_warn(&instance->pdev->dev,
4854		         "megasas_ld_list_query: Failed to get cmd\n");
4855		return -ENOMEM;
4856	}
4857
4858	dcmd = &cmd->frame->dcmd;
4859
4860	memset(ci, 0, sizeof(*ci));
4861	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4862
4863	dcmd->mbox.b[0] = query_type;
4864	if (instance->supportmax256vd)
4865		dcmd->mbox.b[2] = 1;
4866
4867	dcmd->cmd = MFI_CMD_DCMD;
4868	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4869	dcmd->sge_count = 1;
4870	dcmd->flags = MFI_FRAME_DIR_READ;
4871	dcmd->timeout = 0;
4872	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
4873	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY);
4874	dcmd->pad_0  = 0;
4875
4876	megasas_set_dma_settings(instance, dcmd, ci_h,
4877				 sizeof(struct MR_LD_TARGETID_LIST));
4878
4879	if ((instance->adapter_type != MFI_SERIES) &&
4880	    !instance->mask_interrupts)
4881		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
4882	else
4883		ret = megasas_issue_polled(instance, cmd);
4884
4885	switch (ret) {
4886	case DCMD_FAILED:
4887		dev_info(&instance->pdev->dev,
4888			"DCMD not supported by firmware - %s %d\n",
4889				__func__, __LINE__);
4890		ret = megasas_get_ld_list(instance);
4891		break;
4892	case DCMD_TIMEOUT:
4893		switch (dcmd_timeout_ocr_possible(instance)) {
4894		case INITIATE_OCR:
4895			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
4896			/*
4897			 * DCMD failed from AEN path.
4898			 * AEN path already hold reset_mutex to avoid PCI access
4899			 * while OCR is in progress.
4900			 */
4901			mutex_unlock(&instance->reset_mutex);
4902			megasas_reset_fusion(instance->host,
4903						MFI_IO_TIMEOUT_OCR);
4904			mutex_lock(&instance->reset_mutex);
4905			break;
4906		case KILL_ADAPTER:
4907			megaraid_sas_kill_hba(instance);
4908			break;
4909		case IGNORE_TIMEOUT:
4910			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
4911				__func__, __LINE__);
4912			break;
4913		}
4914
4915		break;
4916	case DCMD_SUCCESS:
4917		tgtid_count = le32_to_cpu(ci->count);
4918
4919		if (megasas_dbg_lvl & LD_PD_DEBUG)
4920			dev_info(&instance->pdev->dev, "%s, LD count: 0x%x\n",
4921				 __func__, tgtid_count);
4922
4923		if ((tgtid_count > (instance->fw_supported_vd_count)))
4924			break;
4925
4926		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
4927		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
4928			ids = ci->targetId[ld_index];
4929			instance->ld_ids[ids] = ci->targetId[ld_index];
4930			if (megasas_dbg_lvl & LD_PD_DEBUG)
4931				dev_info(&instance->pdev->dev, "LD%d: targetID: 0x%03x\n",
4932					 ld_index, ci->targetId[ld_index]);
4933		}
4934
4935		break;
4936	}
4937
4938	if (ret != DCMD_TIMEOUT)
4939		megasas_return_cmd(instance, cmd);
4940
4941	return ret;
4942}
4943
4944/**
4945 * dcmd.opcode            - MR_DCMD_CTRL_DEVICE_LIST_GET
4946 * dcmd.mbox              - reserved
4947 * dcmd.sge IN            - ptr to return MR_HOST_DEVICE_LIST structure
4948 * Desc:    This DCMD will return the combined device list
4949 * Status:  MFI_STAT_OK - List returned successfully
4950 *          MFI_STAT_INVALID_CMD - Firmware support for the feature has been
4951 *                                 disabled
4952 * @instance:			Adapter soft state
4953 * @is_probe:			Driver probe check
4954 * Return:			0 if DCMD succeeded
4955 *				 non-zero if failed
4956 */
4957static int
4958megasas_host_device_list_query(struct megasas_instance *instance,
4959			       bool is_probe)
4960{
4961	int ret, i, target_id;
4962	struct megasas_cmd *cmd;
4963	struct megasas_dcmd_frame *dcmd;
4964	struct MR_HOST_DEVICE_LIST *ci;
4965	u32 count;
4966	dma_addr_t ci_h;
4967
4968	ci = instance->host_device_list_buf;
4969	ci_h = instance->host_device_list_buf_h;
4970
4971	cmd = megasas_get_cmd(instance);
4972
4973	if (!cmd) {
4974		dev_warn(&instance->pdev->dev,
4975			 "%s: failed to get cmd\n",
4976			 __func__);
4977		return -ENOMEM;
4978	}
4979
4980	dcmd = &cmd->frame->dcmd;
4981
4982	memset(ci, 0, sizeof(*ci));
4983	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
4984
4985	dcmd->mbox.b[0] = is_probe ? 0 : 1;
4986	dcmd->cmd = MFI_CMD_DCMD;
4987	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
4988	dcmd->sge_count = 1;
4989	dcmd->flags = MFI_FRAME_DIR_READ;
4990	dcmd->timeout = 0;
4991	dcmd->pad_0 = 0;
4992	dcmd->data_xfer_len = cpu_to_le32(HOST_DEVICE_LIST_SZ);
4993	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_DEVICE_LIST_GET);
4994
4995	megasas_set_dma_settings(instance, dcmd, ci_h, HOST_DEVICE_LIST_SZ);
4996
4997	if (!instance->mask_interrupts) {
4998		ret = megasas_issue_blocked_cmd(instance, cmd,
4999						MFI_IO_TIMEOUT_SECS);
5000	} else {
5001		ret = megasas_issue_polled(instance, cmd);
5002		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5003	}
5004
5005	switch (ret) {
5006	case DCMD_SUCCESS:
5007		/* Fill the internal pd_list and ld_ids array based on
5008		 * targetIds returned by FW
5009		 */
5010		count = le32_to_cpu(ci->count);
5011
5012		if (count > (MEGASAS_MAX_PD + MAX_LOGICAL_DRIVES_EXT))
5013			break;
5014
5015		if (megasas_dbg_lvl & LD_PD_DEBUG)
5016			dev_info(&instance->pdev->dev, "%s, Device count: 0x%x\n",
5017				 __func__, count);
5018
5019		memset(instance->local_pd_list, 0,
5020		       MEGASAS_MAX_PD * sizeof(struct megasas_pd_list));
5021		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
5022		for (i = 0; i < count; i++) {
5023			target_id = le16_to_cpu(ci->host_device_list[i].target_id);
5024			if (ci->host_device_list[i].flags.u.bits.is_sys_pd) {
5025				instance->local_pd_list[target_id].tid = target_id;
5026				instance->local_pd_list[target_id].driveType =
5027						ci->host_device_list[i].scsi_type;
5028				instance->local_pd_list[target_id].driveState =
5029						MR_PD_STATE_SYSTEM;
5030				if (megasas_dbg_lvl & LD_PD_DEBUG)
5031					dev_info(&instance->pdev->dev,
5032						 "Device %d: PD targetID: 0x%03x deviceType:0x%x\n",
5033						 i, target_id, ci->host_device_list[i].scsi_type);
5034			} else {
5035				instance->ld_ids[target_id] = target_id;
5036				if (megasas_dbg_lvl & LD_PD_DEBUG)
5037					dev_info(&instance->pdev->dev,
5038						 "Device %d: LD targetID: 0x%03x\n",
5039						 i, target_id);
5040			}
5041		}
5042
5043		memcpy(instance->pd_list, instance->local_pd_list,
5044		       sizeof(instance->pd_list));
5045		break;
5046
5047	case DCMD_TIMEOUT:
5048		switch (dcmd_timeout_ocr_possible(instance)) {
5049		case INITIATE_OCR:
5050			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5051			mutex_unlock(&instance->reset_mutex);
5052			megasas_reset_fusion(instance->host,
5053				MFI_IO_TIMEOUT_OCR);
5054			mutex_lock(&instance->reset_mutex);
5055			break;
5056		case KILL_ADAPTER:
5057			megaraid_sas_kill_hba(instance);
5058			break;
5059		case IGNORE_TIMEOUT:
5060			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5061				 __func__, __LINE__);
5062			break;
5063		}
5064		break;
5065	case DCMD_FAILED:
5066		dev_err(&instance->pdev->dev,
5067			"%s: MR_DCMD_CTRL_DEVICE_LIST_GET failed\n",
5068			__func__);
5069		break;
5070	}
5071
5072	if (ret != DCMD_TIMEOUT)
5073		megasas_return_cmd(instance, cmd);
5074
5075	return ret;
5076}
5077
5078/*
5079 * megasas_update_ext_vd_details : Update details w.r.t Extended VD
5080 * instance			 : Controller's instance
5081*/
5082static void megasas_update_ext_vd_details(struct megasas_instance *instance)
5083{
5084	struct fusion_context *fusion;
5085	u32 ventura_map_sz = 0;
5086
5087	fusion = instance->ctrl_context;
5088	/* For MFI based controllers return dummy success */
5089	if (!fusion)
5090		return;
5091
5092	instance->supportmax256vd =
5093		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs;
5094	/* Below is additional check to address future FW enhancement */
5095	if (instance->ctrl_info_buf->max_lds > 64)
5096		instance->supportmax256vd = 1;
5097
5098	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
5099					* MEGASAS_MAX_DEV_PER_CHANNEL;
5100	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
5101					* MEGASAS_MAX_DEV_PER_CHANNEL;
5102	if (instance->supportmax256vd) {
5103		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
5104		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5105	} else {
5106		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
5107		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
5108	}
5109
5110	dev_info(&instance->pdev->dev,
5111		"FW provided supportMaxExtLDs: %d\tmax_lds: %d\n",
5112		instance->ctrl_info_buf->adapterOperations3.supportMaxExtLDs ? 1 : 0,
5113		instance->ctrl_info_buf->max_lds);
5114
5115	if (instance->max_raid_mapsize) {
5116		ventura_map_sz = instance->max_raid_mapsize *
5117						MR_MIN_MAP_SIZE; /* 64k */
5118		fusion->current_map_sz = ventura_map_sz;
5119		fusion->max_map_sz = ventura_map_sz;
5120	} else {
5121		fusion->old_map_sz =  sizeof(struct MR_FW_RAID_MAP) +
5122					(sizeof(struct MR_LD_SPAN_MAP) *
5123					(instance->fw_supported_vd_count - 1));
5124		fusion->new_map_sz =  sizeof(struct MR_FW_RAID_MAP_EXT);
5125
5126		fusion->max_map_sz =
5127			max(fusion->old_map_sz, fusion->new_map_sz);
5128
5129		if (instance->supportmax256vd)
5130			fusion->current_map_sz = fusion->new_map_sz;
5131		else
5132			fusion->current_map_sz = fusion->old_map_sz;
5133	}
5134	/* irrespective of FW raid maps, driver raid map is constant */
5135	fusion->drv_map_sz = sizeof(struct MR_DRV_RAID_MAP_ALL);
5136}
5137
5138/*
5139 * dcmd.opcode                - MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES
5140 * dcmd.hdr.length            - number of bytes to read
5141 * dcmd.sge                   - Ptr to MR_SNAPDUMP_PROPERTIES
5142 * Desc:			 Fill in snapdump properties
5143 * Status:			 MFI_STAT_OK- Command successful
5144 */
5145void megasas_get_snapdump_properties(struct megasas_instance *instance)
5146{
5147	int ret = 0;
5148	struct megasas_cmd *cmd;
5149	struct megasas_dcmd_frame *dcmd;
5150	struct MR_SNAPDUMP_PROPERTIES *ci;
5151	dma_addr_t ci_h = 0;
5152
5153	ci = instance->snapdump_prop;
5154	ci_h = instance->snapdump_prop_h;
5155
5156	if (!ci)
5157		return;
5158
5159	cmd = megasas_get_cmd(instance);
5160
5161	if (!cmd) {
5162		dev_dbg(&instance->pdev->dev, "Failed to get a free cmd\n");
5163		return;
5164	}
5165
5166	dcmd = &cmd->frame->dcmd;
5167
5168	memset(ci, 0, sizeof(*ci));
5169	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5170
5171	dcmd->cmd = MFI_CMD_DCMD;
5172	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5173	dcmd->sge_count = 1;
5174	dcmd->flags = MFI_FRAME_DIR_READ;
5175	dcmd->timeout = 0;
5176	dcmd->pad_0 = 0;
5177	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_SNAPDUMP_PROPERTIES));
5178	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SNAPDUMP_GET_PROPERTIES);
5179
5180	megasas_set_dma_settings(instance, dcmd, ci_h,
5181				 sizeof(struct MR_SNAPDUMP_PROPERTIES));
5182
5183	if (!instance->mask_interrupts) {
5184		ret = megasas_issue_blocked_cmd(instance, cmd,
5185						MFI_IO_TIMEOUT_SECS);
5186	} else {
5187		ret = megasas_issue_polled(instance, cmd);
5188		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5189	}
5190
5191	switch (ret) {
5192	case DCMD_SUCCESS:
5193		instance->snapdump_wait_time =
5194			min_t(u8, ci->trigger_min_num_sec_before_ocr,
5195				MEGASAS_MAX_SNAP_DUMP_WAIT_TIME);
5196		break;
5197
5198	case DCMD_TIMEOUT:
5199		switch (dcmd_timeout_ocr_possible(instance)) {
5200		case INITIATE_OCR:
5201			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5202			mutex_unlock(&instance->reset_mutex);
5203			megasas_reset_fusion(instance->host,
5204				MFI_IO_TIMEOUT_OCR);
5205			mutex_lock(&instance->reset_mutex);
5206			break;
5207		case KILL_ADAPTER:
5208			megaraid_sas_kill_hba(instance);
5209			break;
5210		case IGNORE_TIMEOUT:
5211			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5212				__func__, __LINE__);
5213			break;
5214		}
5215	}
5216
5217	if (ret != DCMD_TIMEOUT)
5218		megasas_return_cmd(instance, cmd);
5219}
5220
5221/**
5222 * megasas_get_controller_info -	Returns FW's controller structure
5223 * @instance:				Adapter soft state
5224 *
5225 * Issues an internal command (DCMD) to get the FW's controller structure.
5226 * This information is mainly used to find out the maximum IO transfer per
5227 * command supported by the FW.
5228 */
5229int
5230megasas_get_ctrl_info(struct megasas_instance *instance)
5231{
5232	int ret = 0;
5233	struct megasas_cmd *cmd;
5234	struct megasas_dcmd_frame *dcmd;
5235	struct megasas_ctrl_info *ci;
5236	dma_addr_t ci_h = 0;
5237
5238	ci = instance->ctrl_info_buf;
5239	ci_h = instance->ctrl_info_buf_h;
5240
5241	cmd = megasas_get_cmd(instance);
5242
5243	if (!cmd) {
5244		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a free cmd\n");
5245		return -ENOMEM;
5246	}
5247
5248	dcmd = &cmd->frame->dcmd;
5249
5250	memset(ci, 0, sizeof(*ci));
5251	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5252
5253	dcmd->cmd = MFI_CMD_DCMD;
5254	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5255	dcmd->sge_count = 1;
5256	dcmd->flags = MFI_FRAME_DIR_READ;
5257	dcmd->timeout = 0;
5258	dcmd->pad_0 = 0;
5259	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info));
5260	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
5261	dcmd->mbox.b[0] = 1;
5262
5263	megasas_set_dma_settings(instance, dcmd, ci_h,
5264				 sizeof(struct megasas_ctrl_info));
5265
5266	if ((instance->adapter_type != MFI_SERIES) &&
5267	    !instance->mask_interrupts) {
5268		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5269	} else {
5270		ret = megasas_issue_polled(instance, cmd);
5271		cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5272	}
5273
5274	switch (ret) {
5275	case DCMD_SUCCESS:
5276		/* Save required controller information in
5277		 * CPU endianness format.
5278		 */
5279		le32_to_cpus((u32 *)&ci->properties.OnOffProperties);
5280		le16_to_cpus((u16 *)&ci->properties.on_off_properties2);
5281		le32_to_cpus((u32 *)&ci->adapterOperations2);
5282		le32_to_cpus((u32 *)&ci->adapterOperations3);
5283		le16_to_cpus((u16 *)&ci->adapter_operations4);
5284		le32_to_cpus((u32 *)&ci->adapter_operations5);
5285
5286		/* Update the latest Ext VD info.
5287		 * From Init path, store current firmware details.
5288		 * From OCR path, detect any firmware properties changes.
5289		 * in case of Firmware upgrade without system reboot.
5290		 */
5291		megasas_update_ext_vd_details(instance);
5292		instance->support_seqnum_jbod_fp =
5293			ci->adapterOperations3.useSeqNumJbodFP;
5294		instance->support_morethan256jbod =
5295			ci->adapter_operations4.support_pd_map_target_id;
5296		instance->support_nvme_passthru =
5297			ci->adapter_operations4.support_nvme_passthru;
5298		instance->support_pci_lane_margining =
5299			ci->adapter_operations5.support_pci_lane_margining;
5300		instance->task_abort_tmo = ci->TaskAbortTO;
5301		instance->max_reset_tmo = ci->MaxResetTO;
5302
5303		/*Check whether controller is iMR or MR */
5304		instance->is_imr = (ci->memory_size ? 0 : 1);
5305
5306		instance->snapdump_wait_time =
5307			(ci->properties.on_off_properties2.enable_snap_dump ?
5308			 MEGASAS_DEFAULT_SNAP_DUMP_WAIT_TIME : 0);
5309
5310		instance->enable_fw_dev_list =
5311			ci->properties.on_off_properties2.enable_fw_dev_list;
5312
5313		dev_info(&instance->pdev->dev,
5314			"controller type\t: %s(%dMB)\n",
5315			instance->is_imr ? "iMR" : "MR",
5316			le16_to_cpu(ci->memory_size));
5317
5318		instance->disableOnlineCtrlReset =
5319			ci->properties.OnOffProperties.disableOnlineCtrlReset;
5320		instance->secure_jbod_support =
5321			ci->adapterOperations3.supportSecurityonJBOD;
5322		dev_info(&instance->pdev->dev, "Online Controller Reset(OCR)\t: %s\n",
5323			instance->disableOnlineCtrlReset ? "Disabled" : "Enabled");
5324		dev_info(&instance->pdev->dev, "Secure JBOD support\t: %s\n",
5325			instance->secure_jbod_support ? "Yes" : "No");
5326		dev_info(&instance->pdev->dev, "NVMe passthru support\t: %s\n",
5327			 instance->support_nvme_passthru ? "Yes" : "No");
5328		dev_info(&instance->pdev->dev,
5329			 "FW provided TM TaskAbort/Reset timeout\t: %d secs/%d secs\n",
5330			 instance->task_abort_tmo, instance->max_reset_tmo);
5331		dev_info(&instance->pdev->dev, "JBOD sequence map support\t: %s\n",
5332			 instance->support_seqnum_jbod_fp ? "Yes" : "No");
5333		dev_info(&instance->pdev->dev, "PCI Lane Margining support\t: %s\n",
5334			 instance->support_pci_lane_margining ? "Yes" : "No");
5335
5336		break;
5337
5338	case DCMD_TIMEOUT:
5339		switch (dcmd_timeout_ocr_possible(instance)) {
5340		case INITIATE_OCR:
5341			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5342			mutex_unlock(&instance->reset_mutex);
5343			megasas_reset_fusion(instance->host,
5344				MFI_IO_TIMEOUT_OCR);
5345			mutex_lock(&instance->reset_mutex);
5346			break;
5347		case KILL_ADAPTER:
5348			megaraid_sas_kill_hba(instance);
5349			break;
5350		case IGNORE_TIMEOUT:
5351			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5352				__func__, __LINE__);
5353			break;
5354		}
5355		break;
5356	case DCMD_FAILED:
5357		megaraid_sas_kill_hba(instance);
5358		break;
5359
5360	}
5361
5362	if (ret != DCMD_TIMEOUT)
5363		megasas_return_cmd(instance, cmd);
5364
5365	return ret;
5366}
5367
5368/*
5369 * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
5370 *					to firmware
5371 *
5372 * @instance:				Adapter soft state
5373 * @crash_buf_state		-	tell FW to turn ON/OFF crash dump feature
5374					MR_CRASH_BUF_TURN_OFF = 0
5375					MR_CRASH_BUF_TURN_ON = 1
5376 * @return 0 on success non-zero on failure.
5377 * Issues an internal command (DCMD) to set parameters for crash dump feature.
5378 * Driver will send address of crash dump DMA buffer and set mbox to tell FW
5379 * that driver supports crash dump feature. This DCMD will be sent only if
5380 * crash dump feature is supported by the FW.
5381 *
5382 */
5383int megasas_set_crash_dump_params(struct megasas_instance *instance,
5384	u8 crash_buf_state)
5385{
5386	int ret = 0;
5387	struct megasas_cmd *cmd;
5388	struct megasas_dcmd_frame *dcmd;
5389
5390	cmd = megasas_get_cmd(instance);
5391
5392	if (!cmd) {
5393		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
5394		return -ENOMEM;
5395	}
5396
5397
5398	dcmd = &cmd->frame->dcmd;
5399
5400	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
5401	dcmd->mbox.b[0] = crash_buf_state;
5402	dcmd->cmd = MFI_CMD_DCMD;
5403	dcmd->cmd_status = MFI_STAT_INVALID_STATUS;
5404	dcmd->sge_count = 1;
5405	dcmd->flags = MFI_FRAME_DIR_NONE;
5406	dcmd->timeout = 0;
5407	dcmd->pad_0 = 0;
5408	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
5409	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
5410
5411	megasas_set_dma_settings(instance, dcmd, instance->crash_dump_h,
5412				 CRASH_DMA_BUF_SIZE);
5413
5414	if ((instance->adapter_type != MFI_SERIES) &&
5415	    !instance->mask_interrupts)
5416		ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
5417	else
5418		ret = megasas_issue_polled(instance, cmd);
5419
5420	if (ret == DCMD_TIMEOUT) {
5421		switch (dcmd_timeout_ocr_possible(instance)) {
5422		case INITIATE_OCR:
5423			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
5424			megasas_reset_fusion(instance->host,
5425					MFI_IO_TIMEOUT_OCR);
5426			break;
5427		case KILL_ADAPTER:
5428			megaraid_sas_kill_hba(instance);
5429			break;
5430		case IGNORE_TIMEOUT:
5431			dev_info(&instance->pdev->dev, "Ignore DCMD timeout: %s %d\n",
5432				__func__, __LINE__);
5433			break;
5434		}
5435	} else
5436		megasas_return_cmd(instance, cmd);
5437
5438	return ret;
5439}
5440
5441/**
5442 * megasas_issue_init_mfi -	Initializes the FW
5443 * @instance:		Adapter soft state
5444 *
5445 * Issues the INIT MFI cmd
5446 */
5447static int
5448megasas_issue_init_mfi(struct megasas_instance *instance)
5449{
5450	__le32 context;
5451	struct megasas_cmd *cmd;
5452	struct megasas_init_frame *init_frame;
5453	struct megasas_init_queue_info *initq_info;
5454	dma_addr_t init_frame_h;
5455	dma_addr_t initq_info_h;
5456
5457	/*
5458	 * Prepare a init frame. Note the init frame points to queue info
5459	 * structure. Each frame has SGL allocated after first 64 bytes. For
5460	 * this frame - since we don't need any SGL - we use SGL's space as
5461	 * queue info structure
5462	 *
5463	 * We will not get a NULL command below. We just created the pool.
5464	 */
5465	cmd = megasas_get_cmd(instance);
5466
5467	init_frame = (struct megasas_init_frame *)cmd->frame;
5468	initq_info = (struct megasas_init_queue_info *)
5469		((unsigned long)init_frame + 64);
5470
5471	init_frame_h = cmd->frame_phys_addr;
5472	initq_info_h = init_frame_h + 64;
5473
5474	context = init_frame->context;
5475	memset(init_frame, 0, MEGAMFI_FRAME_SIZE);
5476	memset(initq_info, 0, sizeof(struct megasas_init_queue_info));
5477	init_frame->context = context;
5478
5479	initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1);
5480	initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h);
5481
5482	initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h);
5483	initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h);
5484
5485	init_frame->cmd = MFI_CMD_INIT;
5486	init_frame->cmd_status = MFI_STAT_INVALID_STATUS;
5487	init_frame->queue_info_new_phys_addr_lo =
5488		cpu_to_le32(lower_32_bits(initq_info_h));
5489	init_frame->queue_info_new_phys_addr_hi =
5490		cpu_to_le32(upper_32_bits(initq_info_h));
5491
5492	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info));
5493
5494	/*
5495	 * disable the intr before firing the init frame to FW
5496	 */
5497	instance->instancet->disable_intr(instance);
5498
5499	/*
5500	 * Issue the init frame in polled mode
5501	 */
5502
5503	if (megasas_issue_polled(instance, cmd)) {
5504		dev_err(&instance->pdev->dev, "Failed to init firmware\n");
5505		megasas_return_cmd(instance, cmd);
5506		goto fail_fw_init;
5507	}
5508
5509	megasas_return_cmd(instance, cmd);
5510
5511	return 0;
5512
5513fail_fw_init:
5514	return -EINVAL;
5515}
5516
5517static u32
5518megasas_init_adapter_mfi(struct megasas_instance *instance)
5519{
5520	u32 context_sz;
5521	u32 reply_q_sz;
5522
5523	/*
5524	 * Get various operational parameters from status register
5525	 */
5526	instance->max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
5527	/*
5528	 * Reduce the max supported cmds by 1. This is to ensure that the
5529	 * reply_q_sz (1 more than the max cmd that driver may send)
5530	 * does not exceed max cmds that the FW can support
5531	 */
5532	instance->max_fw_cmds = instance->max_fw_cmds-1;
5533	instance->max_mfi_cmds = instance->max_fw_cmds;
5534	instance->max_num_sge = (instance->instancet->read_fw_status_reg(instance) & 0xFF0000) >>
5535					0x10;
5536	/*
5537	 * For MFI skinny adapters, MEGASAS_SKINNY_INT_CMDS commands
5538	 * are reserved for IOCTL + driver's internal DCMDs.
5539	 */
5540	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
5541		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
5542		instance->max_scsi_cmds = (instance->max_fw_cmds -
5543			MEGASAS_SKINNY_INT_CMDS);
5544		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
5545	} else {
5546		instance->max_scsi_cmds = (instance->max_fw_cmds -
5547			MEGASAS_INT_CMDS);
5548		sema_init(&instance->ioctl_sem, (MEGASAS_MFI_IOCTL_CMDS));
5549	}
5550
5551	instance->cur_can_queue = instance->max_scsi_cmds;
5552	/*
5553	 * Create a pool of commands
5554	 */
5555	if (megasas_alloc_cmds(instance))
5556		goto fail_alloc_cmds;
5557
5558	/*
5559	 * Allocate memory for reply queue. Length of reply queue should
5560	 * be _one_ more than the maximum commands handled by the firmware.
5561	 *
5562	 * Note: When FW completes commands, it places corresponding contex
5563	 * values in this circular reply queue. This circular queue is a fairly
5564	 * typical producer-consumer queue. FW is the producer (of completed
5565	 * commands) and the driver is the consumer.
5566	 */
5567	context_sz = sizeof(u32);
5568	reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
5569
5570	instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
5571			reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
5572
5573	if (!instance->reply_queue) {
5574		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
5575		goto fail_reply_queue;
5576	}
5577
5578	if (megasas_issue_init_mfi(instance))
5579		goto fail_fw_init;
5580
5581	if (megasas_get_ctrl_info(instance)) {
5582		dev_err(&instance->pdev->dev, "(%d): Could get controller info "
5583			"Fail from %s %d\n", instance->unique_id,
5584			__func__, __LINE__);
5585		goto fail_fw_init;
5586	}
5587
5588	instance->fw_support_ieee = 0;
5589	instance->fw_support_ieee =
5590		(instance->instancet->read_fw_status_reg(instance) &
5591		0x04000000);
5592
5593	dev_notice(&instance->pdev->dev, "megasas_init_mfi: fw_support_ieee=%d",
5594			instance->fw_support_ieee);
5595
5596	if (instance->fw_support_ieee)
5597		instance->flag_ieee = 1;
5598
5599	return 0;
5600
5601fail_fw_init:
5602
5603	dma_free_coherent(&instance->pdev->dev, reply_q_sz,
5604			    instance->reply_queue, instance->reply_queue_h);
5605fail_reply_queue:
5606	megasas_free_cmds(instance);
5607
5608fail_alloc_cmds:
5609	return 1;
5610}
5611
5612static
5613void megasas_setup_irq_poll(struct megasas_instance *instance)
5614{
5615	struct megasas_irq_context *irq_ctx;
5616	u32 count, i;
5617
5618	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5619
5620	/* Initialize IRQ poll */
5621	for (i = 0; i < count; i++) {
5622		irq_ctx = &instance->irq_context[i];
5623		irq_ctx->os_irq = pci_irq_vector(instance->pdev, i);
5624		irq_ctx->irq_poll_scheduled = false;
5625		irq_poll_init(&irq_ctx->irqpoll,
5626			      instance->threshold_reply_count,
5627			      megasas_irqpoll);
5628	}
5629}
5630
5631/*
5632 * megasas_setup_irqs_ioapic -		register legacy interrupts.
5633 * @instance:				Adapter soft state
5634 *
5635 * Do not enable interrupt, only setup ISRs.
5636 *
5637 * Return 0 on success.
5638 */
5639static int
5640megasas_setup_irqs_ioapic(struct megasas_instance *instance)
5641{
5642	struct pci_dev *pdev;
5643
5644	pdev = instance->pdev;
5645	instance->irq_context[0].instance = instance;
5646	instance->irq_context[0].MSIxIndex = 0;
5647	snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u",
5648		"megasas", instance->host->host_no);
5649	if (request_irq(pci_irq_vector(pdev, 0),
5650			instance->instancet->service_isr, IRQF_SHARED,
5651			instance->irq_context->name, &instance->irq_context[0])) {
5652		dev_err(&instance->pdev->dev,
5653				"Failed to register IRQ from %s %d\n",
5654				__func__, __LINE__);
5655		return -1;
5656	}
5657	instance->perf_mode = MR_LATENCY_PERF_MODE;
5658	instance->low_latency_index_start = 0;
5659	return 0;
5660}
5661
5662/**
5663 * megasas_setup_irqs_msix -		register MSI-x interrupts.
5664 * @instance:				Adapter soft state
5665 * @is_probe:				Driver probe check
5666 *
5667 * Do not enable interrupt, only setup ISRs.
5668 *
5669 * Return 0 on success.
5670 */
5671static int
5672megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
5673{
5674	int i, j;
5675	struct pci_dev *pdev;
5676
5677	pdev = instance->pdev;
5678
5679	/* Try MSI-x */
5680	for (i = 0; i < instance->msix_vectors; i++) {
5681		instance->irq_context[i].instance = instance;
5682		instance->irq_context[i].MSIxIndex = i;
5683		snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u",
5684			"megasas", instance->host->host_no, i);
5685		if (request_irq(pci_irq_vector(pdev, i),
5686			instance->instancet->service_isr, 0, instance->irq_context[i].name,
5687			&instance->irq_context[i])) {
5688			dev_err(&instance->pdev->dev,
5689				"Failed to register IRQ for vector %d.\n", i);
5690			for (j = 0; j < i; j++) {
5691				if (j < instance->low_latency_index_start)
5692					irq_set_affinity_hint(
5693						pci_irq_vector(pdev, j), NULL);
5694				free_irq(pci_irq_vector(pdev, j),
5695					 &instance->irq_context[j]);
5696			}
5697			/* Retry irq register for IO_APIC*/
5698			instance->msix_vectors = 0;
5699			instance->msix_load_balance = false;
5700			if (is_probe) {
5701				pci_free_irq_vectors(instance->pdev);
5702				return megasas_setup_irqs_ioapic(instance);
5703			} else {
5704				return -1;
5705			}
5706		}
5707	}
5708
5709	return 0;
5710}
5711
5712/*
5713 * megasas_destroy_irqs-		unregister interrupts.
5714 * @instance:				Adapter soft state
5715 * return:				void
5716 */
5717static void
5718megasas_destroy_irqs(struct megasas_instance *instance) {
5719
5720	int i;
5721	int count;
5722	struct megasas_irq_context *irq_ctx;
5723
5724	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
5725	if (instance->adapter_type != MFI_SERIES) {
5726		for (i = 0; i < count; i++) {
5727			irq_ctx = &instance->irq_context[i];
5728			irq_poll_disable(&irq_ctx->irqpoll);
5729		}
5730	}
5731
5732	if (instance->msix_vectors)
5733		for (i = 0; i < instance->msix_vectors; i++) {
5734			if (i < instance->low_latency_index_start)
5735				irq_set_affinity_hint(
5736				    pci_irq_vector(instance->pdev, i), NULL);
5737			free_irq(pci_irq_vector(instance->pdev, i),
5738				 &instance->irq_context[i]);
5739		}
5740	else
5741		free_irq(pci_irq_vector(instance->pdev, 0),
5742			 &instance->irq_context[0]);
5743}
5744
5745/**
5746 * megasas_setup_jbod_map -	setup jbod map for FP seq_number.
5747 * @instance:				Adapter soft state
5748 *
5749 * Return 0 on success.
5750 */
5751void
5752megasas_setup_jbod_map(struct megasas_instance *instance)
5753{
5754	int i;
5755	struct fusion_context *fusion = instance->ctrl_context;
5756	u32 pd_seq_map_sz;
5757
5758	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
5759		(sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));
5760
5761	instance->use_seqnum_jbod_fp =
5762		instance->support_seqnum_jbod_fp;
5763	if (reset_devices || !fusion ||
5764		!instance->support_seqnum_jbod_fp) {
5765		dev_info(&instance->pdev->dev,
5766			"JBOD sequence map is disabled %s %d\n",
5767			__func__, __LINE__);
5768		instance->use_seqnum_jbod_fp = false;
5769		return;
5770	}
5771
5772	if (fusion->pd_seq_sync[0])
5773		goto skip_alloc;
5774
5775	for (i = 0; i < JBOD_MAPS_COUNT; i++) {
5776		fusion->pd_seq_sync[i] = dma_alloc_coherent
5777			(&instance->pdev->dev, pd_seq_map_sz,
5778			&fusion->pd_seq_phys[i], GFP_KERNEL);
5779		if (!fusion->pd_seq_sync[i]) {
5780			dev_err(&instance->pdev->dev,
5781				"Failed to allocate memory from %s %d\n",
5782				__func__, __LINE__);
5783			if (i == 1) {
5784				dma_free_coherent(&instance->pdev->dev,
5785					pd_seq_map_sz, fusion->pd_seq_sync[0],
5786					fusion->pd_seq_phys[0]);
5787				fusion->pd_seq_sync[0] = NULL;
5788			}
5789			instance->use_seqnum_jbod_fp = false;
5790			return;
5791		}
5792	}
5793
5794skip_alloc:
5795	if (!megasas_sync_pd_seq_num(instance, false) &&
5796		!megasas_sync_pd_seq_num(instance, true))
5797		instance->use_seqnum_jbod_fp = true;
5798	else
5799		instance->use_seqnum_jbod_fp = false;
5800}
5801
5802static void megasas_setup_reply_map(struct megasas_instance *instance)
5803{
5804	const struct cpumask *mask;
5805	unsigned int queue, cpu, low_latency_index_start;
5806
5807	low_latency_index_start = instance->low_latency_index_start;
5808
5809	for (queue = low_latency_index_start; queue < instance->msix_vectors; queue++) {
5810		mask = pci_irq_get_affinity(instance->pdev, queue);
5811		if (!mask)
5812			goto fallback;
5813
5814		for_each_cpu(cpu, mask)
5815			instance->reply_map[cpu] = queue;
5816	}
5817	return;
5818
5819fallback:
5820	queue = low_latency_index_start;
5821	for_each_possible_cpu(cpu) {
5822		instance->reply_map[cpu] = queue;
5823		if (queue == (instance->msix_vectors - 1))
5824			queue = low_latency_index_start;
5825		else
5826			queue++;
5827	}
5828}
5829
5830/**
5831 * megasas_get_device_list -	Get the PD and LD device list from FW.
5832 * @instance:			Adapter soft state
5833 * @return:			Success or failure
5834 *
5835 * Issue DCMDs to Firmware to get the PD and LD list.
5836 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
5837 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
5838 */
5839static
5840int megasas_get_device_list(struct megasas_instance *instance)
5841{
5842	memset(instance->pd_list, 0,
5843	       (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)));
5844	memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
5845
5846	if (instance->enable_fw_dev_list) {
5847		if (megasas_host_device_list_query(instance, true))
5848			return FAILED;
5849	} else {
5850		if (megasas_get_pd_list(instance) < 0) {
5851			dev_err(&instance->pdev->dev, "failed to get PD list\n");
5852			return FAILED;
5853		}
5854
5855		if (megasas_ld_list_query(instance,
5856					  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) {
5857			dev_err(&instance->pdev->dev, "failed to get LD list\n");
5858			return FAILED;
5859		}
5860	}
5861
5862	return SUCCESS;
5863}
5864
5865/**
5866 * megasas_set_high_iops_queue_affinity_hint -	Set affinity hint for high IOPS queues
5867 * @instance:					Adapter soft state
5868 * return:					void
5869 */
5870static inline void
5871megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
5872{
5873	int i;
5874	int local_numa_node;
5875
5876	if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
5877		local_numa_node = dev_to_node(&instance->pdev->dev);
5878
5879		for (i = 0; i < instance->low_latency_index_start; i++)
5880			irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
5881				cpumask_of_node(local_numa_node));
5882	}
5883}
5884
5885static int
5886__megasas_alloc_irq_vectors(struct megasas_instance *instance)
5887{
5888	int i, irq_flags;
5889	struct irq_affinity desc = { .pre_vectors = instance->low_latency_index_start };
5890	struct irq_affinity *descp = &desc;
5891
5892	irq_flags = PCI_IRQ_MSIX;
5893
5894	if (instance->smp_affinity_enable)
5895		irq_flags |= PCI_IRQ_AFFINITY;
5896	else
5897		descp = NULL;
5898
5899	i = pci_alloc_irq_vectors_affinity(instance->pdev,
5900		instance->low_latency_index_start,
5901		instance->msix_vectors, irq_flags, descp);
5902
5903	return i;
5904}
5905
5906/**
5907 * megasas_alloc_irq_vectors -	Allocate IRQ vectors/enable MSI-x vectors
5908 * @instance:			Adapter soft state
5909 * return:			void
5910 */
5911static void
5912megasas_alloc_irq_vectors(struct megasas_instance *instance)
5913{
5914	int i;
5915	unsigned int num_msix_req;
5916
5917	i = __megasas_alloc_irq_vectors(instance);
5918
5919	if ((instance->perf_mode == MR_BALANCED_PERF_MODE) &&
5920	    (i != instance->msix_vectors)) {
5921		if (instance->msix_vectors)
5922			pci_free_irq_vectors(instance->pdev);
5923		/* Disable Balanced IOPS mode and try realloc vectors */
5924		instance->perf_mode = MR_LATENCY_PERF_MODE;
5925		instance->low_latency_index_start = 1;
5926		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
5927
5928		instance->msix_vectors = min(num_msix_req,
5929				instance->msix_vectors);
5930
5931		i = __megasas_alloc_irq_vectors(instance);
5932
5933	}
5934
5935	dev_info(&instance->pdev->dev,
5936		"requested/available msix %d/%d\n", instance->msix_vectors, i);
5937
5938	if (i > 0)
5939		instance->msix_vectors = i;
5940	else
5941		instance->msix_vectors = 0;
5942
5943	if (instance->smp_affinity_enable)
5944		megasas_set_high_iops_queue_affinity_hint(instance);
5945}
5946
5947/**
5948 * megasas_init_fw -	Initializes the FW
5949 * @instance:		Adapter soft state
5950 *
5951 * This is the main function for initializing firmware
5952 */
5953
5954static int megasas_init_fw(struct megasas_instance *instance)
5955{
5956	u32 max_sectors_1;
5957	u32 max_sectors_2, tmp_sectors, msix_enable;
5958	u32 scratch_pad_1, scratch_pad_2, scratch_pad_3, status_reg;
5959	resource_size_t base_addr;
5960	void *base_addr_phys;
5961	struct megasas_ctrl_info *ctrl_info = NULL;
5962	unsigned long bar_list;
5963	int i, j, loop;
5964	struct IOV_111 *iovPtr;
5965	struct fusion_context *fusion;
5966	bool intr_coalescing;
5967	unsigned int num_msix_req;
5968	u16 lnksta, speed;
5969
5970	fusion = instance->ctrl_context;
5971
5972	/* Find first memory bar */
5973	bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM);
5974	instance->bar = find_first_bit(&bar_list, BITS_PER_LONG);
5975	if (pci_request_selected_regions(instance->pdev, 1<<instance->bar,
5976					 "megasas: LSI")) {
5977		dev_printk(KERN_DEBUG, &instance->pdev->dev, "IO memory region busy!\n");
5978		return -EBUSY;
5979	}
5980
5981	base_addr = pci_resource_start(instance->pdev, instance->bar);
5982	instance->reg_set = ioremap(base_addr, 8192);
5983
5984	if (!instance->reg_set) {
5985		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n");
5986		goto fail_ioremap;
5987	}
5988
5989	base_addr_phys = &base_addr;
5990	dev_printk(KERN_DEBUG, &instance->pdev->dev,
5991		   "BAR:0x%lx  BAR's base_addr(phys):%pa  mapped virt_addr:0x%p\n",
5992		   instance->bar, base_addr_phys, instance->reg_set);
5993
5994	if (instance->adapter_type != MFI_SERIES)
5995		instance->instancet = &megasas_instance_template_fusion;
5996	else {
5997		switch (instance->pdev->device) {
5998		case PCI_DEVICE_ID_LSI_SAS1078R:
5999		case PCI_DEVICE_ID_LSI_SAS1078DE:
6000			instance->instancet = &megasas_instance_template_ppc;
6001			break;
6002		case PCI_DEVICE_ID_LSI_SAS1078GEN2:
6003		case PCI_DEVICE_ID_LSI_SAS0079GEN2:
6004			instance->instancet = &megasas_instance_template_gen2;
6005			break;
6006		case PCI_DEVICE_ID_LSI_SAS0073SKINNY:
6007		case PCI_DEVICE_ID_LSI_SAS0071SKINNY:
6008			instance->instancet = &megasas_instance_template_skinny;
6009			break;
6010		case PCI_DEVICE_ID_LSI_SAS1064R:
6011		case PCI_DEVICE_ID_DELL_PERC5:
6012		default:
6013			instance->instancet = &megasas_instance_template_xscale;
6014			instance->pd_list_not_supported = 1;
6015			break;
6016		}
6017	}
6018
6019	if (megasas_transition_to_ready(instance, 0)) {
6020		dev_info(&instance->pdev->dev,
6021			 "Failed to transition controller to ready from %s!\n",
6022			 __func__);
6023		if (instance->adapter_type != MFI_SERIES) {
6024			status_reg = instance->instancet->read_fw_status_reg(
6025					instance);
6026			if (status_reg & MFI_RESET_ADAPTER) {
6027				if (megasas_adp_reset_wait_for_ready
6028					(instance, true, 0) == FAILED)
6029					goto fail_ready_state;
6030			} else {
6031				goto fail_ready_state;
6032			}
6033		} else {
6034			atomic_set(&instance->fw_reset_no_pci_access, 1);
6035			instance->instancet->adp_reset
6036				(instance, instance->reg_set);
6037			atomic_set(&instance->fw_reset_no_pci_access, 0);
6038
6039			/*waiting for about 30 second before retry*/
6040			ssleep(30);
6041
6042			if (megasas_transition_to_ready(instance, 0))
6043				goto fail_ready_state;
6044		}
6045
6046		dev_info(&instance->pdev->dev,
6047			 "FW restarted successfully from %s!\n",
6048			 __func__);
6049	}
6050
6051	megasas_init_ctrl_params(instance);
6052
6053	if (megasas_set_dma_mask(instance))
6054		goto fail_ready_state;
6055
6056	if (megasas_alloc_ctrl_mem(instance))
6057		goto fail_alloc_dma_buf;
6058
6059	if (megasas_alloc_ctrl_dma_buffers(instance))
6060		goto fail_alloc_dma_buf;
6061
6062	fusion = instance->ctrl_context;
6063
6064	if (instance->adapter_type >= VENTURA_SERIES) {
6065		scratch_pad_2 =
6066			megasas_readl(instance,
6067				      &instance->reg_set->outbound_scratch_pad_2);
6068		instance->max_raid_mapsize = ((scratch_pad_2 >>
6069			MR_MAX_RAID_MAP_SIZE_OFFSET_SHIFT) &
6070			MR_MAX_RAID_MAP_SIZE_MASK);
6071	}
6072
6073	instance->enable_sdev_max_qd = enable_sdev_max_qd;
6074
6075	switch (instance->adapter_type) {
6076	case VENTURA_SERIES:
6077		fusion->pcie_bw_limitation = true;
6078		break;
6079	case AERO_SERIES:
6080		fusion->r56_div_offload = true;
6081		break;
6082	default:
6083		break;
6084	}
6085
6086	/* Check if MSI-X is supported while in ready state */
6087	msix_enable = (instance->instancet->read_fw_status_reg(instance) &
6088		       0x4000000) >> 0x1a;
6089	if (msix_enable && !msix_disable) {
6090
6091		scratch_pad_1 = megasas_readl
6092			(instance, &instance->reg_set->outbound_scratch_pad_1);
6093		/* Check max MSI-X vectors */
6094		if (fusion) {
6095			if (instance->adapter_type == THUNDERBOLT_SERIES) {
6096				/* Thunderbolt Series*/
6097				instance->msix_vectors = (scratch_pad_1
6098					& MR_MAX_REPLY_QUEUES_OFFSET) + 1;
6099			} else {
6100				instance->msix_vectors = ((scratch_pad_1
6101					& MR_MAX_REPLY_QUEUES_EXT_OFFSET)
6102					>> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1;
6103
6104				/*
6105				 * For Invader series, > 8 MSI-x vectors
6106				 * supported by FW/HW implies combined
6107				 * reply queue mode is enabled.
6108				 * For Ventura series, > 16 MSI-x vectors
6109				 * supported by FW/HW implies combined
6110				 * reply queue mode is enabled.
6111				 */
6112				switch (instance->adapter_type) {
6113				case INVADER_SERIES:
6114					if (instance->msix_vectors > 8)
6115						instance->msix_combined = true;
6116					break;
6117				case AERO_SERIES:
6118				case VENTURA_SERIES:
6119					if (instance->msix_vectors > 16)
6120						instance->msix_combined = true;
6121					break;
6122				}
6123
6124				if (rdpq_enable)
6125					instance->is_rdpq = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ?
6126								1 : 0;
6127
6128				if (instance->adapter_type >= INVADER_SERIES &&
6129				    !instance->msix_combined) {
6130					instance->msix_load_balance = true;
6131					instance->smp_affinity_enable = false;
6132				}
6133
6134				/* Save 1-15 reply post index address to local memory
6135				 * Index 0 is already saved from reg offset
6136				 * MPI2_REPLY_POST_HOST_INDEX_OFFSET
6137				 */
6138				for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) {
6139					instance->reply_post_host_index_addr[loop] =
6140						(u32 __iomem *)
6141						((u8 __iomem *)instance->reg_set +
6142						MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET
6143						+ (loop * 0x10));
6144				}
6145			}
6146
6147			dev_info(&instance->pdev->dev,
6148				 "firmware supports msix\t: (%d)",
6149				 instance->msix_vectors);
6150			if (msix_vectors)
6151				instance->msix_vectors = min(msix_vectors,
6152					instance->msix_vectors);
6153		} else /* MFI adapters */
6154			instance->msix_vectors = 1;
6155
6156
6157		/*
6158		 * For Aero (if some conditions are met), driver will configure a
6159		 * few additional reply queues with interrupt coalescing enabled.
6160		 * These queues with interrupt coalescing enabled are called
6161		 * High IOPS queues and rest of reply queues (based on number of
6162		 * logical CPUs) are termed as Low latency queues.
6163		 *
6164		 * Total Number of reply queues = High IOPS queues + low latency queues
6165		 *
6166		 * For rest of fusion adapters, 1 additional reply queue will be
6167		 * reserved for management commands, rest of reply queues
6168		 * (based on number of logical CPUs) will be used for IOs and
6169		 * referenced as IO queues.
6170		 * Total Number of reply queues = 1 + IO queues
6171		 *
6172		 * MFI adapters supports single MSI-x so single reply queue
6173		 * will be used for IO and management commands.
6174		 */
6175
6176		intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
6177								true : false;
6178		if (intr_coalescing &&
6179			(num_online_cpus() >= MR_HIGH_IOPS_QUEUE_COUNT) &&
6180			(instance->msix_vectors == MEGASAS_MAX_MSIX_QUEUES))
6181			instance->perf_mode = MR_BALANCED_PERF_MODE;
6182		else
6183			instance->perf_mode = MR_LATENCY_PERF_MODE;
6184
6185
6186		if (instance->adapter_type == AERO_SERIES) {
6187			pcie_capability_read_word(instance->pdev, PCI_EXP_LNKSTA, &lnksta);
6188			speed = lnksta & PCI_EXP_LNKSTA_CLS;
6189
6190			/*
6191			 * For Aero, if PCIe link speed is <16 GT/s, then driver should operate
6192			 * in latency perf mode and enable R1 PCI bandwidth algorithm
6193			 */
6194			if (speed < 0x4) {
6195				instance->perf_mode = MR_LATENCY_PERF_MODE;
6196				fusion->pcie_bw_limitation = true;
6197			}
6198
6199			/*
6200			 * Performance mode settings provided through module parameter-perf_mode will
6201			 * take affect only for:
6202			 * 1. Aero family of adapters.
6203			 * 2. When user sets module parameter- perf_mode in range of 0-2.
6204			 */
6205			if ((perf_mode >= MR_BALANCED_PERF_MODE) &&
6206				(perf_mode <= MR_LATENCY_PERF_MODE))
6207				instance->perf_mode = perf_mode;
6208			/*
6209			 * If intr coalescing is not supported by controller FW, then IOPS
6210			 * and Balanced modes are not feasible.
6211			 */
6212			if (!intr_coalescing)
6213				instance->perf_mode = MR_LATENCY_PERF_MODE;
6214
6215		}
6216
6217		if (instance->perf_mode == MR_BALANCED_PERF_MODE)
6218			instance->low_latency_index_start =
6219				MR_HIGH_IOPS_QUEUE_COUNT;
6220		else
6221			instance->low_latency_index_start = 1;
6222
6223		num_msix_req = num_online_cpus() + instance->low_latency_index_start;
6224
6225		instance->msix_vectors = min(num_msix_req,
6226				instance->msix_vectors);
6227
6228		megasas_alloc_irq_vectors(instance);
6229		if (!instance->msix_vectors)
6230			instance->msix_load_balance = false;
6231	}
6232	/*
6233	 * MSI-X host index 0 is common for all adapter.
6234	 * It is used for all MPT based Adapters.
6235	 */
6236	if (instance->msix_combined) {
6237		instance->reply_post_host_index_addr[0] =
6238				(u32 *)((u8 *)instance->reg_set +
6239				MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET);
6240	} else {
6241		instance->reply_post_host_index_addr[0] =
6242			(u32 *)((u8 *)instance->reg_set +
6243			MPI2_REPLY_POST_HOST_INDEX_OFFSET);
6244	}
6245
6246	if (!instance->msix_vectors) {
6247		i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY);
6248		if (i < 0)
6249			goto fail_init_adapter;
6250	}
6251
6252	megasas_setup_reply_map(instance);
6253
6254	dev_info(&instance->pdev->dev,
6255		"current msix/online cpus\t: (%d/%d)\n",
6256		instance->msix_vectors, (unsigned int)num_online_cpus());
6257	dev_info(&instance->pdev->dev,
6258		"RDPQ mode\t: (%s)\n", instance->is_rdpq ? "enabled" : "disabled");
6259
6260	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
6261		(unsigned long)instance);
6262
6263	/*
6264	 * Below are default value for legacy Firmware.
6265	 * non-fusion based controllers
6266	 */
6267	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
6268	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
6269	/* Get operational params, sge flags, send init cmd to controller */
6270	if (instance->instancet->init_adapter(instance))
6271		goto fail_init_adapter;
6272
6273	if (instance->adapter_type >= VENTURA_SERIES) {
6274		scratch_pad_3 =
6275			megasas_readl(instance,
6276				      &instance->reg_set->outbound_scratch_pad_3);
6277		if ((scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK) >=
6278			MR_DEFAULT_NVME_PAGE_SHIFT)
6279			instance->nvme_page_size =
6280				(1 << (scratch_pad_3 & MR_NVME_PAGE_SIZE_MASK));
6281
6282		dev_info(&instance->pdev->dev,
6283			 "NVME page size\t: (%d)\n", instance->nvme_page_size);
6284	}
6285
6286	if (instance->msix_vectors ?
6287		megasas_setup_irqs_msix(instance, 1) :
6288		megasas_setup_irqs_ioapic(instance))
6289		goto fail_init_adapter;
6290
6291	if (instance->adapter_type != MFI_SERIES)
6292		megasas_setup_irq_poll(instance);
6293
6294	instance->instancet->enable_intr(instance);
6295
6296	dev_info(&instance->pdev->dev, "INIT adapter done\n");
6297
6298	megasas_setup_jbod_map(instance);
6299
6300	if (megasas_get_device_list(instance) != SUCCESS) {
6301		dev_err(&instance->pdev->dev,
6302			"%s: megasas_get_device_list failed\n",
6303			__func__);
6304		goto fail_get_ld_pd_list;
6305	}
6306
6307	/* stream detection initialization */
6308	if (instance->adapter_type >= VENTURA_SERIES) {
6309		fusion->stream_detect_by_ld =
6310			kcalloc(MAX_LOGICAL_DRIVES_EXT,
6311				sizeof(struct LD_STREAM_DETECT *),
6312				GFP_KERNEL);
6313		if (!fusion->stream_detect_by_ld) {
6314			dev_err(&instance->pdev->dev,
6315				"unable to allocate stream detection for pool of LDs\n");
6316			goto fail_get_ld_pd_list;
6317		}
6318		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i) {
6319			fusion->stream_detect_by_ld[i] =
6320				kzalloc(sizeof(struct LD_STREAM_DETECT),
6321				GFP_KERNEL);
6322			if (!fusion->stream_detect_by_ld[i]) {
6323				dev_err(&instance->pdev->dev,
6324					"unable to allocate stream detect by LD\n ");
6325				for (j = 0; j < i; ++j)
6326					kfree(fusion->stream_detect_by_ld[j]);
6327				kfree(fusion->stream_detect_by_ld);
6328				fusion->stream_detect_by_ld = NULL;
6329				goto fail_get_ld_pd_list;
6330			}
6331			fusion->stream_detect_by_ld[i]->mru_bit_map
6332				= MR_STREAM_BITMAP;
6333		}
6334	}
6335
6336	/*
6337	 * Compute the max allowed sectors per IO: The controller info has two
6338	 * limits on max sectors. Driver should use the minimum of these two.
6339	 *
6340	 * 1 << stripe_sz_ops.min = max sectors per strip
6341	 *
6342	 * Note that older firmwares ( < FW ver 30) didn't report information
6343	 * to calculate max_sectors_1. So the number ended up as zero always.
6344	 */
6345	tmp_sectors = 0;
6346	ctrl_info = instance->ctrl_info_buf;
6347
6348	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
6349		le16_to_cpu(ctrl_info->max_strips_per_io);
6350	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
6351
6352	tmp_sectors = min_t(u32, max_sectors_1, max_sectors_2);
6353
6354	instance->peerIsPresent = ctrl_info->cluster.peerIsPresent;
6355	instance->passive = ctrl_info->cluster.passive;
6356	memcpy(instance->clusterId, ctrl_info->clusterId, sizeof(instance->clusterId));
6357	instance->UnevenSpanSupport =
6358		ctrl_info->adapterOperations2.supportUnevenSpans;
6359	if (instance->UnevenSpanSupport) {
6360		struct fusion_context *fusion = instance->ctrl_context;
6361		if (MR_ValidateMapInfo(instance, instance->map_id))
6362			fusion->fast_path_io = 1;
6363		else
6364			fusion->fast_path_io = 0;
6365
6366	}
6367	if (ctrl_info->host_interface.SRIOV) {
6368		instance->requestorId = ctrl_info->iov.requestorId;
6369		if (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) {
6370			if (!ctrl_info->adapterOperations2.activePassive)
6371			    instance->PlasmaFW111 = 1;
6372
6373			dev_info(&instance->pdev->dev, "SR-IOV: firmware type: %s\n",
6374			    instance->PlasmaFW111 ? "1.11" : "new");
6375
6376			if (instance->PlasmaFW111) {
6377			    iovPtr = (struct IOV_111 *)
6378				((unsigned char *)ctrl_info + IOV_111_OFFSET);
6379			    instance->requestorId = iovPtr->requestorId;
6380			}
6381		}
6382		dev_info(&instance->pdev->dev, "SRIOV: VF requestorId %d\n",
6383			instance->requestorId);
6384	}
6385
6386	instance->crash_dump_fw_support =
6387		ctrl_info->adapterOperations3.supportCrashDump;
6388	instance->crash_dump_drv_support =
6389		(instance->crash_dump_fw_support &&
6390		instance->crash_dump_buf);
6391	if (instance->crash_dump_drv_support)
6392		megasas_set_crash_dump_params(instance,
6393			MR_CRASH_BUF_TURN_OFF);
6394
6395	else {
6396		if (instance->crash_dump_buf)
6397			dma_free_coherent(&instance->pdev->dev,
6398				CRASH_DMA_BUF_SIZE,
6399				instance->crash_dump_buf,
6400				instance->crash_dump_h);
6401		instance->crash_dump_buf = NULL;
6402	}
6403
6404	if (instance->snapdump_wait_time) {
6405		megasas_get_snapdump_properties(instance);
6406		dev_info(&instance->pdev->dev, "Snap dump wait time\t: %d\n",
6407			 instance->snapdump_wait_time);
6408	}
6409
6410	dev_info(&instance->pdev->dev,
6411		"pci id\t\t: (0x%04x)/(0x%04x)/(0x%04x)/(0x%04x)\n",
6412		le16_to_cpu(ctrl_info->pci.vendor_id),
6413		le16_to_cpu(ctrl_info->pci.device_id),
6414		le16_to_cpu(ctrl_info->pci.sub_vendor_id),
6415		le16_to_cpu(ctrl_info->pci.sub_device_id));
6416	dev_info(&instance->pdev->dev, "unevenspan support	: %s\n",
6417		instance->UnevenSpanSupport ? "yes" : "no");
6418	dev_info(&instance->pdev->dev, "firmware crash dump	: %s\n",
6419		instance->crash_dump_drv_support ? "yes" : "no");
6420	dev_info(&instance->pdev->dev, "JBOD sequence map	: %s\n",
6421		instance->use_seqnum_jbod_fp ? "enabled" : "disabled");
6422
6423	instance->max_sectors_per_req = instance->max_num_sge *
6424						SGE_BUFFER_SIZE / 512;
6425	if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors))
6426		instance->max_sectors_per_req = tmp_sectors;
6427
6428	/* Check for valid throttlequeuedepth module parameter */
6429	if (throttlequeuedepth &&
6430			throttlequeuedepth <= instance->max_scsi_cmds)
6431		instance->throttlequeuedepth = throttlequeuedepth;
6432	else
6433		instance->throttlequeuedepth =
6434				MEGASAS_THROTTLE_QUEUE_DEPTH;
6435
6436	if ((resetwaittime < 1) ||
6437	    (resetwaittime > MEGASAS_RESET_WAIT_TIME))
6438		resetwaittime = MEGASAS_RESET_WAIT_TIME;
6439
6440	if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
6441		scmd_timeout = MEGASAS_DEFAULT_CMD_TIMEOUT;
6442
6443	/* Launch SR-IOV heartbeat timer */
6444	if (instance->requestorId) {
6445		if (!megasas_sriov_start_heartbeat(instance, 1)) {
6446			megasas_start_timer(instance);
6447		} else {
6448			instance->skip_heartbeat_timer_del = 1;
6449			goto fail_get_ld_pd_list;
6450		}
6451	}
6452
6453	/*
6454	 * Create and start watchdog thread which will monitor
6455	 * controller state every 1 sec and trigger OCR when
6456	 * it enters fault state
6457	 */
6458	if (instance->adapter_type != MFI_SERIES)
6459		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
6460			goto fail_start_watchdog;
6461
6462	return 0;
6463
6464fail_start_watchdog:
6465	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
6466		del_timer_sync(&instance->sriov_heartbeat_timer);
6467fail_get_ld_pd_list:
6468	instance->instancet->disable_intr(instance);
6469	megasas_destroy_irqs(instance);
6470fail_init_adapter:
6471	if (instance->msix_vectors)
6472		pci_free_irq_vectors(instance->pdev);
6473	instance->msix_vectors = 0;
6474fail_alloc_dma_buf:
6475	megasas_free_ctrl_dma_buffers(instance);
6476	megasas_free_ctrl_mem(instance);
6477fail_ready_state:
6478	iounmap(instance->reg_set);
6479
6480fail_ioremap:
6481	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6482
6483	dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6484		__func__, __LINE__);
6485	return -EINVAL;
6486}
6487
6488/**
6489 * megasas_release_mfi -	Reverses the FW initialization
6490 * @instance:			Adapter soft state
6491 */
6492static void megasas_release_mfi(struct megasas_instance *instance)
6493{
6494	u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
6495
6496	if (instance->reply_queue)
6497		dma_free_coherent(&instance->pdev->dev, reply_q_sz,
6498			    instance->reply_queue, instance->reply_queue_h);
6499
6500	megasas_free_cmds(instance);
6501
6502	iounmap(instance->reg_set);
6503
6504	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
6505}
6506
6507/**
6508 * megasas_get_seq_num -	Gets latest event sequence numbers
6509 * @instance:			Adapter soft state
6510 * @eli:			FW event log sequence numbers information
6511 *
6512 * FW maintains a log of all events in a non-volatile area. Upper layers would
6513 * usually find out the latest sequence number of the events, the seq number at
6514 * the boot etc. They would "read" all the events below the latest seq number
6515 * by issuing a direct fw cmd (DCMD). For the future events (beyond latest seq
6516 * number), they would subsribe to AEN (asynchronous event notification) and
6517 * wait for the events to happen.
6518 */
6519static int
6520megasas_get_seq_num(struct megasas_instance *instance,
6521		    struct megasas_evt_log_info *eli)
6522{
6523	struct megasas_cmd *cmd;
6524	struct megasas_dcmd_frame *dcmd;
6525	struct megasas_evt_log_info *el_info;
6526	dma_addr_t el_info_h = 0;
6527	int ret;
6528
6529	cmd = megasas_get_cmd(instance);
6530
6531	if (!cmd) {
6532		return -ENOMEM;
6533	}
6534
6535	dcmd = &cmd->frame->dcmd;
6536	el_info = dma_alloc_coherent(&instance->pdev->dev,
6537				     sizeof(struct megasas_evt_log_info),
6538				     &el_info_h, GFP_KERNEL);
6539	if (!el_info) {
6540		megasas_return_cmd(instance, cmd);
6541		return -ENOMEM;
6542	}
6543
6544	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6545
6546	dcmd->cmd = MFI_CMD_DCMD;
6547	dcmd->cmd_status = 0x0;
6548	dcmd->sge_count = 1;
6549	dcmd->flags = MFI_FRAME_DIR_READ;
6550	dcmd->timeout = 0;
6551	dcmd->pad_0 = 0;
6552	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info));
6553	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO);
6554
6555	megasas_set_dma_settings(instance, dcmd, el_info_h,
6556				 sizeof(struct megasas_evt_log_info));
6557
6558	ret = megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS);
6559	if (ret != DCMD_SUCCESS) {
6560		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
6561			__func__, __LINE__);
6562		goto dcmd_failed;
6563	}
6564
6565	/*
6566	 * Copy the data back into callers buffer
6567	 */
6568	eli->newest_seq_num = el_info->newest_seq_num;
6569	eli->oldest_seq_num = el_info->oldest_seq_num;
6570	eli->clear_seq_num = el_info->clear_seq_num;
6571	eli->shutdown_seq_num = el_info->shutdown_seq_num;
6572	eli->boot_seq_num = el_info->boot_seq_num;
6573
6574dcmd_failed:
6575	dma_free_coherent(&instance->pdev->dev,
6576			sizeof(struct megasas_evt_log_info),
6577			el_info, el_info_h);
6578
6579	megasas_return_cmd(instance, cmd);
6580
6581	return ret;
6582}
6583
6584/**
6585 * megasas_register_aen -	Registers for asynchronous event notification
6586 * @instance:			Adapter soft state
6587 * @seq_num:			The starting sequence number
6588 * @class_locale_word:		Class of the event
6589 *
6590 * This function subscribes for AEN for events beyond the @seq_num. It requests
6591 * to be notified if and only if the event is of type @class_locale
6592 */
6593static int
6594megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
6595		     u32 class_locale_word)
6596{
6597	int ret_val;
6598	struct megasas_cmd *cmd;
6599	struct megasas_dcmd_frame *dcmd;
6600	union megasas_evt_class_locale curr_aen;
6601	union megasas_evt_class_locale prev_aen;
6602
6603	/*
6604	 * If there an AEN pending already (aen_cmd), check if the
6605	 * class_locale of that pending AEN is inclusive of the new
6606	 * AEN request we currently have. If it is, then we don't have
6607	 * to do anything. In other words, whichever events the current
6608	 * AEN request is subscribing to, have already been subscribed
6609	 * to.
6610	 *
6611	 * If the old_cmd is _not_ inclusive, then we have to abort
6612	 * that command, form a class_locale that is superset of both
6613	 * old and current and re-issue to the FW
6614	 */
6615
6616	curr_aen.word = class_locale_word;
6617
6618	if (instance->aen_cmd) {
6619
6620		prev_aen.word =
6621			le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
6622
6623		if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
6624		    (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
6625			dev_info(&instance->pdev->dev,
6626				 "%s %d out of range class %d send by application\n",
6627				 __func__, __LINE__, curr_aen.members.class);
6628			return 0;
6629		}
6630
6631		/*
6632		 * A class whose enum value is smaller is inclusive of all
6633		 * higher values. If a PROGRESS (= -1) was previously
6634		 * registered, then a new registration requests for higher
6635		 * classes need not be sent to FW. They are automatically
6636		 * included.
6637		 *
6638		 * Locale numbers don't have such hierarchy. They are bitmap
6639		 * values
6640		 */
6641		if ((prev_aen.members.class <= curr_aen.members.class) &&
6642		    !((prev_aen.members.locale & curr_aen.members.locale) ^
6643		      curr_aen.members.locale)) {
6644			/*
6645			 * Previously issued event registration includes
6646			 * current request. Nothing to do.
6647			 */
6648			return 0;
6649		} else {
6650			curr_aen.members.locale |= prev_aen.members.locale;
6651
6652			if (prev_aen.members.class < curr_aen.members.class)
6653				curr_aen.members.class = prev_aen.members.class;
6654
6655			instance->aen_cmd->abort_aen = 1;
6656			ret_val = megasas_issue_blocked_abort_cmd(instance,
6657								  instance->
6658								  aen_cmd, 30);
6659
6660			if (ret_val) {
6661				dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to abort "
6662				       "previous AEN command\n");
6663				return ret_val;
6664			}
6665		}
6666	}
6667
6668	cmd = megasas_get_cmd(instance);
6669
6670	if (!cmd)
6671		return -ENOMEM;
6672
6673	dcmd = &cmd->frame->dcmd;
6674
6675	memset(instance->evt_detail, 0, sizeof(struct megasas_evt_detail));
6676
6677	/*
6678	 * Prepare DCMD for aen registration
6679	 */
6680	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6681
6682	dcmd->cmd = MFI_CMD_DCMD;
6683	dcmd->cmd_status = 0x0;
6684	dcmd->sge_count = 1;
6685	dcmd->flags = MFI_FRAME_DIR_READ;
6686	dcmd->timeout = 0;
6687	dcmd->pad_0 = 0;
6688	dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail));
6689	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT);
6690	dcmd->mbox.w[0] = cpu_to_le32(seq_num);
6691	instance->last_seq_num = seq_num;
6692	dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word);
6693
6694	megasas_set_dma_settings(instance, dcmd, instance->evt_detail_h,
6695				 sizeof(struct megasas_evt_detail));
6696
6697	if (instance->aen_cmd != NULL) {
6698		megasas_return_cmd(instance, cmd);
6699		return 0;
6700	}
6701
6702	/*
6703	 * Store reference to the cmd used to register for AEN. When an
6704	 * application wants us to register for AEN, we have to abort this
6705	 * cmd and re-register with a new EVENT LOCALE supplied by that app
6706	 */
6707	instance->aen_cmd = cmd;
6708
6709	/*
6710	 * Issue the aen registration frame
6711	 */
6712	instance->instancet->issue_dcmd(instance, cmd);
6713
6714	return 0;
6715}
6716
6717/* megasas_get_target_prop - Send DCMD with below details to firmware.
6718 *
6719 * This DCMD will fetch few properties of LD/system PD defined
6720 * in MR_TARGET_DEV_PROPERTIES. eg. Queue Depth, MDTS value.
6721 *
6722 * DCMD send by drivers whenever new target is added to the OS.
6723 *
6724 * dcmd.opcode         - MR_DCMD_DEV_GET_TARGET_PROP
6725 * dcmd.mbox.b[0]      - DCMD is to be fired for LD or system PD.
6726 *                       0 = system PD, 1 = LD.
6727 * dcmd.mbox.s[1]      - TargetID for LD/system PD.
6728 * dcmd.sge IN         - Pointer to return MR_TARGET_DEV_PROPERTIES.
6729 *
6730 * @instance:		Adapter soft state
6731 * @sdev:		OS provided scsi device
6732 *
6733 * Returns 0 on success non-zero on failure.
6734 */
6735int
6736megasas_get_target_prop(struct megasas_instance *instance,
6737			struct scsi_device *sdev)
6738{
6739	int ret;
6740	struct megasas_cmd *cmd;
6741	struct megasas_dcmd_frame *dcmd;
6742	u16 targetId = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +
6743			sdev->id;
6744
6745	cmd = megasas_get_cmd(instance);
6746
6747	if (!cmd) {
6748		dev_err(&instance->pdev->dev,
6749			"Failed to get cmd %s\n", __func__);
6750		return -ENOMEM;
6751	}
6752
6753	dcmd = &cmd->frame->dcmd;
6754
6755	memset(instance->tgt_prop, 0, sizeof(*instance->tgt_prop));
6756	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
6757	dcmd->mbox.b[0] = MEGASAS_IS_LOGICAL(sdev);
6758
6759	dcmd->mbox.s[1] = cpu_to_le16(targetId);
6760	dcmd->cmd = MFI_CMD_DCMD;
6761	dcmd->cmd_status = 0xFF;
6762	dcmd->sge_count = 1;
6763	dcmd->flags = MFI_FRAME_DIR_READ;
6764	dcmd->timeout = 0;
6765	dcmd->pad_0 = 0;
6766	dcmd->data_xfer_len =
6767		cpu_to_le32(sizeof(struct MR_TARGET_PROPERTIES));
6768	dcmd->opcode = cpu_to_le32(MR_DCMD_DRV_GET_TARGET_PROP);
6769
6770	megasas_set_dma_settings(instance, dcmd, instance->tgt_prop_h,
6771				 sizeof(struct MR_TARGET_PROPERTIES));
6772
6773	if ((instance->adapter_type != MFI_SERIES) &&
6774	    !instance->mask_interrupts)
6775		ret = megasas_issue_blocked_cmd(instance,
6776						cmd, MFI_IO_TIMEOUT_SECS);
6777	else
6778		ret = megasas_issue_polled(instance, cmd);
6779
6780	switch (ret) {
6781	case DCMD_TIMEOUT:
6782		switch (dcmd_timeout_ocr_possible(instance)) {
6783		case INITIATE_OCR:
6784			cmd->flags |= DRV_DCMD_SKIP_REFIRE;
6785			mutex_unlock(&instance->reset_mutex);
6786			megasas_reset_fusion(instance->host,
6787					     MFI_IO_TIMEOUT_OCR);
6788			mutex_lock(&instance->reset_mutex);
6789			break;
6790		case KILL_ADAPTER:
6791			megaraid_sas_kill_hba(instance);
6792			break;
6793		case IGNORE_TIMEOUT:
6794			dev_info(&instance->pdev->dev,
6795				 "Ignore DCMD timeout: %s %d\n",
6796				 __func__, __LINE__);
6797			break;
6798		}
6799		break;
6800
6801	default:
6802		megasas_return_cmd(instance, cmd);
6803	}
6804	if (ret != DCMD_SUCCESS)
6805		dev_err(&instance->pdev->dev,
6806			"return from %s %d return value %d\n",
6807			__func__, __LINE__, ret);
6808
6809	return ret;
6810}
6811
6812/**
6813 * megasas_start_aen -	Subscribes to AEN during driver load time
6814 * @instance:		Adapter soft state
6815 */
6816static int megasas_start_aen(struct megasas_instance *instance)
6817{
6818	struct megasas_evt_log_info eli;
6819	union megasas_evt_class_locale class_locale;
6820
6821	/*
6822	 * Get the latest sequence number from FW
6823	 */
6824	memset(&eli, 0, sizeof(eli));
6825
6826	if (megasas_get_seq_num(instance, &eli))
6827		return -1;
6828
6829	/*
6830	 * Register AEN with FW for latest sequence number plus 1
6831	 */
6832	class_locale.members.reserved = 0;
6833	class_locale.members.locale = MR_EVT_LOCALE_ALL;
6834	class_locale.members.class = MR_EVT_CLASS_DEBUG;
6835
6836	return megasas_register_aen(instance,
6837			le32_to_cpu(eli.newest_seq_num) + 1,
6838			class_locale.word);
6839}
6840
6841/**
6842 * megasas_io_attach -	Attaches this driver to SCSI mid-layer
6843 * @instance:		Adapter soft state
6844 */
6845static int megasas_io_attach(struct megasas_instance *instance)
6846{
6847	struct Scsi_Host *host = instance->host;
6848
6849	/*
6850	 * Export parameters required by SCSI mid-layer
6851	 */
6852	host->unique_id = instance->unique_id;
6853	host->can_queue = instance->max_scsi_cmds;
6854	host->this_id = instance->init_id;
6855	host->sg_tablesize = instance->max_num_sge;
6856
6857	if (instance->fw_support_ieee)
6858		instance->max_sectors_per_req = MEGASAS_MAX_SECTORS_IEEE;
6859
6860	/*
6861	 * Check if the module parameter value for max_sectors can be used
6862	 */
6863	if (max_sectors && max_sectors < instance->max_sectors_per_req)
6864		instance->max_sectors_per_req = max_sectors;
6865	else {
6866		if (max_sectors) {
6867			if (((instance->pdev->device ==
6868				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
6869				(instance->pdev->device ==
6870				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
6871				(max_sectors <= MEGASAS_MAX_SECTORS)) {
6872				instance->max_sectors_per_req = max_sectors;
6873			} else {
6874			dev_info(&instance->pdev->dev, "max_sectors should be > 0"
6875				"and <= %d (or < 1MB for GEN2 controller)\n",
6876				instance->max_sectors_per_req);
6877			}
6878		}
6879	}
6880
6881	host->max_sectors = instance->max_sectors_per_req;
6882	host->cmd_per_lun = MEGASAS_DEFAULT_CMD_PER_LUN;
6883	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
6884	host->max_id = MEGASAS_MAX_DEV_PER_CHANNEL;
6885	host->max_lun = MEGASAS_MAX_LUN;
6886	host->max_cmd_len = 16;
6887
6888	/* Use shared host tagset only for fusion adaptors
6889	 * if there are managed interrupts (smp affinity enabled case).
6890	 * Single msix_vectors in kdump, so shared host tag is also disabled.
6891	 */
6892
6893	host->host_tagset = 0;
6894	host->nr_hw_queues = 1;
6895
6896	if ((instance->adapter_type != MFI_SERIES) &&
6897		(instance->msix_vectors > instance->low_latency_index_start) &&
6898		host_tagset_enable &&
6899		instance->smp_affinity_enable) {
6900		host->host_tagset = 1;
6901		host->nr_hw_queues = instance->msix_vectors -
6902			instance->low_latency_index_start;
6903	}
6904
6905	dev_info(&instance->pdev->dev,
6906		"Max firmware commands: %d shared with nr_hw_queues = %d\n",
6907		instance->max_fw_cmds, host->nr_hw_queues);
6908	/*
6909	 * Notify the mid-layer about the new controller
6910	 */
6911	if (scsi_add_host(host, &instance->pdev->dev)) {
6912		dev_err(&instance->pdev->dev,
6913			"Failed to add host from %s %d\n",
6914			__func__, __LINE__);
6915		return -ENODEV;
6916	}
6917
6918	return 0;
6919}
6920
6921/**
6922 * megasas_set_dma_mask -	Set DMA mask for supported controllers
6923 *
6924 * @instance:		Adapter soft state
6925 * Description:
6926 *
6927 * For Ventura, driver/FW will operate in 63bit DMA addresses.
6928 *
6929 * For invader-
6930 *	By default, driver/FW will operate in 32bit DMA addresses
6931 *	for consistent DMA mapping but if 32 bit consistent
6932 *	DMA mask fails, driver will try with 63 bit consistent
6933 *	mask provided FW is true 63bit DMA capable
6934 *
6935 * For older controllers(Thunderbolt and MFI based adapters)-
6936 *	driver/FW will operate in 32 bit consistent DMA addresses.
6937 */
6938static int
6939megasas_set_dma_mask(struct megasas_instance *instance)
6940{
6941	u64 consistent_mask;
6942	struct pci_dev *pdev;
6943	u32 scratch_pad_1;
6944
6945	pdev = instance->pdev;
6946	consistent_mask = (instance->adapter_type >= VENTURA_SERIES) ?
6947				DMA_BIT_MASK(63) : DMA_BIT_MASK(32);
6948
6949	if (IS_DMA64) {
6950		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(63)) &&
6951		    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6952			goto fail_set_dma_mask;
6953
6954		if ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) &&
6955		    (dma_set_coherent_mask(&pdev->dev, consistent_mask) &&
6956		     dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))) {
6957			/*
6958			 * If 32 bit DMA mask fails, then try for 64 bit mask
6959			 * for FW capable of handling 64 bit DMA.
6960			 */
6961			scratch_pad_1 = megasas_readl
6962				(instance, &instance->reg_set->outbound_scratch_pad_1);
6963
6964			if (!(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET))
6965				goto fail_set_dma_mask;
6966			else if (dma_set_mask_and_coherent(&pdev->dev,
6967							   DMA_BIT_MASK(63)))
6968				goto fail_set_dma_mask;
6969		}
6970	} else if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
6971		goto fail_set_dma_mask;
6972
6973	if (pdev->dev.coherent_dma_mask == DMA_BIT_MASK(32))
6974		instance->consistent_mask_64bit = false;
6975	else
6976		instance->consistent_mask_64bit = true;
6977
6978	dev_info(&pdev->dev, "%s bit DMA mask and %s bit consistent mask\n",
6979		 ((*pdev->dev.dma_mask == DMA_BIT_MASK(63)) ? "63" : "32"),
6980		 (instance->consistent_mask_64bit ? "63" : "32"));
6981
6982	return 0;
6983
6984fail_set_dma_mask:
6985	dev_err(&pdev->dev, "Failed to set DMA mask\n");
6986	return -1;
6987
6988}
6989
6990/*
6991 * megasas_set_adapter_type -	Set adapter type.
6992 *				Supported controllers can be divided in
6993 *				different categories-
6994 *					enum MR_ADAPTER_TYPE {
6995 *						MFI_SERIES = 1,
6996 *						THUNDERBOLT_SERIES = 2,
6997 *						INVADER_SERIES = 3,
6998 *						VENTURA_SERIES = 4,
6999 *						AERO_SERIES = 5,
7000 *					};
7001 * @instance:			Adapter soft state
7002 * return:			void
7003 */
7004static inline void megasas_set_adapter_type(struct megasas_instance *instance)
7005{
7006	if ((instance->pdev->vendor == PCI_VENDOR_ID_DELL) &&
7007	    (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5)) {
7008		instance->adapter_type = MFI_SERIES;
7009	} else {
7010		switch (instance->pdev->device) {
7011		case PCI_DEVICE_ID_LSI_AERO_10E1:
7012		case PCI_DEVICE_ID_LSI_AERO_10E2:
7013		case PCI_DEVICE_ID_LSI_AERO_10E5:
7014		case PCI_DEVICE_ID_LSI_AERO_10E6:
7015			instance->adapter_type = AERO_SERIES;
7016			break;
7017		case PCI_DEVICE_ID_LSI_VENTURA:
7018		case PCI_DEVICE_ID_LSI_CRUSADER:
7019		case PCI_DEVICE_ID_LSI_HARPOON:
7020		case PCI_DEVICE_ID_LSI_TOMCAT:
7021		case PCI_DEVICE_ID_LSI_VENTURA_4PORT:
7022		case PCI_DEVICE_ID_LSI_CRUSADER_4PORT:
7023			instance->adapter_type = VENTURA_SERIES;
7024			break;
7025		case PCI_DEVICE_ID_LSI_FUSION:
7026		case PCI_DEVICE_ID_LSI_PLASMA:
7027			instance->adapter_type = THUNDERBOLT_SERIES;
7028			break;
7029		case PCI_DEVICE_ID_LSI_INVADER:
7030		case PCI_DEVICE_ID_LSI_INTRUDER:
7031		case PCI_DEVICE_ID_LSI_INTRUDER_24:
7032		case PCI_DEVICE_ID_LSI_CUTLASS_52:
7033		case PCI_DEVICE_ID_LSI_CUTLASS_53:
7034		case PCI_DEVICE_ID_LSI_FURY:
7035			instance->adapter_type = INVADER_SERIES;
7036			break;
7037		default: /* For all other supported controllers */
7038			instance->adapter_type = MFI_SERIES;
7039			break;
7040		}
7041	}
7042}
7043
7044static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
7045{
7046	instance->producer = dma_alloc_coherent(&instance->pdev->dev,
7047			sizeof(u32), &instance->producer_h, GFP_KERNEL);
7048	instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
7049			sizeof(u32), &instance->consumer_h, GFP_KERNEL);
7050
7051	if (!instance->producer || !instance->consumer) {
7052		dev_err(&instance->pdev->dev,
7053			"Failed to allocate memory for producer, consumer\n");
7054		return -1;
7055	}
7056
7057	*instance->producer = 0;
7058	*instance->consumer = 0;
7059	return 0;
7060}
7061
7062/**
7063 * megasas_alloc_ctrl_mem -	Allocate per controller memory for core data
7064 *				structures which are not common across MFI
7065 *				adapters and fusion adapters.
7066 *				For MFI based adapters, allocate producer and
7067 *				consumer buffers. For fusion adapters, allocate
7068 *				memory for fusion context.
7069 * @instance:			Adapter soft state
7070 * return:			0 for SUCCESS
7071 */
7072static int megasas_alloc_ctrl_mem(struct megasas_instance *instance)
7073{
7074	instance->reply_map = kcalloc(nr_cpu_ids, sizeof(unsigned int),
7075				      GFP_KERNEL);
7076	if (!instance->reply_map)
7077		return -ENOMEM;
7078
7079	switch (instance->adapter_type) {
7080	case MFI_SERIES:
7081		if (megasas_alloc_mfi_ctrl_mem(instance))
7082			goto fail;
7083		break;
7084	case AERO_SERIES:
7085	case VENTURA_SERIES:
7086	case THUNDERBOLT_SERIES:
7087	case INVADER_SERIES:
7088		if (megasas_alloc_fusion_context(instance))
7089			goto fail;
7090		break;
7091	}
7092
7093	return 0;
7094 fail:
7095	kfree(instance->reply_map);
7096	instance->reply_map = NULL;
7097	return -ENOMEM;
7098}
7099
7100/*
7101 * megasas_free_ctrl_mem -	Free fusion context for fusion adapters and
7102 *				producer, consumer buffers for MFI adapters
7103 *
7104 * @instance -			Adapter soft instance
7105 *
7106 */
7107static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
7108{
7109	kfree(instance->reply_map);
7110	if (instance->adapter_type == MFI_SERIES) {
7111		if (instance->producer)
7112			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7113					    instance->producer,
7114					    instance->producer_h);
7115		if (instance->consumer)
7116			dma_free_coherent(&instance->pdev->dev, sizeof(u32),
7117					    instance->consumer,
7118					    instance->consumer_h);
7119	} else {
7120		megasas_free_fusion_context(instance);
7121	}
7122}
7123
7124/**
7125 * megasas_alloc_ctrl_dma_buffers -	Allocate consistent DMA buffers during
7126 *					driver load time
7127 *
7128 * @instance:				Adapter soft instance
7129 *
7130 * @return:				O for SUCCESS
7131 */
7132static inline
7133int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
7134{
7135	struct pci_dev *pdev = instance->pdev;
7136	struct fusion_context *fusion = instance->ctrl_context;
7137
7138	instance->evt_detail = dma_alloc_coherent(&pdev->dev,
7139			sizeof(struct megasas_evt_detail),
7140			&instance->evt_detail_h, GFP_KERNEL);
7141
7142	if (!instance->evt_detail) {
7143		dev_err(&instance->pdev->dev,
7144			"Failed to allocate event detail buffer\n");
7145		return -ENOMEM;
7146	}
7147
7148	if (fusion) {
7149		fusion->ioc_init_request =
7150			dma_alloc_coherent(&pdev->dev,
7151					   sizeof(struct MPI2_IOC_INIT_REQUEST),
7152					   &fusion->ioc_init_request_phys,
7153					   GFP_KERNEL);
7154
7155		if (!fusion->ioc_init_request) {
7156			dev_err(&pdev->dev,
7157				"Failed to allocate PD list buffer\n");
7158			return -ENOMEM;
7159		}
7160
7161		instance->snapdump_prop = dma_alloc_coherent(&pdev->dev,
7162				sizeof(struct MR_SNAPDUMP_PROPERTIES),
7163				&instance->snapdump_prop_h, GFP_KERNEL);
7164
7165		if (!instance->snapdump_prop)
7166			dev_err(&pdev->dev,
7167				"Failed to allocate snapdump properties buffer\n");
7168
7169		instance->host_device_list_buf = dma_alloc_coherent(&pdev->dev,
7170							HOST_DEVICE_LIST_SZ,
7171							&instance->host_device_list_buf_h,
7172							GFP_KERNEL);
7173
7174		if (!instance->host_device_list_buf) {
7175			dev_err(&pdev->dev,
7176				"Failed to allocate targetid list buffer\n");
7177			return -ENOMEM;
7178		}
7179
7180	}
7181
7182	instance->pd_list_buf =
7183		dma_alloc_coherent(&pdev->dev,
7184				     MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7185				     &instance->pd_list_buf_h, GFP_KERNEL);
7186
7187	if (!instance->pd_list_buf) {
7188		dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
7189		return -ENOMEM;
7190	}
7191
7192	instance->ctrl_info_buf =
7193		dma_alloc_coherent(&pdev->dev,
7194				     sizeof(struct megasas_ctrl_info),
7195				     &instance->ctrl_info_buf_h, GFP_KERNEL);
7196
7197	if (!instance->ctrl_info_buf) {
7198		dev_err(&pdev->dev,
7199			"Failed to allocate controller info buffer\n");
7200		return -ENOMEM;
7201	}
7202
7203	instance->ld_list_buf =
7204		dma_alloc_coherent(&pdev->dev,
7205				     sizeof(struct MR_LD_LIST),
7206				     &instance->ld_list_buf_h, GFP_KERNEL);
7207
7208	if (!instance->ld_list_buf) {
7209		dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
7210		return -ENOMEM;
7211	}
7212
7213	instance->ld_targetid_list_buf =
7214		dma_alloc_coherent(&pdev->dev,
7215				sizeof(struct MR_LD_TARGETID_LIST),
7216				&instance->ld_targetid_list_buf_h, GFP_KERNEL);
7217
7218	if (!instance->ld_targetid_list_buf) {
7219		dev_err(&pdev->dev,
7220			"Failed to allocate LD targetid list buffer\n");
7221		return -ENOMEM;
7222	}
7223
7224	if (!reset_devices) {
7225		instance->system_info_buf =
7226			dma_alloc_coherent(&pdev->dev,
7227					sizeof(struct MR_DRV_SYSTEM_INFO),
7228					&instance->system_info_h, GFP_KERNEL);
7229		instance->pd_info =
7230			dma_alloc_coherent(&pdev->dev,
7231					sizeof(struct MR_PD_INFO),
7232					&instance->pd_info_h, GFP_KERNEL);
7233		instance->tgt_prop =
7234			dma_alloc_coherent(&pdev->dev,
7235					sizeof(struct MR_TARGET_PROPERTIES),
7236					&instance->tgt_prop_h, GFP_KERNEL);
7237		instance->crash_dump_buf =
7238			dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7239					&instance->crash_dump_h, GFP_KERNEL);
7240
7241		if (!instance->system_info_buf)
7242			dev_err(&instance->pdev->dev,
7243				"Failed to allocate system info buffer\n");
7244
7245		if (!instance->pd_info)
7246			dev_err(&instance->pdev->dev,
7247				"Failed to allocate pd_info buffer\n");
7248
7249		if (!instance->tgt_prop)
7250			dev_err(&instance->pdev->dev,
7251				"Failed to allocate tgt_prop buffer\n");
7252
7253		if (!instance->crash_dump_buf)
7254			dev_err(&instance->pdev->dev,
7255				"Failed to allocate crash dump buffer\n");
7256	}
7257
7258	return 0;
7259}
7260
7261/*
7262 * megasas_free_ctrl_dma_buffers -	Free consistent DMA buffers allocated
7263 *					during driver load time
7264 *
7265 * @instance-				Adapter soft instance
7266 *
7267 */
7268static inline
7269void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
7270{
7271	struct pci_dev *pdev = instance->pdev;
7272	struct fusion_context *fusion = instance->ctrl_context;
7273
7274	if (instance->evt_detail)
7275		dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
7276				    instance->evt_detail,
7277				    instance->evt_detail_h);
7278
7279	if (fusion && fusion->ioc_init_request)
7280		dma_free_coherent(&pdev->dev,
7281				  sizeof(struct MPI2_IOC_INIT_REQUEST),
7282				  fusion->ioc_init_request,
7283				  fusion->ioc_init_request_phys);
7284
7285	if (instance->pd_list_buf)
7286		dma_free_coherent(&pdev->dev,
7287				    MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
7288				    instance->pd_list_buf,
7289				    instance->pd_list_buf_h);
7290
7291	if (instance->ld_list_buf)
7292		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
7293				    instance->ld_list_buf,
7294				    instance->ld_list_buf_h);
7295
7296	if (instance->ld_targetid_list_buf)
7297		dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
7298				    instance->ld_targetid_list_buf,
7299				    instance->ld_targetid_list_buf_h);
7300
7301	if (instance->ctrl_info_buf)
7302		dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
7303				    instance->ctrl_info_buf,
7304				    instance->ctrl_info_buf_h);
7305
7306	if (instance->system_info_buf)
7307		dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
7308				    instance->system_info_buf,
7309				    instance->system_info_h);
7310
7311	if (instance->pd_info)
7312		dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
7313				    instance->pd_info, instance->pd_info_h);
7314
7315	if (instance->tgt_prop)
7316		dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
7317				    instance->tgt_prop, instance->tgt_prop_h);
7318
7319	if (instance->crash_dump_buf)
7320		dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
7321				    instance->crash_dump_buf,
7322				    instance->crash_dump_h);
7323
7324	if (instance->snapdump_prop)
7325		dma_free_coherent(&pdev->dev,
7326				  sizeof(struct MR_SNAPDUMP_PROPERTIES),
7327				  instance->snapdump_prop,
7328				  instance->snapdump_prop_h);
7329
7330	if (instance->host_device_list_buf)
7331		dma_free_coherent(&pdev->dev,
7332				  HOST_DEVICE_LIST_SZ,
7333				  instance->host_device_list_buf,
7334				  instance->host_device_list_buf_h);
7335
7336}
7337
7338/*
7339 * megasas_init_ctrl_params -		Initialize controller's instance
7340 *					parameters before FW init
7341 * @instance -				Adapter soft instance
7342 * @return -				void
7343 */
7344static inline void megasas_init_ctrl_params(struct megasas_instance *instance)
7345{
7346	instance->fw_crash_state = UNAVAILABLE;
7347
7348	megasas_poll_wait_aen = 0;
7349	instance->issuepend_done = 1;
7350	atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
7351
7352	/*
7353	 * Initialize locks and queues
7354	 */
7355	INIT_LIST_HEAD(&instance->cmd_pool);
7356	INIT_LIST_HEAD(&instance->internal_reset_pending_q);
7357
7358	atomic_set(&instance->fw_outstanding, 0);
7359	atomic64_set(&instance->total_io_count, 0);
7360
7361	init_waitqueue_head(&instance->int_cmd_wait_q);
7362	init_waitqueue_head(&instance->abort_cmd_wait_q);
7363
7364	mutex_init(&instance->crashdump_lock);
7365	spin_lock_init(&instance->mfi_pool_lock);
7366	spin_lock_init(&instance->hba_lock);
7367	spin_lock_init(&instance->stream_lock);
7368	spin_lock_init(&instance->completion_lock);
7369
7370	mutex_init(&instance->reset_mutex);
7371
7372	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
7373	    (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY))
7374		instance->flag_ieee = 1;
7375
7376	megasas_dbg_lvl = 0;
7377	instance->flag = 0;
7378	instance->unload = 1;
7379	instance->last_time = 0;
7380	instance->disableOnlineCtrlReset = 1;
7381	instance->UnevenSpanSupport = 0;
7382	instance->smp_affinity_enable = smp_affinity_enable ? true : false;
7383	instance->msix_load_balance = false;
7384
7385	if (instance->adapter_type != MFI_SERIES)
7386		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
7387	else
7388		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
7389}
7390
7391/**
7392 * megasas_probe_one -	PCI hotplug entry point
7393 * @pdev:		PCI device structure
7394 * @id:			PCI ids of supported hotplugged adapter
7395 */
7396static int megasas_probe_one(struct pci_dev *pdev,
7397			     const struct pci_device_id *id)
7398{
7399	int rval, pos;
7400	struct Scsi_Host *host;
7401	struct megasas_instance *instance;
7402	u16 control = 0;
7403
7404	switch (pdev->device) {
7405	case PCI_DEVICE_ID_LSI_AERO_10E0:
7406	case PCI_DEVICE_ID_LSI_AERO_10E3:
7407	case PCI_DEVICE_ID_LSI_AERO_10E4:
7408	case PCI_DEVICE_ID_LSI_AERO_10E7:
7409		dev_err(&pdev->dev, "Adapter is in non secure mode\n");
7410		return 1;
7411	case PCI_DEVICE_ID_LSI_AERO_10E1:
7412	case PCI_DEVICE_ID_LSI_AERO_10E5:
7413		dev_info(&pdev->dev, "Adapter is in configurable secure mode\n");
7414		break;
7415	}
7416
7417	/* Reset MSI-X in the kdump kernel */
7418	if (reset_devices) {
7419		pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
7420		if (pos) {
7421			pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS,
7422					     &control);
7423			if (control & PCI_MSIX_FLAGS_ENABLE) {
7424				dev_info(&pdev->dev, "resetting MSI-X\n");
7425				pci_write_config_word(pdev,
7426						      pos + PCI_MSIX_FLAGS,
7427						      control &
7428						      ~PCI_MSIX_FLAGS_ENABLE);
7429			}
7430		}
7431	}
7432
7433	/*
7434	 * PCI prepping: enable device set bus mastering and dma mask
7435	 */
7436	rval = pci_enable_device_mem(pdev);
7437
7438	if (rval) {
7439		return rval;
7440	}
7441
7442	pci_set_master(pdev);
7443
7444	host = scsi_host_alloc(&megasas_template,
7445			       sizeof(struct megasas_instance));
7446
7447	if (!host) {
7448		dev_printk(KERN_DEBUG, &pdev->dev, "scsi_host_alloc failed\n");
7449		goto fail_alloc_instance;
7450	}
7451
7452	instance = (struct megasas_instance *)host->hostdata;
7453	memset(instance, 0, sizeof(*instance));
7454	atomic_set(&instance->fw_reset_no_pci_access, 0);
7455
7456	/*
7457	 * Initialize PCI related and misc parameters
7458	 */
7459	instance->pdev = pdev;
7460	instance->host = host;
7461	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
7462	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
7463
7464	megasas_set_adapter_type(instance);
7465
7466	/*
7467	 * Initialize MFI Firmware
7468	 */
7469	if (megasas_init_fw(instance))
7470		goto fail_init_mfi;
7471
7472	if (instance->requestorId) {
7473		if (instance->PlasmaFW111) {
7474			instance->vf_affiliation_111 =
7475				dma_alloc_coherent(&pdev->dev,
7476					sizeof(struct MR_LD_VF_AFFILIATION_111),
7477					&instance->vf_affiliation_111_h,
7478					GFP_KERNEL);
7479			if (!instance->vf_affiliation_111)
7480				dev_warn(&pdev->dev, "Can't allocate "
7481				       "memory for VF affiliation buffer\n");
7482		} else {
7483			instance->vf_affiliation =
7484				dma_alloc_coherent(&pdev->dev,
7485					(MAX_LOGICAL_DRIVES + 1) *
7486					sizeof(struct MR_LD_VF_AFFILIATION),
7487					&instance->vf_affiliation_h,
7488					GFP_KERNEL);
7489			if (!instance->vf_affiliation)
7490				dev_warn(&pdev->dev, "Can't allocate "
7491				       "memory for VF affiliation buffer\n");
7492		}
7493	}
7494
7495	/*
7496	 * Store instance in PCI softstate
7497	 */
7498	pci_set_drvdata(pdev, instance);
7499
7500	/*
7501	 * Add this controller to megasas_mgmt_info structure so that it
7502	 * can be exported to management applications
7503	 */
7504	megasas_mgmt_info.count++;
7505	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = instance;
7506	megasas_mgmt_info.max_index++;
7507
7508	/*
7509	 * Register with SCSI mid-layer
7510	 */
7511	if (megasas_io_attach(instance))
7512		goto fail_io_attach;
7513
7514	instance->unload = 0;
7515	/*
7516	 * Trigger SCSI to scan our drives
7517	 */
7518	if (!instance->enable_fw_dev_list ||
7519	    (instance->host_device_list_buf->count > 0))
7520		scsi_scan_host(host);
7521
7522	/*
7523	 * Initiate AEN (Asynchronous Event Notification)
7524	 */
7525	if (megasas_start_aen(instance)) {
7526		dev_printk(KERN_DEBUG, &pdev->dev, "start aen failed\n");
7527		goto fail_start_aen;
7528	}
7529
7530	megasas_setup_debugfs(instance);
7531
7532	/* Get current SR-IOV LD/VF affiliation */
7533	if (instance->requestorId)
7534		megasas_get_ld_vf_affiliation(instance, 1);
7535
7536	return 0;
7537
7538fail_start_aen:
7539	instance->unload = 1;
7540	scsi_remove_host(instance->host);
7541fail_io_attach:
7542	megasas_mgmt_info.count--;
7543	megasas_mgmt_info.max_index--;
7544	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
7545
7546	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7547		del_timer_sync(&instance->sriov_heartbeat_timer);
7548
7549	instance->instancet->disable_intr(instance);
7550	megasas_destroy_irqs(instance);
7551
7552	if (instance->adapter_type != MFI_SERIES)
7553		megasas_release_fusion(instance);
7554	else
7555		megasas_release_mfi(instance);
7556
7557	if (instance->msix_vectors)
7558		pci_free_irq_vectors(instance->pdev);
7559	instance->msix_vectors = 0;
7560
7561	if (instance->fw_crash_state != UNAVAILABLE)
7562		megasas_free_host_crash_buffer(instance);
7563
7564	if (instance->adapter_type != MFI_SERIES)
7565		megasas_fusion_stop_watchdog(instance);
7566fail_init_mfi:
7567	scsi_host_put(host);
7568fail_alloc_instance:
7569	pci_disable_device(pdev);
7570
7571	return -ENODEV;
7572}
7573
7574/**
7575 * megasas_flush_cache -	Requests FW to flush all its caches
7576 * @instance:			Adapter soft state
7577 */
7578static void megasas_flush_cache(struct megasas_instance *instance)
7579{
7580	struct megasas_cmd *cmd;
7581	struct megasas_dcmd_frame *dcmd;
7582
7583	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7584		return;
7585
7586	cmd = megasas_get_cmd(instance);
7587
7588	if (!cmd)
7589		return;
7590
7591	dcmd = &cmd->frame->dcmd;
7592
7593	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7594
7595	dcmd->cmd = MFI_CMD_DCMD;
7596	dcmd->cmd_status = 0x0;
7597	dcmd->sge_count = 0;
7598	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7599	dcmd->timeout = 0;
7600	dcmd->pad_0 = 0;
7601	dcmd->data_xfer_len = 0;
7602	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH);
7603	dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
7604
7605	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7606			!= DCMD_SUCCESS) {
7607		dev_err(&instance->pdev->dev,
7608			"return from %s %d\n", __func__, __LINE__);
7609		return;
7610	}
7611
7612	megasas_return_cmd(instance, cmd);
7613}
7614
7615/**
7616 * megasas_shutdown_controller -	Instructs FW to shutdown the controller
7617 * @instance:				Adapter soft state
7618 * @opcode:				Shutdown/Hibernate
7619 */
7620static void megasas_shutdown_controller(struct megasas_instance *instance,
7621					u32 opcode)
7622{
7623	struct megasas_cmd *cmd;
7624	struct megasas_dcmd_frame *dcmd;
7625
7626	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
7627		return;
7628
7629	cmd = megasas_get_cmd(instance);
7630
7631	if (!cmd)
7632		return;
7633
7634	if (instance->aen_cmd)
7635		megasas_issue_blocked_abort_cmd(instance,
7636			instance->aen_cmd, MFI_IO_TIMEOUT_SECS);
7637	if (instance->map_update_cmd)
7638		megasas_issue_blocked_abort_cmd(instance,
7639			instance->map_update_cmd, MFI_IO_TIMEOUT_SECS);
7640	if (instance->jbod_seq_cmd)
7641		megasas_issue_blocked_abort_cmd(instance,
7642			instance->jbod_seq_cmd, MFI_IO_TIMEOUT_SECS);
7643
7644	dcmd = &cmd->frame->dcmd;
7645
7646	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
7647
7648	dcmd->cmd = MFI_CMD_DCMD;
7649	dcmd->cmd_status = 0x0;
7650	dcmd->sge_count = 0;
7651	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
7652	dcmd->timeout = 0;
7653	dcmd->pad_0 = 0;
7654	dcmd->data_xfer_len = 0;
7655	dcmd->opcode = cpu_to_le32(opcode);
7656
7657	if (megasas_issue_blocked_cmd(instance, cmd, MFI_IO_TIMEOUT_SECS)
7658			!= DCMD_SUCCESS) {
7659		dev_err(&instance->pdev->dev,
7660			"return from %s %d\n", __func__, __LINE__);
7661		return;
7662	}
7663
7664	megasas_return_cmd(instance, cmd);
7665}
7666
7667#ifdef CONFIG_PM
7668/**
7669 * megasas_suspend -	driver suspend entry point
7670 * @pdev:		PCI device structure
7671 * @state:		PCI power state to suspend routine
7672 */
7673static int
7674megasas_suspend(struct pci_dev *pdev, pm_message_t state)
7675{
7676	struct megasas_instance *instance;
7677
7678	instance = pci_get_drvdata(pdev);
7679
7680	if (!instance)
7681		return 0;
7682
7683	instance->unload = 1;
7684
7685	dev_info(&pdev->dev, "%s is called\n", __func__);
7686
7687	/* Shutdown SR-IOV heartbeat timer */
7688	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7689		del_timer_sync(&instance->sriov_heartbeat_timer);
7690
7691	/* Stop the FW fault detection watchdog */
7692	if (instance->adapter_type != MFI_SERIES)
7693		megasas_fusion_stop_watchdog(instance);
7694
7695	megasas_flush_cache(instance);
7696	megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
7697
7698	/* cancel the delayed work if this work still in queue */
7699	if (instance->ev != NULL) {
7700		struct megasas_aen_event *ev = instance->ev;
7701		cancel_delayed_work_sync(&ev->hotplug_work);
7702		instance->ev = NULL;
7703	}
7704
7705	tasklet_kill(&instance->isr_tasklet);
7706
7707	pci_set_drvdata(instance->pdev, instance);
7708	instance->instancet->disable_intr(instance);
7709
7710	megasas_destroy_irqs(instance);
7711
7712	if (instance->msix_vectors)
7713		pci_free_irq_vectors(instance->pdev);
7714
7715	pci_save_state(pdev);
7716	pci_disable_device(pdev);
7717
7718	pci_set_power_state(pdev, pci_choose_state(pdev, state));
7719
7720	return 0;
7721}
7722
7723/**
7724 * megasas_resume-      driver resume entry point
7725 * @pdev:               PCI device structure
7726 */
7727static int
7728megasas_resume(struct pci_dev *pdev)
7729{
7730	int rval;
7731	struct Scsi_Host *host;
7732	struct megasas_instance *instance;
7733	u32 status_reg;
7734
7735	instance = pci_get_drvdata(pdev);
7736
7737	if (!instance)
7738		return 0;
7739
7740	host = instance->host;
7741	pci_set_power_state(pdev, PCI_D0);
7742	pci_enable_wake(pdev, PCI_D0, 0);
7743	pci_restore_state(pdev);
7744
7745	dev_info(&pdev->dev, "%s is called\n", __func__);
7746	/*
7747	 * PCI prepping: enable device set bus mastering and dma mask
7748	 */
7749	rval = pci_enable_device_mem(pdev);
7750
7751	if (rval) {
7752		dev_err(&pdev->dev, "Enable device failed\n");
7753		return rval;
7754	}
7755
7756	pci_set_master(pdev);
7757
7758	/*
7759	 * We expect the FW state to be READY
7760	 */
7761
7762	if (megasas_transition_to_ready(instance, 0)) {
7763		dev_info(&instance->pdev->dev,
7764			 "Failed to transition controller to ready from %s!\n",
7765			 __func__);
7766		if (instance->adapter_type != MFI_SERIES) {
7767			status_reg =
7768				instance->instancet->read_fw_status_reg(instance);
7769			if (!(status_reg & MFI_RESET_ADAPTER) ||
7770				((megasas_adp_reset_wait_for_ready
7771				(instance, true, 0)) == FAILED))
7772				goto fail_ready_state;
7773		} else {
7774			atomic_set(&instance->fw_reset_no_pci_access, 1);
7775			instance->instancet->adp_reset
7776				(instance, instance->reg_set);
7777			atomic_set(&instance->fw_reset_no_pci_access, 0);
7778
7779			/* waiting for about 30 seconds before retry */
7780			ssleep(30);
7781
7782			if (megasas_transition_to_ready(instance, 0))
7783				goto fail_ready_state;
7784		}
7785
7786		dev_info(&instance->pdev->dev,
7787			 "FW restarted successfully from %s!\n",
7788			 __func__);
7789	}
7790	if (megasas_set_dma_mask(instance))
7791		goto fail_set_dma_mask;
7792
7793	/*
7794	 * Initialize MFI Firmware
7795	 */
7796
7797	atomic_set(&instance->fw_outstanding, 0);
7798	atomic_set(&instance->ldio_outstanding, 0);
7799
7800	/* Now re-enable MSI-X */
7801	if (instance->msix_vectors)
7802		megasas_alloc_irq_vectors(instance);
7803
7804	if (!instance->msix_vectors) {
7805		rval = pci_alloc_irq_vectors(instance->pdev, 1, 1,
7806					     PCI_IRQ_LEGACY);
7807		if (rval < 0)
7808			goto fail_reenable_msix;
7809	}
7810
7811	megasas_setup_reply_map(instance);
7812
7813	if (instance->adapter_type != MFI_SERIES) {
7814		megasas_reset_reply_desc(instance);
7815		if (megasas_ioc_init_fusion(instance)) {
7816			megasas_free_cmds(instance);
7817			megasas_free_cmds_fusion(instance);
7818			goto fail_init_mfi;
7819		}
7820		if (!megasas_get_map_info(instance))
7821			megasas_sync_map_info(instance);
7822	} else {
7823		*instance->producer = 0;
7824		*instance->consumer = 0;
7825		if (megasas_issue_init_mfi(instance))
7826			goto fail_init_mfi;
7827	}
7828
7829	if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
7830		goto fail_init_mfi;
7831
7832	tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
7833		     (unsigned long)instance);
7834
7835	if (instance->msix_vectors ?
7836			megasas_setup_irqs_msix(instance, 0) :
7837			megasas_setup_irqs_ioapic(instance))
7838		goto fail_init_mfi;
7839
7840	if (instance->adapter_type != MFI_SERIES)
7841		megasas_setup_irq_poll(instance);
7842
7843	/* Re-launch SR-IOV heartbeat timer */
7844	if (instance->requestorId) {
7845		if (!megasas_sriov_start_heartbeat(instance, 0))
7846			megasas_start_timer(instance);
7847		else {
7848			instance->skip_heartbeat_timer_del = 1;
7849			goto fail_init_mfi;
7850		}
7851	}
7852
7853	instance->instancet->enable_intr(instance);
7854	megasas_setup_jbod_map(instance);
7855	instance->unload = 0;
7856
7857	/*
7858	 * Initiate AEN (Asynchronous Event Notification)
7859	 */
7860	if (megasas_start_aen(instance))
7861		dev_err(&instance->pdev->dev, "Start AEN failed\n");
7862
7863	/* Re-launch FW fault watchdog */
7864	if (instance->adapter_type != MFI_SERIES)
7865		if (megasas_fusion_start_watchdog(instance) != SUCCESS)
7866			goto fail_start_watchdog;
7867
7868	return 0;
7869
7870fail_start_watchdog:
7871	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7872		del_timer_sync(&instance->sriov_heartbeat_timer);
7873fail_init_mfi:
7874	megasas_free_ctrl_dma_buffers(instance);
7875	megasas_free_ctrl_mem(instance);
7876	scsi_host_put(host);
7877
7878fail_reenable_msix:
7879fail_set_dma_mask:
7880fail_ready_state:
7881
7882	pci_disable_device(pdev);
7883
7884	return -ENODEV;
7885}
7886#else
7887#define megasas_suspend	NULL
7888#define megasas_resume	NULL
7889#endif
7890
7891static inline int
7892megasas_wait_for_adapter_operational(struct megasas_instance *instance)
7893{
7894	int wait_time = MEGASAS_RESET_WAIT_TIME * 2;
7895	int i;
7896	u8 adp_state;
7897
7898	for (i = 0; i < wait_time; i++) {
7899		adp_state = atomic_read(&instance->adprecovery);
7900		if ((adp_state == MEGASAS_HBA_OPERATIONAL) ||
7901		    (adp_state == MEGASAS_HW_CRITICAL_ERROR))
7902			break;
7903
7904		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL))
7905			dev_notice(&instance->pdev->dev, "waiting for controller reset to finish\n");
7906
7907		msleep(1000);
7908	}
7909
7910	if (adp_state != MEGASAS_HBA_OPERATIONAL) {
7911		dev_info(&instance->pdev->dev,
7912			 "%s HBA failed to become operational, adp_state %d\n",
7913			 __func__, adp_state);
7914		return 1;
7915	}
7916
7917	return 0;
7918}
7919
7920/**
7921 * megasas_detach_one -	PCI hot"un"plug entry point
7922 * @pdev:		PCI device structure
7923 */
7924static void megasas_detach_one(struct pci_dev *pdev)
7925{
7926	int i;
7927	struct Scsi_Host *host;
7928	struct megasas_instance *instance;
7929	struct fusion_context *fusion;
7930	u32 pd_seq_map_sz;
7931
7932	instance = pci_get_drvdata(pdev);
7933
7934	if (!instance)
7935		return;
7936
7937	host = instance->host;
7938	fusion = instance->ctrl_context;
7939
7940	/* Shutdown SR-IOV heartbeat timer */
7941	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
7942		del_timer_sync(&instance->sriov_heartbeat_timer);
7943
7944	/* Stop the FW fault detection watchdog */
7945	if (instance->adapter_type != MFI_SERIES)
7946		megasas_fusion_stop_watchdog(instance);
7947
7948	if (instance->fw_crash_state != UNAVAILABLE)
7949		megasas_free_host_crash_buffer(instance);
7950	scsi_remove_host(instance->host);
7951	instance->unload = 1;
7952
7953	if (megasas_wait_for_adapter_operational(instance))
7954		goto skip_firing_dcmds;
7955
7956	megasas_flush_cache(instance);
7957	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
7958
7959skip_firing_dcmds:
7960	/* cancel the delayed work if this work still in queue*/
7961	if (instance->ev != NULL) {
7962		struct megasas_aen_event *ev = instance->ev;
7963		cancel_delayed_work_sync(&ev->hotplug_work);
7964		instance->ev = NULL;
7965	}
7966
7967	/* cancel all wait events */
7968	wake_up_all(&instance->int_cmd_wait_q);
7969
7970	tasklet_kill(&instance->isr_tasklet);
7971
7972	/*
7973	 * Take the instance off the instance array. Note that we will not
7974	 * decrement the max_index. We let this array be sparse array
7975	 */
7976	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
7977		if (megasas_mgmt_info.instance[i] == instance) {
7978			megasas_mgmt_info.count--;
7979			megasas_mgmt_info.instance[i] = NULL;
7980
7981			break;
7982		}
7983	}
7984
7985	instance->instancet->disable_intr(instance);
7986
7987	megasas_destroy_irqs(instance);
7988
7989	if (instance->msix_vectors)
7990		pci_free_irq_vectors(instance->pdev);
7991
7992	if (instance->adapter_type >= VENTURA_SERIES) {
7993		for (i = 0; i < MAX_LOGICAL_DRIVES_EXT; ++i)
7994			kfree(fusion->stream_detect_by_ld[i]);
7995		kfree(fusion->stream_detect_by_ld);
7996		fusion->stream_detect_by_ld = NULL;
7997	}
7998
7999
8000	if (instance->adapter_type != MFI_SERIES) {
8001		megasas_release_fusion(instance);
8002			pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
8003				(sizeof(struct MR_PD_CFG_SEQ) *
8004					(MAX_PHYSICAL_DEVICES - 1));
8005		for (i = 0; i < 2 ; i++) {
8006			if (fusion->ld_map[i])
8007				dma_free_coherent(&instance->pdev->dev,
8008						  fusion->max_map_sz,
8009						  fusion->ld_map[i],
8010						  fusion->ld_map_phys[i]);
8011			if (fusion->ld_drv_map[i]) {
8012				if (is_vmalloc_addr(fusion->ld_drv_map[i]))
8013					vfree(fusion->ld_drv_map[i]);
8014				else
8015					free_pages((ulong)fusion->ld_drv_map[i],
8016						   fusion->drv_map_pages);
8017			}
8018
8019			if (fusion->pd_seq_sync[i])
8020				dma_free_coherent(&instance->pdev->dev,
8021					pd_seq_map_sz,
8022					fusion->pd_seq_sync[i],
8023					fusion->pd_seq_phys[i]);
8024		}
8025	} else {
8026		megasas_release_mfi(instance);
8027	}
8028
8029	if (instance->vf_affiliation)
8030		dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
8031				    sizeof(struct MR_LD_VF_AFFILIATION),
8032				    instance->vf_affiliation,
8033				    instance->vf_affiliation_h);
8034
8035	if (instance->vf_affiliation_111)
8036		dma_free_coherent(&pdev->dev,
8037				    sizeof(struct MR_LD_VF_AFFILIATION_111),
8038				    instance->vf_affiliation_111,
8039				    instance->vf_affiliation_111_h);
8040
8041	if (instance->hb_host_mem)
8042		dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
8043				    instance->hb_host_mem,
8044				    instance->hb_host_mem_h);
8045
8046	megasas_free_ctrl_dma_buffers(instance);
8047
8048	megasas_free_ctrl_mem(instance);
8049
8050	megasas_destroy_debugfs(instance);
8051
8052	scsi_host_put(host);
8053
8054	pci_disable_device(pdev);
8055}
8056
8057/**
8058 * megasas_shutdown -	Shutdown entry point
8059 * @pdev:		Generic device structure
8060 */
8061static void megasas_shutdown(struct pci_dev *pdev)
8062{
8063	struct megasas_instance *instance = pci_get_drvdata(pdev);
8064
8065	if (!instance)
8066		return;
8067
8068	instance->unload = 1;
8069
8070	if (megasas_wait_for_adapter_operational(instance))
8071		goto skip_firing_dcmds;
8072
8073	megasas_flush_cache(instance);
8074	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
8075
8076skip_firing_dcmds:
8077	instance->instancet->disable_intr(instance);
8078	megasas_destroy_irqs(instance);
8079
8080	if (instance->msix_vectors)
8081		pci_free_irq_vectors(instance->pdev);
8082}
8083
8084/*
8085 * megasas_mgmt_open -	char node "open" entry point
8086 * @inode:	char node inode
8087 * @filep:	char node file
8088 */
8089static int megasas_mgmt_open(struct inode *inode, struct file *filep)
8090{
8091	/*
8092	 * Allow only those users with admin rights
8093	 */
8094	if (!capable(CAP_SYS_ADMIN))
8095		return -EACCES;
8096
8097	return 0;
8098}
8099
8100/*
8101 * megasas_mgmt_fasync -	Async notifier registration from applications
8102 * @fd:		char node file descriptor number
8103 * @filep:	char node file
8104 * @mode:	notifier on/off
8105 *
8106 * This function adds the calling process to a driver global queue. When an
8107 * event occurs, SIGIO will be sent to all processes in this queue.
8108 */
8109static int megasas_mgmt_fasync(int fd, struct file *filep, int mode)
8110{
8111	int rc;
8112
8113	mutex_lock(&megasas_async_queue_mutex);
8114
8115	rc = fasync_helper(fd, filep, mode, &megasas_async_queue);
8116
8117	mutex_unlock(&megasas_async_queue_mutex);
8118
8119	if (rc >= 0) {
8120		/* For sanity check when we get ioctl */
8121		filep->private_data = filep;
8122		return 0;
8123	}
8124
8125	printk(KERN_DEBUG "megasas: fasync_helper failed [%d]\n", rc);
8126
8127	return rc;
8128}
8129
8130/*
8131 * megasas_mgmt_poll -  char node "poll" entry point
8132 * @filep:	char node file
8133 * @wait:	Events to poll for
8134 */
8135static __poll_t megasas_mgmt_poll(struct file *file, poll_table *wait)
8136{
8137	__poll_t mask;
8138	unsigned long flags;
8139
8140	poll_wait(file, &megasas_poll_wait, wait);
8141	spin_lock_irqsave(&poll_aen_lock, flags);
8142	if (megasas_poll_wait_aen)
8143		mask = (EPOLLIN | EPOLLRDNORM);
8144	else
8145		mask = 0;
8146	megasas_poll_wait_aen = 0;
8147	spin_unlock_irqrestore(&poll_aen_lock, flags);
8148	return mask;
8149}
8150
8151/*
8152 * megasas_set_crash_dump_params_ioctl:
8153 *		Send CRASH_DUMP_MODE DCMD to all controllers
8154 * @cmd:	MFI command frame
8155 */
8156
8157static int megasas_set_crash_dump_params_ioctl(struct megasas_cmd *cmd)
8158{
8159	struct megasas_instance *local_instance;
8160	int i, error = 0;
8161	int crash_support;
8162
8163	crash_support = cmd->frame->dcmd.mbox.w[0];
8164
8165	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
8166		local_instance = megasas_mgmt_info.instance[i];
8167		if (local_instance && local_instance->crash_dump_drv_support) {
8168			if ((atomic_read(&local_instance->adprecovery) ==
8169				MEGASAS_HBA_OPERATIONAL) &&
8170				!megasas_set_crash_dump_params(local_instance,
8171					crash_support)) {
8172				local_instance->crash_dump_app_support =
8173					crash_support;
8174				dev_info(&local_instance->pdev->dev,
8175					"Application firmware crash "
8176					"dump mode set success\n");
8177				error = 0;
8178			} else {
8179				dev_info(&local_instance->pdev->dev,
8180					"Application firmware crash "
8181					"dump mode set failed\n");
8182				error = -1;
8183			}
8184		}
8185	}
8186	return error;
8187}
8188
8189/**
8190 * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
8191 * @instance:			Adapter soft state
8192 * @user_ioc:			User's ioctl packet
8193 * @ioc:			ioctl packet
8194 */
8195static int
8196megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
8197		      struct megasas_iocpacket __user * user_ioc,
8198		      struct megasas_iocpacket *ioc)
8199{
8200	struct megasas_sge64 *kern_sge64 = NULL;
8201	struct megasas_sge32 *kern_sge32 = NULL;
8202	struct megasas_cmd *cmd;
8203	void *kbuff_arr[MAX_IOCTL_SGE];
8204	dma_addr_t buf_handle = 0;
8205	int error = 0, i;
8206	void *sense = NULL;
8207	dma_addr_t sense_handle;
8208	void *sense_ptr;
8209	u32 opcode = 0;
8210	int ret = DCMD_SUCCESS;
8211
8212	memset(kbuff_arr, 0, sizeof(kbuff_arr));
8213
8214	if (ioc->sge_count > MAX_IOCTL_SGE) {
8215		dev_printk(KERN_DEBUG, &instance->pdev->dev, "SGE count [%d] >  max limit [%d]\n",
8216		       ioc->sge_count, MAX_IOCTL_SGE);
8217		return -EINVAL;
8218	}
8219
8220	if ((ioc->frame.hdr.cmd >= MFI_CMD_OP_COUNT) ||
8221	    ((ioc->frame.hdr.cmd == MFI_CMD_NVME) &&
8222	    !instance->support_nvme_passthru) ||
8223	    ((ioc->frame.hdr.cmd == MFI_CMD_TOOLBOX) &&
8224	    !instance->support_pci_lane_margining)) {
8225		dev_err(&instance->pdev->dev,
8226			"Received invalid ioctl command 0x%x\n",
8227			ioc->frame.hdr.cmd);
8228		return -ENOTSUPP;
8229	}
8230
8231	cmd = megasas_get_cmd(instance);
8232	if (!cmd) {
8233		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get a cmd packet\n");
8234		return -ENOMEM;
8235	}
8236
8237	/*
8238	 * User's IOCTL packet has 2 frames (maximum). Copy those two
8239	 * frames into our cmd's frames. cmd->frame's context will get
8240	 * overwritten when we copy from user's frames. So set that value
8241	 * alone separately
8242	 */
8243	memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
8244	cmd->frame->hdr.context = cpu_to_le32(cmd->index);
8245	cmd->frame->hdr.pad_0 = 0;
8246
8247	cmd->frame->hdr.flags &= (~MFI_FRAME_IEEE);
8248
8249	if (instance->consistent_mask_64bit)
8250		cmd->frame->hdr.flags |= cpu_to_le16((MFI_FRAME_SGL64 |
8251				       MFI_FRAME_SENSE64));
8252	else
8253		cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_SGL64 |
8254					       MFI_FRAME_SENSE64));
8255
8256	if (cmd->frame->hdr.cmd == MFI_CMD_DCMD)
8257		opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
8258
8259	if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
8260		mutex_lock(&instance->reset_mutex);
8261		if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS) {
8262			megasas_return_cmd(instance, cmd);
8263			mutex_unlock(&instance->reset_mutex);
8264			return -1;
8265		}
8266		mutex_unlock(&instance->reset_mutex);
8267	}
8268
8269	if (opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
8270		error = megasas_set_crash_dump_params_ioctl(cmd);
8271		megasas_return_cmd(instance, cmd);
8272		return error;
8273	}
8274
8275	/*
8276	 * The management interface between applications and the fw uses
8277	 * MFI frames. E.g, RAID configuration changes, LD property changes
8278	 * etc are accomplishes through different kinds of MFI frames. The
8279	 * driver needs to care only about substituting user buffers with
8280	 * kernel buffers in SGLs. The location of SGL is embedded in the
8281	 * struct iocpacket itself.
8282	 */
8283	if (instance->consistent_mask_64bit)
8284		kern_sge64 = (struct megasas_sge64 *)
8285			((unsigned long)cmd->frame + ioc->sgl_off);
8286	else
8287		kern_sge32 = (struct megasas_sge32 *)
8288			((unsigned long)cmd->frame + ioc->sgl_off);
8289
8290	/*
8291	 * For each user buffer, create a mirror buffer and copy in
8292	 */
8293	for (i = 0; i < ioc->sge_count; i++) {
8294		if (!ioc->sgl[i].iov_len)
8295			continue;
8296
8297		kbuff_arr[i] = dma_alloc_coherent(&instance->pdev->dev,
8298						    ioc->sgl[i].iov_len,
8299						    &buf_handle, GFP_KERNEL);
8300		if (!kbuff_arr[i]) {
8301			dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc "
8302			       "kernel SGL buffer for IOCTL\n");
8303			error = -ENOMEM;
8304			goto out;
8305		}
8306
8307		/*
8308		 * We don't change the dma_coherent_mask, so
8309		 * dma_alloc_coherent only returns 32bit addresses
8310		 */
8311		if (instance->consistent_mask_64bit) {
8312			kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
8313			kern_sge64[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8314		} else {
8315			kern_sge32[i].phys_addr = cpu_to_le32(buf_handle);
8316			kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len);
8317		}
8318
8319		/*
8320		 * We created a kernel buffer corresponding to the
8321		 * user buffer. Now copy in from the user buffer
8322		 */
8323		if (copy_from_user(kbuff_arr[i], ioc->sgl[i].iov_base,
8324				   (u32) (ioc->sgl[i].iov_len))) {
8325			error = -EFAULT;
8326			goto out;
8327		}
8328	}
8329
8330	if (ioc->sense_len) {
8331		/* make sure the pointer is part of the frame */
8332		if (ioc->sense_off >
8333		    (sizeof(union megasas_frame) - sizeof(__le64))) {
8334			error = -EINVAL;
8335			goto out;
8336		}
8337
8338		sense = dma_alloc_coherent(&instance->pdev->dev, ioc->sense_len,
8339					     &sense_handle, GFP_KERNEL);
8340		if (!sense) {
8341			error = -ENOMEM;
8342			goto out;
8343		}
8344
8345		/* always store 64 bits regardless of addressing */
8346		sense_ptr = (void *)cmd->frame + ioc->sense_off;
8347		put_unaligned_le64(sense_handle, sense_ptr);
8348	}
8349
8350	/*
8351	 * Set the sync_cmd flag so that the ISR knows not to complete this
8352	 * cmd to the SCSI mid-layer
8353	 */
8354	cmd->sync_cmd = 1;
8355
8356	ret = megasas_issue_blocked_cmd(instance, cmd, 0);
8357	switch (ret) {
8358	case DCMD_INIT:
8359	case DCMD_BUSY:
8360		cmd->sync_cmd = 0;
8361		dev_err(&instance->pdev->dev,
8362			"return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n",
8363			 __func__, __LINE__, cmd->frame->hdr.cmd, opcode,
8364			 cmd->cmd_status_drv);
8365		error = -EBUSY;
8366		goto out;
8367	}
8368
8369	cmd->sync_cmd = 0;
8370
8371	if (instance->unload == 1) {
8372		dev_info(&instance->pdev->dev, "Driver unload is in progress "
8373			"don't submit data to application\n");
8374		goto out;
8375	}
8376	/*
8377	 * copy out the kernel buffers to user buffers
8378	 */
8379	for (i = 0; i < ioc->sge_count; i++) {
8380		if (copy_to_user(ioc->sgl[i].iov_base, kbuff_arr[i],
8381				 ioc->sgl[i].iov_len)) {
8382			error = -EFAULT;
8383			goto out;
8384		}
8385	}
8386
8387	/*
8388	 * copy out the sense
8389	 */
8390	if (ioc->sense_len) {
8391		/*
8392		 * sense_ptr points to the location that has the user
8393		 * sense buffer address
8394		 */
8395		sense_ptr = (unsigned long *) ((unsigned long)ioc->frame.raw +
8396				ioc->sense_off);
8397
8398		if (copy_to_user((void __user *)((unsigned long)
8399				 get_unaligned((unsigned long *)sense_ptr)),
8400				 sense, ioc->sense_len)) {
8401			dev_err(&instance->pdev->dev, "Failed to copy out to user "
8402					"sense data\n");
8403			error = -EFAULT;
8404			goto out;
8405		}
8406	}
8407
8408	/*
8409	 * copy the status codes returned by the fw
8410	 */
8411	if (copy_to_user(&user_ioc->frame.hdr.cmd_status,
8412			 &cmd->frame->hdr.cmd_status, sizeof(u8))) {
8413		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error copying out cmd_status\n");
8414		error = -EFAULT;
8415	}
8416
8417out:
8418	if (sense) {
8419		dma_free_coherent(&instance->pdev->dev, ioc->sense_len,
8420				    sense, sense_handle);
8421	}
8422
8423	for (i = 0; i < ioc->sge_count; i++) {
8424		if (kbuff_arr[i]) {
8425			if (instance->consistent_mask_64bit)
8426				dma_free_coherent(&instance->pdev->dev,
8427					le32_to_cpu(kern_sge64[i].length),
8428					kbuff_arr[i],
8429					le64_to_cpu(kern_sge64[i].phys_addr));
8430			else
8431				dma_free_coherent(&instance->pdev->dev,
8432					le32_to_cpu(kern_sge32[i].length),
8433					kbuff_arr[i],
8434					le32_to_cpu(kern_sge32[i].phys_addr));
8435			kbuff_arr[i] = NULL;
8436		}
8437	}
8438
8439	megasas_return_cmd(instance, cmd);
8440	return error;
8441}
8442
8443static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg)
8444{
8445	struct megasas_iocpacket __user *user_ioc =
8446	    (struct megasas_iocpacket __user *)arg;
8447	struct megasas_iocpacket *ioc;
8448	struct megasas_instance *instance;
8449	int error;
8450
8451	ioc = memdup_user(user_ioc, sizeof(*ioc));
8452	if (IS_ERR(ioc))
8453		return PTR_ERR(ioc);
8454
8455	instance = megasas_lookup_instance(ioc->host_no);
8456	if (!instance) {
8457		error = -ENODEV;
8458		goto out_kfree_ioc;
8459	}
8460
8461	/* Block ioctls in VF mode */
8462	if (instance->requestorId && !allow_vf_ioctls) {
8463		error = -ENODEV;
8464		goto out_kfree_ioc;
8465	}
8466
8467	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8468		dev_err(&instance->pdev->dev, "Controller in crit error\n");
8469		error = -ENODEV;
8470		goto out_kfree_ioc;
8471	}
8472
8473	if (instance->unload == 1) {
8474		error = -ENODEV;
8475		goto out_kfree_ioc;
8476	}
8477
8478	if (down_interruptible(&instance->ioctl_sem)) {
8479		error = -ERESTARTSYS;
8480		goto out_kfree_ioc;
8481	}
8482
8483	if  (megasas_wait_for_adapter_operational(instance)) {
8484		error = -ENODEV;
8485		goto out_up;
8486	}
8487
8488	error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc);
8489out_up:
8490	up(&instance->ioctl_sem);
8491
8492out_kfree_ioc:
8493	kfree(ioc);
8494	return error;
8495}
8496
8497static int megasas_mgmt_ioctl_aen(struct file *file, unsigned long arg)
8498{
8499	struct megasas_instance *instance;
8500	struct megasas_aen aen;
8501	int error;
8502
8503	if (file->private_data != file) {
8504		printk(KERN_DEBUG "megasas: fasync_helper was not "
8505		       "called first\n");
8506		return -EINVAL;
8507	}
8508
8509	if (copy_from_user(&aen, (void __user *)arg, sizeof(aen)))
8510		return -EFAULT;
8511
8512	instance = megasas_lookup_instance(aen.host_no);
8513
8514	if (!instance)
8515		return -ENODEV;
8516
8517	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
8518		return -ENODEV;
8519	}
8520
8521	if (instance->unload == 1) {
8522		return -ENODEV;
8523	}
8524
8525	if  (megasas_wait_for_adapter_operational(instance))
8526		return -ENODEV;
8527
8528	mutex_lock(&instance->reset_mutex);
8529	error = megasas_register_aen(instance, aen.seq_num,
8530				     aen.class_locale_word);
8531	mutex_unlock(&instance->reset_mutex);
8532	return error;
8533}
8534
8535/**
8536 * megasas_mgmt_ioctl -	char node ioctl entry point
8537 * @file:	char device file pointer
8538 * @cmd:	ioctl command
8539 * @arg:	ioctl command arguments address
8540 */
8541static long
8542megasas_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
8543{
8544	switch (cmd) {
8545	case MEGASAS_IOC_FIRMWARE:
8546		return megasas_mgmt_ioctl_fw(file, arg);
8547
8548	case MEGASAS_IOC_GET_AEN:
8549		return megasas_mgmt_ioctl_aen(file, arg);
8550	}
8551
8552	return -ENOTTY;
8553}
8554
8555#ifdef CONFIG_COMPAT
8556static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
8557{
8558	struct compat_megasas_iocpacket __user *cioc =
8559	    (struct compat_megasas_iocpacket __user *)arg;
8560	struct megasas_iocpacket __user *ioc =
8561	    compat_alloc_user_space(sizeof(struct megasas_iocpacket));
8562	int i;
8563	int error = 0;
8564	compat_uptr_t ptr;
8565	u32 local_sense_off;
8566	u32 local_sense_len;
8567	u32 user_sense_off;
8568
8569	if (clear_user(ioc, sizeof(*ioc)))
8570		return -EFAULT;
8571
8572	if (copy_in_user(&ioc->host_no, &cioc->host_no, sizeof(u16)) ||
8573	    copy_in_user(&ioc->sgl_off, &cioc->sgl_off, sizeof(u32)) ||
8574	    copy_in_user(&ioc->sense_off, &cioc->sense_off, sizeof(u32)) ||
8575	    copy_in_user(&ioc->sense_len, &cioc->sense_len, sizeof(u32)) ||
8576	    copy_in_user(ioc->frame.raw, cioc->frame.raw, 128) ||
8577	    copy_in_user(&ioc->sge_count, &cioc->sge_count, sizeof(u32)))
8578		return -EFAULT;
8579
8580	/*
8581	 * The sense_ptr is used in megasas_mgmt_fw_ioctl only when
8582	 * sense_len is not null, so prepare the 64bit value under
8583	 * the same condition.
8584	 */
8585	if (get_user(local_sense_off, &ioc->sense_off) ||
8586		get_user(local_sense_len, &ioc->sense_len) ||
8587		get_user(user_sense_off, &cioc->sense_off))
8588		return -EFAULT;
8589
8590	if (local_sense_off != user_sense_off)
8591		return -EINVAL;
8592
8593	if (local_sense_len) {
8594		void __user **sense_ioc_ptr =
8595			(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
8596		compat_uptr_t *sense_cioc_ptr =
8597			(compat_uptr_t *)(((unsigned long)&cioc->frame.raw) + user_sense_off);
8598		if (get_user(ptr, sense_cioc_ptr) ||
8599		    put_user(compat_ptr(ptr), sense_ioc_ptr))
8600			return -EFAULT;
8601	}
8602
8603	for (i = 0; i < MAX_IOCTL_SGE; i++) {
8604		if (get_user(ptr, &cioc->sgl[i].iov_base) ||
8605		    put_user(compat_ptr(ptr), &ioc->sgl[i].iov_base) ||
8606		    copy_in_user(&ioc->sgl[i].iov_len,
8607				 &cioc->sgl[i].iov_len, sizeof(compat_size_t)))
8608			return -EFAULT;
8609	}
8610
8611	error = megasas_mgmt_ioctl_fw(file, (unsigned long)ioc);
8612
8613	if (copy_in_user(&cioc->frame.hdr.cmd_status,
8614			 &ioc->frame.hdr.cmd_status, sizeof(u8))) {
8615		printk(KERN_DEBUG "megasas: error copy_in_user cmd_status\n");
8616		return -EFAULT;
8617	}
8618	return error;
8619}
8620
8621static long
8622megasas_mgmt_compat_ioctl(struct file *file, unsigned int cmd,
8623			  unsigned long arg)
8624{
8625	switch (cmd) {
8626	case MEGASAS_IOC_FIRMWARE32:
8627		return megasas_mgmt_compat_ioctl_fw(file, arg);
8628	case MEGASAS_IOC_GET_AEN:
8629		return megasas_mgmt_ioctl_aen(file, arg);
8630	}
8631
8632	return -ENOTTY;
8633}
8634#endif
8635
8636/*
8637 * File operations structure for management interface
8638 */
8639static const struct file_operations megasas_mgmt_fops = {
8640	.owner = THIS_MODULE,
8641	.open = megasas_mgmt_open,
8642	.fasync = megasas_mgmt_fasync,
8643	.unlocked_ioctl = megasas_mgmt_ioctl,
8644	.poll = megasas_mgmt_poll,
8645#ifdef CONFIG_COMPAT
8646	.compat_ioctl = megasas_mgmt_compat_ioctl,
8647#endif
8648	.llseek = noop_llseek,
8649};
8650
8651/*
8652 * PCI hotplug support registration structure
8653 */
8654static struct pci_driver megasas_pci_driver = {
8655
8656	.name = "megaraid_sas",
8657	.id_table = megasas_pci_table,
8658	.probe = megasas_probe_one,
8659	.remove = megasas_detach_one,
8660	.suspend = megasas_suspend,
8661	.resume = megasas_resume,
8662	.shutdown = megasas_shutdown,
8663};
8664
8665/*
8666 * Sysfs driver attributes
8667 */
8668static ssize_t version_show(struct device_driver *dd, char *buf)
8669{
8670	return snprintf(buf, strlen(MEGASAS_VERSION) + 2, "%s\n",
8671			MEGASAS_VERSION);
8672}
8673static DRIVER_ATTR_RO(version);
8674
8675static ssize_t release_date_show(struct device_driver *dd, char *buf)
8676{
8677	return snprintf(buf, strlen(MEGASAS_RELDATE) + 2, "%s\n",
8678		MEGASAS_RELDATE);
8679}
8680static DRIVER_ATTR_RO(release_date);
8681
8682static ssize_t support_poll_for_event_show(struct device_driver *dd, char *buf)
8683{
8684	return sprintf(buf, "%u\n", support_poll_for_event);
8685}
8686static DRIVER_ATTR_RO(support_poll_for_event);
8687
8688static ssize_t support_device_change_show(struct device_driver *dd, char *buf)
8689{
8690	return sprintf(buf, "%u\n", support_device_change);
8691}
8692static DRIVER_ATTR_RO(support_device_change);
8693
8694static ssize_t dbg_lvl_show(struct device_driver *dd, char *buf)
8695{
8696	return sprintf(buf, "%u\n", megasas_dbg_lvl);
8697}
8698
8699static ssize_t dbg_lvl_store(struct device_driver *dd, const char *buf,
8700			     size_t count)
8701{
8702	int retval = count;
8703
8704	if (sscanf(buf, "%u", &megasas_dbg_lvl) < 1) {
8705		printk(KERN_ERR "megasas: could not set dbg_lvl\n");
8706		retval = -EINVAL;
8707	}
8708	return retval;
8709}
8710static DRIVER_ATTR_RW(dbg_lvl);
8711
8712static ssize_t
8713support_nvme_encapsulation_show(struct device_driver *dd, char *buf)
8714{
8715	return sprintf(buf, "%u\n", support_nvme_encapsulation);
8716}
8717
8718static DRIVER_ATTR_RO(support_nvme_encapsulation);
8719
8720static ssize_t
8721support_pci_lane_margining_show(struct device_driver *dd, char *buf)
8722{
8723	return sprintf(buf, "%u\n", support_pci_lane_margining);
8724}
8725
8726static DRIVER_ATTR_RO(support_pci_lane_margining);
8727
8728static inline void megasas_remove_scsi_device(struct scsi_device *sdev)
8729{
8730	sdev_printk(KERN_INFO, sdev, "SCSI device is removed\n");
8731	scsi_remove_device(sdev);
8732	scsi_device_put(sdev);
8733}
8734
8735/**
8736 * megasas_update_device_list -	Update the PD and LD device list from FW
8737 *				after an AEN event notification
8738 * @instance:			Adapter soft state
8739 * @event_type:			Indicates type of event (PD or LD event)
8740 *
8741 * @return:			Success or failure
8742 *
8743 * Issue DCMDs to Firmware to update the internal device list in driver.
8744 * Based on the FW support, driver sends the HOST_DEVICE_LIST or combination
8745 * of PD_LIST/LD_LIST_QUERY DCMDs to get the device list.
8746 */
8747static
8748int megasas_update_device_list(struct megasas_instance *instance,
8749			       int event_type)
8750{
8751	int dcmd_ret = DCMD_SUCCESS;
8752
8753	if (instance->enable_fw_dev_list) {
8754		dcmd_ret = megasas_host_device_list_query(instance, false);
8755		if (dcmd_ret != DCMD_SUCCESS)
8756			goto out;
8757	} else {
8758		if (event_type & SCAN_PD_CHANNEL) {
8759			dcmd_ret = megasas_get_pd_list(instance);
8760
8761			if (dcmd_ret != DCMD_SUCCESS)
8762				goto out;
8763		}
8764
8765		if (event_type & SCAN_VD_CHANNEL) {
8766			if (!instance->requestorId ||
8767			    (instance->requestorId &&
8768			     megasas_get_ld_vf_affiliation(instance, 0))) {
8769				dcmd_ret = megasas_ld_list_query(instance,
8770						MR_LD_QUERY_TYPE_EXPOSED_TO_HOST);
8771				if (dcmd_ret != DCMD_SUCCESS)
8772					goto out;
8773			}
8774		}
8775	}
8776
8777out:
8778	return dcmd_ret;
8779}
8780
8781/**
8782 * megasas_add_remove_devices -	Add/remove devices to SCSI mid-layer
8783 *				after an AEN event notification
8784 * @instance:			Adapter soft state
8785 * @scan_type:			Indicates type of devices (PD/LD) to add
8786 * @return			void
8787 */
8788static
8789void megasas_add_remove_devices(struct megasas_instance *instance,
8790				int scan_type)
8791{
8792	int i, j;
8793	u16 pd_index = 0;
8794	u16 ld_index = 0;
8795	u16 channel = 0, id = 0;
8796	struct Scsi_Host *host;
8797	struct scsi_device *sdev1;
8798	struct MR_HOST_DEVICE_LIST *targetid_list = NULL;
8799	struct MR_HOST_DEVICE_LIST_ENTRY *targetid_entry = NULL;
8800
8801	host = instance->host;
8802
8803	if (instance->enable_fw_dev_list) {
8804		targetid_list = instance->host_device_list_buf;
8805		for (i = 0; i < targetid_list->count; i++) {
8806			targetid_entry = &targetid_list->host_device_list[i];
8807			if (targetid_entry->flags.u.bits.is_sys_pd) {
8808				channel = le16_to_cpu(targetid_entry->target_id) /
8809						MEGASAS_MAX_DEV_PER_CHANNEL;
8810				id = le16_to_cpu(targetid_entry->target_id) %
8811						MEGASAS_MAX_DEV_PER_CHANNEL;
8812			} else {
8813				channel = MEGASAS_MAX_PD_CHANNELS +
8814					  (le16_to_cpu(targetid_entry->target_id) /
8815					   MEGASAS_MAX_DEV_PER_CHANNEL);
8816				id = le16_to_cpu(targetid_entry->target_id) %
8817						MEGASAS_MAX_DEV_PER_CHANNEL;
8818			}
8819			sdev1 = scsi_device_lookup(host, channel, id, 0);
8820			if (!sdev1) {
8821				scsi_add_device(host, channel, id, 0);
8822			} else {
8823				scsi_device_put(sdev1);
8824			}
8825		}
8826	}
8827
8828	if (scan_type & SCAN_PD_CHANNEL) {
8829		for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) {
8830			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8831				pd_index = i * MEGASAS_MAX_DEV_PER_CHANNEL + j;
8832				sdev1 = scsi_device_lookup(host, i, j, 0);
8833				if (instance->pd_list[pd_index].driveState ==
8834							MR_PD_STATE_SYSTEM) {
8835					if (!sdev1)
8836						scsi_add_device(host, i, j, 0);
8837					else
8838						scsi_device_put(sdev1);
8839				} else {
8840					if (sdev1)
8841						megasas_remove_scsi_device(sdev1);
8842				}
8843			}
8844		}
8845	}
8846
8847	if (scan_type & SCAN_VD_CHANNEL) {
8848		for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) {
8849			for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) {
8850				ld_index = (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j;
8851				sdev1 = scsi_device_lookup(host,
8852						MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8853				if (instance->ld_ids[ld_index] != 0xff) {
8854					if (!sdev1)
8855						scsi_add_device(host, MEGASAS_MAX_PD_CHANNELS + i, j, 0);
8856					else
8857						scsi_device_put(sdev1);
8858				} else {
8859					if (sdev1)
8860						megasas_remove_scsi_device(sdev1);
8861				}
8862			}
8863		}
8864	}
8865
8866}
8867
8868static void
8869megasas_aen_polling(struct work_struct *work)
8870{
8871	struct megasas_aen_event *ev =
8872		container_of(work, struct megasas_aen_event, hotplug_work.work);
8873	struct megasas_instance *instance = ev->instance;
8874	union megasas_evt_class_locale class_locale;
8875	int event_type = 0;
8876	u32 seq_num;
8877	u16 ld_target_id;
8878	int error;
8879	u8  dcmd_ret = DCMD_SUCCESS;
8880	struct scsi_device *sdev1;
8881
8882	if (!instance) {
8883		printk(KERN_ERR "invalid instance!\n");
8884		kfree(ev);
8885		return;
8886	}
8887
8888	/* Don't run the event workqueue thread if OCR is running */
8889	mutex_lock(&instance->reset_mutex);
8890
8891	instance->ev = NULL;
8892	if (instance->evt_detail) {
8893		megasas_decode_evt(instance);
8894
8895		switch (le32_to_cpu(instance->evt_detail->code)) {
8896
8897		case MR_EVT_PD_INSERTED:
8898		case MR_EVT_PD_REMOVED:
8899			event_type = SCAN_PD_CHANNEL;
8900			break;
8901
8902		case MR_EVT_LD_OFFLINE:
8903		case MR_EVT_LD_DELETED:
8904			ld_target_id = instance->evt_detail->args.ld.target_id;
8905			sdev1 = scsi_device_lookup(instance->host,
8906						   MEGASAS_MAX_PD_CHANNELS +
8907						   (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL),
8908						   (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL),
8909						   0);
8910			if (sdev1)
8911				megasas_remove_scsi_device(sdev1);
8912
8913			event_type = SCAN_VD_CHANNEL;
8914			break;
8915		case MR_EVT_LD_CREATED:
8916			event_type = SCAN_VD_CHANNEL;
8917			break;
8918
8919		case MR_EVT_CFG_CLEARED:
8920		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
8921		case MR_EVT_FOREIGN_CFG_IMPORTED:
8922		case MR_EVT_LD_STATE_CHANGE:
8923			event_type = SCAN_PD_CHANNEL | SCAN_VD_CHANNEL;
8924			dev_info(&instance->pdev->dev, "scanning for scsi%d...\n",
8925				instance->host->host_no);
8926			break;
8927
8928		case MR_EVT_CTRL_PROP_CHANGED:
8929			dcmd_ret = megasas_get_ctrl_info(instance);
8930			if (dcmd_ret == DCMD_SUCCESS &&
8931			    instance->snapdump_wait_time) {
8932				megasas_get_snapdump_properties(instance);
8933				dev_info(&instance->pdev->dev,
8934					 "Snap dump wait time\t: %d\n",
8935					 instance->snapdump_wait_time);
8936			}
8937			break;
8938		default:
8939			event_type = 0;
8940			break;
8941		}
8942	} else {
8943		dev_err(&instance->pdev->dev, "invalid evt_detail!\n");
8944		mutex_unlock(&instance->reset_mutex);
8945		kfree(ev);
8946		return;
8947	}
8948
8949	if (event_type)
8950		dcmd_ret = megasas_update_device_list(instance, event_type);
8951
8952	mutex_unlock(&instance->reset_mutex);
8953
8954	if (event_type && dcmd_ret == DCMD_SUCCESS)
8955		megasas_add_remove_devices(instance, event_type);
8956
8957	if (dcmd_ret == DCMD_SUCCESS)
8958		seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1;
8959	else
8960		seq_num = instance->last_seq_num;
8961
8962	/* Register AEN with FW for latest sequence number plus 1 */
8963	class_locale.members.reserved = 0;
8964	class_locale.members.locale = MR_EVT_LOCALE_ALL;
8965	class_locale.members.class = MR_EVT_CLASS_DEBUG;
8966
8967	if (instance->aen_cmd != NULL) {
8968		kfree(ev);
8969		return;
8970	}
8971
8972	mutex_lock(&instance->reset_mutex);
8973	error = megasas_register_aen(instance, seq_num,
8974					class_locale.word);
8975	if (error)
8976		dev_err(&instance->pdev->dev,
8977			"register aen failed error %x\n", error);
8978
8979	mutex_unlock(&instance->reset_mutex);
8980	kfree(ev);
8981}
8982
8983/**
8984 * megasas_init - Driver load entry point
8985 */
8986static int __init megasas_init(void)
8987{
8988	int rval;
8989
8990	/*
8991	 * Booted in kdump kernel, minimize memory footprints by
8992	 * disabling few features
8993	 */
8994	if (reset_devices) {
8995		msix_vectors = 1;
8996		rdpq_enable = 0;
8997		dual_qdepth_disable = 1;
8998	}
8999
9000	/*
9001	 * Announce driver version and other information
9002	 */
9003	pr_info("megasas: %s\n", MEGASAS_VERSION);
9004
9005	spin_lock_init(&poll_aen_lock);
9006
9007	support_poll_for_event = 2;
9008	support_device_change = 1;
9009	support_nvme_encapsulation = true;
9010	support_pci_lane_margining = true;
9011
9012	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
9013
9014	/*
9015	 * Register character device node
9016	 */
9017	rval = register_chrdev(0, "megaraid_sas_ioctl", &megasas_mgmt_fops);
9018
9019	if (rval < 0) {
9020		printk(KERN_DEBUG "megasas: failed to open device node\n");
9021		return rval;
9022	}
9023
9024	megasas_mgmt_majorno = rval;
9025
9026	megasas_init_debugfs();
9027
9028	/*
9029	 * Register ourselves as PCI hotplug module
9030	 */
9031	rval = pci_register_driver(&megasas_pci_driver);
9032
9033	if (rval) {
9034		printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
9035		goto err_pcidrv;
9036	}
9037
9038	if ((event_log_level < MFI_EVT_CLASS_DEBUG) ||
9039	    (event_log_level > MFI_EVT_CLASS_DEAD)) {
9040		pr_warn("megaraid_sas: provided event log level is out of range, setting it to default 2(CLASS_CRITICAL), permissible range is: -2 to 4\n");
9041		event_log_level = MFI_EVT_CLASS_CRITICAL;
9042	}
9043
9044	rval = driver_create_file(&megasas_pci_driver.driver,
9045				  &driver_attr_version);
9046	if (rval)
9047		goto err_dcf_attr_ver;
9048
9049	rval = driver_create_file(&megasas_pci_driver.driver,
9050				  &driver_attr_release_date);
9051	if (rval)
9052		goto err_dcf_rel_date;
9053
9054	rval = driver_create_file(&megasas_pci_driver.driver,
9055				&driver_attr_support_poll_for_event);
9056	if (rval)
9057		goto err_dcf_support_poll_for_event;
9058
9059	rval = driver_create_file(&megasas_pci_driver.driver,
9060				  &driver_attr_dbg_lvl);
9061	if (rval)
9062		goto err_dcf_dbg_lvl;
9063	rval = driver_create_file(&megasas_pci_driver.driver,
9064				&driver_attr_support_device_change);
9065	if (rval)
9066		goto err_dcf_support_device_change;
9067
9068	rval = driver_create_file(&megasas_pci_driver.driver,
9069				  &driver_attr_support_nvme_encapsulation);
9070	if (rval)
9071		goto err_dcf_support_nvme_encapsulation;
9072
9073	rval = driver_create_file(&megasas_pci_driver.driver,
9074				  &driver_attr_support_pci_lane_margining);
9075	if (rval)
9076		goto err_dcf_support_pci_lane_margining;
9077
9078	return rval;
9079
9080err_dcf_support_pci_lane_margining:
9081	driver_remove_file(&megasas_pci_driver.driver,
9082			   &driver_attr_support_nvme_encapsulation);
9083
9084err_dcf_support_nvme_encapsulation:
9085	driver_remove_file(&megasas_pci_driver.driver,
9086			   &driver_attr_support_device_change);
9087
9088err_dcf_support_device_change:
9089	driver_remove_file(&megasas_pci_driver.driver,
9090			   &driver_attr_dbg_lvl);
9091err_dcf_dbg_lvl:
9092	driver_remove_file(&megasas_pci_driver.driver,
9093			&driver_attr_support_poll_for_event);
9094err_dcf_support_poll_for_event:
9095	driver_remove_file(&megasas_pci_driver.driver,
9096			   &driver_attr_release_date);
9097err_dcf_rel_date:
9098	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9099err_dcf_attr_ver:
9100	pci_unregister_driver(&megasas_pci_driver);
9101err_pcidrv:
9102	megasas_exit_debugfs();
9103	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9104	return rval;
9105}
9106
9107/**
9108 * megasas_exit - Driver unload entry point
9109 */
9110static void __exit megasas_exit(void)
9111{
9112	driver_remove_file(&megasas_pci_driver.driver,
9113			   &driver_attr_dbg_lvl);
9114	driver_remove_file(&megasas_pci_driver.driver,
9115			&driver_attr_support_poll_for_event);
9116	driver_remove_file(&megasas_pci_driver.driver,
9117			&driver_attr_support_device_change);
9118	driver_remove_file(&megasas_pci_driver.driver,
9119			   &driver_attr_release_date);
9120	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
9121	driver_remove_file(&megasas_pci_driver.driver,
9122			   &driver_attr_support_nvme_encapsulation);
9123	driver_remove_file(&megasas_pci_driver.driver,
9124			   &driver_attr_support_pci_lane_margining);
9125
9126	pci_unregister_driver(&megasas_pci_driver);
9127	megasas_exit_debugfs();
9128	unregister_chrdev(megasas_mgmt_majorno, "megaraid_sas_ioctl");
9129}
9130
9131module_init(megasas_init);
9132module_exit(megasas_exit);
9133