1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *  Linux MegaRAID driver for SAS based RAID controllers
4 *
5 *  Copyright (c) 2009-2013  LSI Corporation
6 *  Copyright (c) 2013-2016  Avago Technologies
7 *  Copyright (c) 2016-2018  Broadcom Inc.
8 *
9 *  FILE: megaraid_sas_fusion.c
10 *
11 *  Authors: Broadcom Inc.
12 *           Sumant Patro
13 *           Adam Radford
14 *           Kashyap Desai <kashyap.desai@broadcom.com>
15 *           Sumit Saxena <sumit.saxena@broadcom.com>
16 *
17 *  Send feedback to: megaraidlinux.pdl@broadcom.com
18 */
19
20#include <linux/kernel.h>
21#include <linux/types.h>
22#include <linux/pci.h>
23#include <linux/list.h>
24#include <linux/moduleparam.h>
25#include <linux/module.h>
26#include <linux/spinlock.h>
27#include <linux/interrupt.h>
28#include <linux/delay.h>
29#include <linux/uio.h>
30#include <linux/uaccess.h>
31#include <linux/fs.h>
32#include <linux/compat.h>
33#include <linux/blkdev.h>
34#include <linux/mutex.h>
35#include <linux/poll.h>
36#include <linux/vmalloc.h>
37#include <linux/workqueue.h>
38#include <linux/irq_poll.h>
39
40#include <scsi/scsi.h>
41#include <scsi/scsi_cmnd.h>
42#include <scsi/scsi_device.h>
43#include <scsi/scsi_host.h>
44#include <scsi/scsi_dbg.h>
45#include <linux/dmi.h>
46
47#include "megaraid_sas_fusion.h"
48#include "megaraid_sas.h"
49
50
51extern void
52megasas_complete_cmd(struct megasas_instance *instance,
53		     struct megasas_cmd *cmd, u8 alt_status);
54int
55wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
56	      int seconds);
57
58int
59megasas_clear_intr_fusion(struct megasas_instance *instance);
60
61int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
62
63extern u32 megasas_dbg_lvl;
64int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
65				  int initial);
66extern struct megasas_mgmt_info megasas_mgmt_info;
67extern unsigned int resetwaittime;
68extern unsigned int dual_qdepth_disable;
69static void megasas_free_rdpq_fusion(struct megasas_instance *instance);
70static void megasas_free_reply_fusion(struct megasas_instance *instance);
71static inline
72void megasas_configure_queue_sizes(struct megasas_instance *instance);
73static void megasas_fusion_crash_dump(struct megasas_instance *instance);
74
75/**
76 * megasas_adp_reset_wait_for_ready -	initiate chip reset and wait for
77 *					controller to come to ready state
78 * @instance:				adapter's soft state
79 * @do_adp_reset:			If true, do a chip reset
80 * @ocr_context:			If called from OCR context this will
81 *					be set to 1, else 0
82 *
83 * This function initates a chip reset followed by a wait for controller to
84 * transition to ready state.
85 * During this, driver will block all access to PCI config space from userspace
86 */
87int
88megasas_adp_reset_wait_for_ready(struct megasas_instance *instance,
89				 bool do_adp_reset,
90				 int ocr_context)
91{
92	int ret = FAILED;
93
94	/*
95	 * Block access to PCI config space from userspace
96	 * when diag reset is initiated from driver
97	 */
98	if (megasas_dbg_lvl & OCR_DEBUG)
99		dev_info(&instance->pdev->dev,
100			 "Block access to PCI config space %s %d\n",
101			 __func__, __LINE__);
102
103	pci_cfg_access_lock(instance->pdev);
104
105	if (do_adp_reset) {
106		if (instance->instancet->adp_reset
107			(instance, instance->reg_set))
108			goto out;
109	}
110
111	/* Wait for FW to become ready */
112	if (megasas_transition_to_ready(instance, ocr_context)) {
113		dev_warn(&instance->pdev->dev,
114			 "Failed to transition controller to ready for scsi%d.\n",
115			 instance->host->host_no);
116		goto out;
117	}
118
119	ret = SUCCESS;
120out:
121	if (megasas_dbg_lvl & OCR_DEBUG)
122		dev_info(&instance->pdev->dev,
123			 "Unlock access to PCI config space %s %d\n",
124			 __func__, __LINE__);
125
126	pci_cfg_access_unlock(instance->pdev);
127
128	return ret;
129}
130
131/**
132 * megasas_check_same_4gb_region -	check if allocation
133 *					crosses same 4GB boundary or not
134 * @instance:				adapter's soft instance
135 * @start_addr:				start address of DMA allocation
136 * @size:				size of allocation in bytes
137 * @return:				true : allocation does not cross same
138 *					4GB boundary
139 *					false: allocation crosses same
140 *					4GB boundary
141 */
142static inline bool megasas_check_same_4gb_region
143	(struct megasas_instance *instance, dma_addr_t start_addr, size_t size)
144{
145	dma_addr_t end_addr;
146
147	end_addr = start_addr + size;
148
149	if (upper_32_bits(start_addr) != upper_32_bits(end_addr)) {
150		dev_err(&instance->pdev->dev,
151			"Failed to get same 4GB boundary: start_addr: 0x%llx end_addr: 0x%llx\n",
152			(unsigned long long)start_addr,
153			(unsigned long long)end_addr);
154		return false;
155	}
156
157	return true;
158}
159
160/**
161 * megasas_enable_intr_fusion -	Enables interrupts
162 * @instance:	adapter's soft instance
163 */
164static void
165megasas_enable_intr_fusion(struct megasas_instance *instance)
166{
167	struct megasas_register_set __iomem *regs;
168	regs = instance->reg_set;
169
170	instance->mask_interrupts = 0;
171	/* For Thunderbolt/Invader also clear intr on enable */
172	writel(~0, &regs->outbound_intr_status);
173	readl(&regs->outbound_intr_status);
174
175	writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask);
176
177	/* Dummy readl to force pci flush */
178	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
179		 __func__, readl(&regs->outbound_intr_mask));
180}
181
182/**
183 * megasas_disable_intr_fusion - Disables interrupt
184 * @instance:	adapter's soft instance
185 */
186static void
187megasas_disable_intr_fusion(struct megasas_instance *instance)
188{
189	u32 mask = 0xFFFFFFFF;
190	struct megasas_register_set __iomem *regs;
191	regs = instance->reg_set;
192	instance->mask_interrupts = 1;
193
194	writel(mask, &regs->outbound_intr_mask);
195	/* Dummy readl to force pci flush */
196	dev_info(&instance->pdev->dev, "%s is called outbound_intr_mask:0x%08x\n",
197		 __func__, readl(&regs->outbound_intr_mask));
198}
199
200int
201megasas_clear_intr_fusion(struct megasas_instance *instance)
202{
203	u32 status;
204	struct megasas_register_set __iomem *regs;
205	regs = instance->reg_set;
206	/*
207	 * Check if it is our interrupt
208	 */
209	status = megasas_readl(instance,
210			       &regs->outbound_intr_status);
211
212	if (status & 1) {
213		writel(status, &regs->outbound_intr_status);
214		readl(&regs->outbound_intr_status);
215		return 1;
216	}
217	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
218		return 0;
219
220	return 1;
221}
222
223/**
224 * megasas_get_cmd_fusion -	Get a command from the free pool
225 * @instance:		Adapter soft state
226 * @blk_tag:		Command tag
227 *
228 * Returns a blk_tag indexed mpt frame
229 */
230inline struct megasas_cmd_fusion *megasas_get_cmd_fusion(struct megasas_instance
231						  *instance, u32 blk_tag)
232{
233	struct fusion_context *fusion;
234
235	fusion = instance->ctrl_context;
236	return fusion->cmd_list[blk_tag];
237}
238
239/**
240 * megasas_return_cmd_fusion -	Return a cmd to free command pool
241 * @instance:		Adapter soft state
242 * @cmd:		Command packet to be returned to free command pool
243 */
244inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
245	struct megasas_cmd_fusion *cmd)
246{
247	cmd->scmd = NULL;
248	memset(cmd->io_request, 0, MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
249	cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
250	cmd->cmd_completed = false;
251}
252
253/**
254 * megasas_write_64bit_req_desc -	PCI writes 64bit request descriptor
255 * @instance:				Adapter soft state
256 * @req_desc:				64bit Request descriptor
257 */
258static void
259megasas_write_64bit_req_desc(struct megasas_instance *instance,
260		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
261{
262#if defined(writeq) && defined(CONFIG_64BIT)
263	u64 req_data = (((u64)le32_to_cpu(req_desc->u.high) << 32) |
264		le32_to_cpu(req_desc->u.low));
265	writeq(req_data, &instance->reg_set->inbound_low_queue_port);
266#else
267	unsigned long flags;
268	spin_lock_irqsave(&instance->hba_lock, flags);
269	writel(le32_to_cpu(req_desc->u.low),
270		&instance->reg_set->inbound_low_queue_port);
271	writel(le32_to_cpu(req_desc->u.high),
272		&instance->reg_set->inbound_high_queue_port);
273	spin_unlock_irqrestore(&instance->hba_lock, flags);
274#endif
275}
276
277/**
278 * megasas_fire_cmd_fusion -	Sends command to the FW
279 * @instance:			Adapter soft state
280 * @req_desc:			32bit or 64bit Request descriptor
281 *
282 * Perform PCI Write. AERO SERIES supports 32 bit Descriptor.
283 * Prior to AERO_SERIES support 64 bit Descriptor.
284 */
285static void
286megasas_fire_cmd_fusion(struct megasas_instance *instance,
287		union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc)
288{
289	if (instance->atomic_desc_support)
290		writel(le32_to_cpu(req_desc->u.low),
291			&instance->reg_set->inbound_single_queue_port);
292	else
293		megasas_write_64bit_req_desc(instance, req_desc);
294}
295
296/**
297 * megasas_fusion_update_can_queue -	Do all Adapter Queue depth related calculations here
298 * @instance:		Adapter soft state
299 * @fw_boot_context:	Whether this function called during probe or after OCR
300 *
301 * This function is only for fusion controllers.
302 * Update host can queue, if firmware downgrade max supported firmware commands.
303 * Firmware upgrade case will be skiped because underlying firmware has
304 * more resource than exposed to the OS.
305 *
306 */
307static void
308megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_context)
309{
310	u16 cur_max_fw_cmds = 0;
311	u16 ldio_threshold = 0;
312
313	/* ventura FW does not fill outbound_scratch_pad_2 with queue depth */
314	if (instance->adapter_type < VENTURA_SERIES)
315		cur_max_fw_cmds =
316		megasas_readl(instance,
317			      &instance->reg_set->outbound_scratch_pad_2) & 0x00FFFF;
318
319	if (dual_qdepth_disable || !cur_max_fw_cmds)
320		cur_max_fw_cmds = instance->instancet->read_fw_status_reg(instance) & 0x00FFFF;
321	else
322		ldio_threshold =
323			(instance->instancet->read_fw_status_reg(instance) & 0x00FFFF) - MEGASAS_FUSION_IOCTL_CMDS;
324
325	dev_info(&instance->pdev->dev,
326		 "Current firmware supports maximum commands: %d\t LDIO threshold: %d\n",
327		 cur_max_fw_cmds, ldio_threshold);
328
329	if (fw_boot_context == OCR_CONTEXT) {
330		cur_max_fw_cmds = cur_max_fw_cmds - 1;
331		if (cur_max_fw_cmds < instance->max_fw_cmds) {
332			instance->cur_can_queue =
333				cur_max_fw_cmds - (MEGASAS_FUSION_INTERNAL_CMDS +
334						MEGASAS_FUSION_IOCTL_CMDS);
335			instance->host->can_queue = instance->cur_can_queue;
336			instance->ldio_threshold = ldio_threshold;
337		}
338	} else {
339		instance->max_fw_cmds = cur_max_fw_cmds;
340		instance->ldio_threshold = ldio_threshold;
341
342		if (reset_devices)
343			instance->max_fw_cmds = min(instance->max_fw_cmds,
344						(u16)MEGASAS_KDUMP_QUEUE_DEPTH);
345		/*
346		* Reduce the max supported cmds by 1. This is to ensure that the
347		* reply_q_sz (1 more than the max cmd that driver may send)
348		* does not exceed max cmds that the FW can support
349		*/
350		instance->max_fw_cmds = instance->max_fw_cmds-1;
351	}
352}
353
354static inline void
355megasas_get_msix_index(struct megasas_instance *instance,
356		       struct scsi_cmnd *scmd,
357		       struct megasas_cmd_fusion *cmd,
358		       u8 data_arms)
359{
360	int sdev_busy;
361
362	/* TBD - if sml remove device_busy in future, driver
363	 * should track counter in internal structure.
364	 */
365	sdev_busy = atomic_read(&scmd->device->device_busy);
366
367	if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
368	    sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
369		cmd->request_desc->SCSIIO.MSIxIndex =
370			mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
371					MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
372	} else if (instance->msix_load_balance) {
373		cmd->request_desc->SCSIIO.MSIxIndex =
374			(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
375				instance->msix_vectors));
376	} else if (instance->host->nr_hw_queues > 1) {
377		u32 tag = blk_mq_unique_tag(scmd->request);
378
379		cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
380			instance->low_latency_index_start;
381	} else {
382		cmd->request_desc->SCSIIO.MSIxIndex =
383			instance->reply_map[raw_smp_processor_id()];
384	}
385}
386
387/**
388 * megasas_free_cmds_fusion -	Free all the cmds in the free cmd pool
389 * @instance:		Adapter soft state
390 */
391void
392megasas_free_cmds_fusion(struct megasas_instance *instance)
393{
394	int i;
395	struct fusion_context *fusion = instance->ctrl_context;
396	struct megasas_cmd_fusion *cmd;
397
398	if (fusion->sense)
399		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
400			      fusion->sense_phys_addr);
401
402	/* SG */
403	if (fusion->cmd_list) {
404		for (i = 0; i < instance->max_mpt_cmds; i++) {
405			cmd = fusion->cmd_list[i];
406			if (cmd) {
407				if (cmd->sg_frame)
408					dma_pool_free(fusion->sg_dma_pool,
409						      cmd->sg_frame,
410						      cmd->sg_frame_phys_addr);
411			}
412			kfree(cmd);
413		}
414		kfree(fusion->cmd_list);
415	}
416
417	if (fusion->sg_dma_pool) {
418		dma_pool_destroy(fusion->sg_dma_pool);
419		fusion->sg_dma_pool = NULL;
420	}
421	if (fusion->sense_dma_pool) {
422		dma_pool_destroy(fusion->sense_dma_pool);
423		fusion->sense_dma_pool = NULL;
424	}
425
426
427	/* Reply Frame, Desc*/
428	if (instance->is_rdpq)
429		megasas_free_rdpq_fusion(instance);
430	else
431		megasas_free_reply_fusion(instance);
432
433	/* Request Frame, Desc*/
434	if (fusion->req_frames_desc)
435		dma_free_coherent(&instance->pdev->dev,
436			fusion->request_alloc_sz, fusion->req_frames_desc,
437			fusion->req_frames_desc_phys);
438	if (fusion->io_request_frames)
439		dma_pool_free(fusion->io_request_frames_pool,
440			fusion->io_request_frames,
441			fusion->io_request_frames_phys);
442	if (fusion->io_request_frames_pool) {
443		dma_pool_destroy(fusion->io_request_frames_pool);
444		fusion->io_request_frames_pool = NULL;
445	}
446}
447
448/**
449 * megasas_create_sg_sense_fusion -	Creates DMA pool for cmd frames
450 * @instance:			Adapter soft state
451 *
452 */
453static int megasas_create_sg_sense_fusion(struct megasas_instance *instance)
454{
455	int i;
456	u16 max_cmd;
457	struct fusion_context *fusion;
458	struct megasas_cmd_fusion *cmd;
459	int sense_sz;
460	u32 offset;
461
462	fusion = instance->ctrl_context;
463	max_cmd = instance->max_fw_cmds;
464	sense_sz = instance->max_mpt_cmds * SCSI_SENSE_BUFFERSIZE;
465
466	fusion->sg_dma_pool =
467			dma_pool_create("mr_sg", &instance->pdev->dev,
468				instance->max_chain_frame_sz,
469				MR_DEFAULT_NVME_PAGE_SIZE, 0);
470	/* SCSI_SENSE_BUFFERSIZE  = 96 bytes */
471	fusion->sense_dma_pool =
472			dma_pool_create("mr_sense", &instance->pdev->dev,
473				sense_sz, 64, 0);
474
475	if (!fusion->sense_dma_pool || !fusion->sg_dma_pool) {
476		dev_err(&instance->pdev->dev,
477			"Failed from %s %d\n",  __func__, __LINE__);
478		return -ENOMEM;
479	}
480
481	fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
482				       GFP_KERNEL, &fusion->sense_phys_addr);
483	if (!fusion->sense) {
484		dev_err(&instance->pdev->dev,
485			"failed from %s %d\n",  __func__, __LINE__);
486		return -ENOMEM;
487	}
488
489	/* sense buffer, request frame and reply desc pool requires to be in
490	 * same 4 gb region. Below function will check this.
491	 * In case of failure, new pci pool will be created with updated
492	 * alignment.
493	 * Older allocation and pool will be destroyed.
494	 * Alignment will be used such a way that next allocation if success,
495	 * will always meet same 4gb region requirement.
496	 * Actual requirement is not alignment, but we need start and end of
497	 * DMA address must have same upper 32 bit address.
498	 */
499
500	if (!megasas_check_same_4gb_region(instance, fusion->sense_phys_addr,
501					   sense_sz)) {
502		dma_pool_free(fusion->sense_dma_pool, fusion->sense,
503			      fusion->sense_phys_addr);
504		fusion->sense = NULL;
505		dma_pool_destroy(fusion->sense_dma_pool);
506
507		fusion->sense_dma_pool =
508			dma_pool_create("mr_sense_align", &instance->pdev->dev,
509					sense_sz, roundup_pow_of_two(sense_sz),
510					0);
511		if (!fusion->sense_dma_pool) {
512			dev_err(&instance->pdev->dev,
513				"Failed from %s %d\n",  __func__, __LINE__);
514			return -ENOMEM;
515		}
516		fusion->sense = dma_pool_alloc(fusion->sense_dma_pool,
517					       GFP_KERNEL,
518					       &fusion->sense_phys_addr);
519		if (!fusion->sense) {
520			dev_err(&instance->pdev->dev,
521				"failed from %s %d\n",  __func__, __LINE__);
522			return -ENOMEM;
523		}
524	}
525
526	/*
527	 * Allocate and attach a frame to each of the commands in cmd_list
528	 */
529	for (i = 0; i < max_cmd; i++) {
530		cmd = fusion->cmd_list[i];
531		cmd->sg_frame = dma_pool_alloc(fusion->sg_dma_pool,
532					GFP_KERNEL, &cmd->sg_frame_phys_addr);
533
534		offset = SCSI_SENSE_BUFFERSIZE * i;
535		cmd->sense = (u8 *)fusion->sense + offset;
536		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
537
538		if (!cmd->sg_frame) {
539			dev_err(&instance->pdev->dev,
540				"Failed from %s %d\n",  __func__, __LINE__);
541			return -ENOMEM;
542		}
543	}
544
545	/* create sense buffer for the raid 1/10 fp */
546	for (i = max_cmd; i < instance->max_mpt_cmds; i++) {
547		cmd = fusion->cmd_list[i];
548		offset = SCSI_SENSE_BUFFERSIZE * i;
549		cmd->sense = (u8 *)fusion->sense + offset;
550		cmd->sense_phys_addr = fusion->sense_phys_addr + offset;
551
552	}
553
554	return 0;
555}
556
557static int
558megasas_alloc_cmdlist_fusion(struct megasas_instance *instance)
559{
560	u32 max_mpt_cmd, i, j;
561	struct fusion_context *fusion;
562
563	fusion = instance->ctrl_context;
564
565	max_mpt_cmd = instance->max_mpt_cmds;
566
567	/*
568	 * fusion->cmd_list is an array of struct megasas_cmd_fusion pointers.
569	 * Allocate the dynamic array first and then allocate individual
570	 * commands.
571	 */
572	fusion->cmd_list =
573		kcalloc(max_mpt_cmd, sizeof(struct megasas_cmd_fusion *),
574			GFP_KERNEL);
575	if (!fusion->cmd_list) {
576		dev_err(&instance->pdev->dev,
577			"Failed from %s %d\n",  __func__, __LINE__);
578		return -ENOMEM;
579	}
580
581	for (i = 0; i < max_mpt_cmd; i++) {
582		fusion->cmd_list[i] = kzalloc(sizeof(struct megasas_cmd_fusion),
583					      GFP_KERNEL);
584		if (!fusion->cmd_list[i]) {
585			for (j = 0; j < i; j++)
586				kfree(fusion->cmd_list[j]);
587			kfree(fusion->cmd_list);
588			dev_err(&instance->pdev->dev,
589				"Failed from %s %d\n",  __func__, __LINE__);
590			return -ENOMEM;
591		}
592	}
593
594	return 0;
595}
596
597static int
598megasas_alloc_request_fusion(struct megasas_instance *instance)
599{
600	struct fusion_context *fusion;
601
602	fusion = instance->ctrl_context;
603
604retry_alloc:
605	fusion->io_request_frames_pool =
606			dma_pool_create("mr_ioreq", &instance->pdev->dev,
607				fusion->io_frames_alloc_sz, 16, 0);
608
609	if (!fusion->io_request_frames_pool) {
610		dev_err(&instance->pdev->dev,
611			"Failed from %s %d\n",  __func__, __LINE__);
612		return -ENOMEM;
613	}
614
615	fusion->io_request_frames =
616			dma_pool_alloc(fusion->io_request_frames_pool,
617				GFP_KERNEL | __GFP_NOWARN,
618				&fusion->io_request_frames_phys);
619	if (!fusion->io_request_frames) {
620		if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
621			instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
622			dma_pool_destroy(fusion->io_request_frames_pool);
623			megasas_configure_queue_sizes(instance);
624			goto retry_alloc;
625		} else {
626			dev_err(&instance->pdev->dev,
627				"Failed from %s %d\n",  __func__, __LINE__);
628			return -ENOMEM;
629		}
630	}
631
632	if (!megasas_check_same_4gb_region(instance,
633					   fusion->io_request_frames_phys,
634					   fusion->io_frames_alloc_sz)) {
635		dma_pool_free(fusion->io_request_frames_pool,
636			      fusion->io_request_frames,
637			      fusion->io_request_frames_phys);
638		fusion->io_request_frames = NULL;
639		dma_pool_destroy(fusion->io_request_frames_pool);
640
641		fusion->io_request_frames_pool =
642			dma_pool_create("mr_ioreq_align",
643					&instance->pdev->dev,
644					fusion->io_frames_alloc_sz,
645					roundup_pow_of_two(fusion->io_frames_alloc_sz),
646					0);
647
648		if (!fusion->io_request_frames_pool) {
649			dev_err(&instance->pdev->dev,
650				"Failed from %s %d\n",  __func__, __LINE__);
651			return -ENOMEM;
652		}
653
654		fusion->io_request_frames =
655			dma_pool_alloc(fusion->io_request_frames_pool,
656				       GFP_KERNEL | __GFP_NOWARN,
657				       &fusion->io_request_frames_phys);
658
659		if (!fusion->io_request_frames) {
660			dev_err(&instance->pdev->dev,
661				"Failed from %s %d\n",  __func__, __LINE__);
662			return -ENOMEM;
663		}
664	}
665
666	fusion->req_frames_desc =
667		dma_alloc_coherent(&instance->pdev->dev,
668				   fusion->request_alloc_sz,
669				   &fusion->req_frames_desc_phys, GFP_KERNEL);
670	if (!fusion->req_frames_desc) {
671		dev_err(&instance->pdev->dev,
672			"Failed from %s %d\n",  __func__, __LINE__);
673		return -ENOMEM;
674	}
675
676	return 0;
677}
678
679static int
680megasas_alloc_reply_fusion(struct megasas_instance *instance)
681{
682	int i, count;
683	struct fusion_context *fusion;
684	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
685	fusion = instance->ctrl_context;
686
687	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
688	fusion->reply_frames_desc_pool =
689			dma_pool_create("mr_reply", &instance->pdev->dev,
690				fusion->reply_alloc_sz * count, 16, 0);
691
692	if (!fusion->reply_frames_desc_pool) {
693		dev_err(&instance->pdev->dev,
694			"Failed from %s %d\n",  __func__, __LINE__);
695		return -ENOMEM;
696	}
697
698	fusion->reply_frames_desc[0] =
699		dma_pool_alloc(fusion->reply_frames_desc_pool,
700			GFP_KERNEL, &fusion->reply_frames_desc_phys[0]);
701	if (!fusion->reply_frames_desc[0]) {
702		dev_err(&instance->pdev->dev,
703			"Failed from %s %d\n",  __func__, __LINE__);
704		return -ENOMEM;
705	}
706
707	if (!megasas_check_same_4gb_region(instance,
708					   fusion->reply_frames_desc_phys[0],
709					   (fusion->reply_alloc_sz * count))) {
710		dma_pool_free(fusion->reply_frames_desc_pool,
711			      fusion->reply_frames_desc[0],
712			      fusion->reply_frames_desc_phys[0]);
713		fusion->reply_frames_desc[0] = NULL;
714		dma_pool_destroy(fusion->reply_frames_desc_pool);
715
716		fusion->reply_frames_desc_pool =
717			dma_pool_create("mr_reply_align",
718					&instance->pdev->dev,
719					fusion->reply_alloc_sz * count,
720					roundup_pow_of_two(fusion->reply_alloc_sz * count),
721					0);
722
723		if (!fusion->reply_frames_desc_pool) {
724			dev_err(&instance->pdev->dev,
725				"Failed from %s %d\n",  __func__, __LINE__);
726			return -ENOMEM;
727		}
728
729		fusion->reply_frames_desc[0] =
730			dma_pool_alloc(fusion->reply_frames_desc_pool,
731				       GFP_KERNEL,
732				       &fusion->reply_frames_desc_phys[0]);
733
734		if (!fusion->reply_frames_desc[0]) {
735			dev_err(&instance->pdev->dev,
736				"Failed from %s %d\n",  __func__, __LINE__);
737			return -ENOMEM;
738		}
739	}
740
741	reply_desc = fusion->reply_frames_desc[0];
742	for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++)
743		reply_desc->Words = cpu_to_le64(ULLONG_MAX);
744
745	/* This is not a rdpq mode, but driver still populate
746	 * reply_frame_desc array to use same msix index in ISR path.
747	 */
748	for (i = 0; i < (count - 1); i++)
749		fusion->reply_frames_desc[i + 1] =
750			fusion->reply_frames_desc[i] +
751			(fusion->reply_alloc_sz)/sizeof(union MPI2_REPLY_DESCRIPTORS_UNION);
752
753	return 0;
754}
755
756static int
757megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
758{
759	int i, j, k, msix_count;
760	struct fusion_context *fusion;
761	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
762	union MPI2_REPLY_DESCRIPTORS_UNION *rdpq_chunk_virt[RDPQ_MAX_CHUNK_COUNT];
763	dma_addr_t rdpq_chunk_phys[RDPQ_MAX_CHUNK_COUNT];
764	u8 dma_alloc_count, abs_index;
765	u32 chunk_size, array_size, offset;
766
767	fusion = instance->ctrl_context;
768	chunk_size = fusion->reply_alloc_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
769	array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
770		     MAX_MSIX_QUEUES_FUSION;
771
772	fusion->rdpq_virt = dma_alloc_coherent(&instance->pdev->dev,
773					       array_size, &fusion->rdpq_phys,
774					       GFP_KERNEL);
775	if (!fusion->rdpq_virt) {
776		dev_err(&instance->pdev->dev,
777			"Failed from %s %d\n",  __func__, __LINE__);
778		return -ENOMEM;
779	}
780
781	msix_count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
782
783	fusion->reply_frames_desc_pool = dma_pool_create("mr_rdpq",
784							 &instance->pdev->dev,
785							 chunk_size, 16, 0);
786	fusion->reply_frames_desc_pool_align =
787				dma_pool_create("mr_rdpq_align",
788						&instance->pdev->dev,
789						chunk_size,
790						roundup_pow_of_two(chunk_size),
791						0);
792
793	if (!fusion->reply_frames_desc_pool ||
794	    !fusion->reply_frames_desc_pool_align) {
795		dev_err(&instance->pdev->dev,
796			"Failed from %s %d\n",  __func__, __LINE__);
797		return -ENOMEM;
798	}
799
800/*
801 * For INVADER_SERIES each set of 8 reply queues(0-7, 8-15, ..) and
802 * VENTURA_SERIES each set of 16 reply queues(0-15, 16-31, ..) should be
803 * within 4GB boundary and also reply queues in a set must have same
804 * upper 32-bits in their memory address. so here driver is allocating the
805 * DMA'able memory for reply queues according. Driver uses limitation of
806 * VENTURA_SERIES to manage INVADER_SERIES as well.
807 */
808	dma_alloc_count = DIV_ROUND_UP(msix_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK);
809
810	for (i = 0; i < dma_alloc_count; i++) {
811		rdpq_chunk_virt[i] =
812			dma_pool_alloc(fusion->reply_frames_desc_pool,
813				       GFP_KERNEL, &rdpq_chunk_phys[i]);
814		if (!rdpq_chunk_virt[i]) {
815			dev_err(&instance->pdev->dev,
816				"Failed from %s %d\n",  __func__, __LINE__);
817			return -ENOMEM;
818		}
819		/* reply desc pool requires to be in same 4 gb region.
820		 * Below function will check this.
821		 * In case of failure, new pci pool will be created with updated
822		 * alignment.
823		 * For RDPQ buffers, driver always allocate two separate pci pool.
824		 * Alignment will be used such a way that next allocation if
825		 * success, will always meet same 4gb region requirement.
826		 * rdpq_tracker keep track of each buffer's physical,
827		 * virtual address and pci pool descriptor. It will help driver
828		 * while freeing the resources.
829		 *
830		 */
831		if (!megasas_check_same_4gb_region(instance, rdpq_chunk_phys[i],
832						   chunk_size)) {
833			dma_pool_free(fusion->reply_frames_desc_pool,
834				      rdpq_chunk_virt[i],
835				      rdpq_chunk_phys[i]);
836
837			rdpq_chunk_virt[i] =
838				dma_pool_alloc(fusion->reply_frames_desc_pool_align,
839					       GFP_KERNEL, &rdpq_chunk_phys[i]);
840			if (!rdpq_chunk_virt[i]) {
841				dev_err(&instance->pdev->dev,
842					"Failed from %s %d\n",
843					__func__, __LINE__);
844				return -ENOMEM;
845			}
846			fusion->rdpq_tracker[i].dma_pool_ptr =
847					fusion->reply_frames_desc_pool_align;
848		} else {
849			fusion->rdpq_tracker[i].dma_pool_ptr =
850					fusion->reply_frames_desc_pool;
851		}
852
853		fusion->rdpq_tracker[i].pool_entry_phys = rdpq_chunk_phys[i];
854		fusion->rdpq_tracker[i].pool_entry_virt = rdpq_chunk_virt[i];
855	}
856
857	for (k = 0; k < dma_alloc_count; k++) {
858		for (i = 0; i < RDPQ_MAX_INDEX_IN_ONE_CHUNK; i++) {
859			abs_index = (k * RDPQ_MAX_INDEX_IN_ONE_CHUNK) + i;
860
861			if (abs_index == msix_count)
862				break;
863			offset = fusion->reply_alloc_sz * i;
864			fusion->rdpq_virt[abs_index].RDPQBaseAddress =
865					cpu_to_le64(rdpq_chunk_phys[k] + offset);
866			fusion->reply_frames_desc_phys[abs_index] =
867					rdpq_chunk_phys[k] + offset;
868			fusion->reply_frames_desc[abs_index] =
869					(union MPI2_REPLY_DESCRIPTORS_UNION *)((u8 *)rdpq_chunk_virt[k] + offset);
870
871			reply_desc = fusion->reply_frames_desc[abs_index];
872			for (j = 0; j < fusion->reply_q_depth; j++, reply_desc++)
873				reply_desc->Words = ULLONG_MAX;
874		}
875	}
876
877	return 0;
878}
879
880static void
881megasas_free_rdpq_fusion(struct megasas_instance *instance) {
882
883	int i;
884	struct fusion_context *fusion;
885
886	fusion = instance->ctrl_context;
887
888	for (i = 0; i < RDPQ_MAX_CHUNK_COUNT; i++) {
889		if (fusion->rdpq_tracker[i].pool_entry_virt)
890			dma_pool_free(fusion->rdpq_tracker[i].dma_pool_ptr,
891				      fusion->rdpq_tracker[i].pool_entry_virt,
892				      fusion->rdpq_tracker[i].pool_entry_phys);
893
894	}
895
896	dma_pool_destroy(fusion->reply_frames_desc_pool);
897	dma_pool_destroy(fusion->reply_frames_desc_pool_align);
898
899	if (fusion->rdpq_virt)
900		dma_free_coherent(&instance->pdev->dev,
901			sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
902			fusion->rdpq_virt, fusion->rdpq_phys);
903}
904
905static void
906megasas_free_reply_fusion(struct megasas_instance *instance) {
907
908	struct fusion_context *fusion;
909
910	fusion = instance->ctrl_context;
911
912	if (fusion->reply_frames_desc[0])
913		dma_pool_free(fusion->reply_frames_desc_pool,
914			fusion->reply_frames_desc[0],
915			fusion->reply_frames_desc_phys[0]);
916
917	dma_pool_destroy(fusion->reply_frames_desc_pool);
918
919}
920
921
922/**
923 * megasas_alloc_cmds_fusion -	Allocates the command packets
924 * @instance:		Adapter soft state
925 *
926 *
927 * Each frame has a 32-bit field called context. This context is used to get
928 * back the megasas_cmd_fusion from the frame when a frame gets completed
929 * In this driver, the 32 bit values are the indices into an array cmd_list.
930 * This array is used only to look up the megasas_cmd_fusion given the context.
931 * The free commands themselves are maintained in a linked list called cmd_pool.
932 *
933 * cmds are formed in the io_request and sg_frame members of the
934 * megasas_cmd_fusion. The context field is used to get a request descriptor
935 * and is used as SMID of the cmd.
936 * SMID value range is from 1 to max_fw_cmds.
937 */
938static int
939megasas_alloc_cmds_fusion(struct megasas_instance *instance)
940{
941	int i;
942	struct fusion_context *fusion;
943	struct megasas_cmd_fusion *cmd;
944	u32 offset;
945	dma_addr_t io_req_base_phys;
946	u8 *io_req_base;
947
948
949	fusion = instance->ctrl_context;
950
951	if (megasas_alloc_request_fusion(instance))
952		goto fail_exit;
953
954	if (instance->is_rdpq) {
955		if (megasas_alloc_rdpq_fusion(instance))
956			goto fail_exit;
957	} else
958		if (megasas_alloc_reply_fusion(instance))
959			goto fail_exit;
960
961	if (megasas_alloc_cmdlist_fusion(instance))
962		goto fail_exit;
963
964	/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
965	io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
966	io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
967
968	/*
969	 * Add all the commands to command pool (fusion->cmd_pool)
970	 */
971
972	/* SMID 0 is reserved. Set SMID/index from 1 */
973	for (i = 0; i < instance->max_mpt_cmds; i++) {
974		cmd = fusion->cmd_list[i];
975		offset = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
976		memset(cmd, 0, sizeof(struct megasas_cmd_fusion));
977		cmd->index = i + 1;
978		cmd->scmd = NULL;
979		cmd->sync_cmd_idx =
980		(i >= instance->max_scsi_cmds && i < instance->max_fw_cmds) ?
981				(i - instance->max_scsi_cmds) :
982				(u32)ULONG_MAX; /* Set to Invalid */
983		cmd->instance = instance;
984		cmd->io_request =
985			(struct MPI2_RAID_SCSI_IO_REQUEST *)
986		  (io_req_base + offset);
987		memset(cmd->io_request, 0,
988		       sizeof(struct MPI2_RAID_SCSI_IO_REQUEST));
989		cmd->io_request_phys_addr = io_req_base_phys + offset;
990		cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
991	}
992
993	if (megasas_create_sg_sense_fusion(instance))
994		goto fail_exit;
995
996	return 0;
997
998fail_exit:
999	megasas_free_cmds_fusion(instance);
1000	return -ENOMEM;
1001}
1002
1003/**
1004 * wait_and_poll -	Issues a polling command
1005 * @instance:			Adapter soft state
1006 * @cmd:			Command packet to be issued
1007 * @seconds:			Maximum poll time
1008 *
1009 * For polling, MFI requires the cmd_status to be set to 0xFF before posting.
1010 */
1011int
1012wait_and_poll(struct megasas_instance *instance, struct megasas_cmd *cmd,
1013	int seconds)
1014{
1015	int i;
1016	struct megasas_header *frame_hdr = &cmd->frame->hdr;
1017	u32 status_reg;
1018
1019	u32 msecs = seconds * 1000;
1020
1021	/*
1022	 * Wait for cmd_status to change
1023	 */
1024	for (i = 0; (i < msecs) && (frame_hdr->cmd_status == 0xff); i += 20) {
1025		rmb();
1026		msleep(20);
1027		if (!(i % 5000)) {
1028			status_reg = instance->instancet->read_fw_status_reg(instance)
1029					& MFI_STATE_MASK;
1030			if (status_reg == MFI_STATE_FAULT)
1031				break;
1032		}
1033	}
1034
1035	if (frame_hdr->cmd_status == MFI_STAT_INVALID_STATUS)
1036		return DCMD_TIMEOUT;
1037	else if (frame_hdr->cmd_status == MFI_STAT_OK)
1038		return DCMD_SUCCESS;
1039	else
1040		return DCMD_FAILED;
1041}
1042
1043/**
1044 * megasas_ioc_init_fusion -	Initializes the FW
1045 * @instance:		Adapter soft state
1046 *
1047 * Issues the IOC Init cmd
1048 */
1049int
1050megasas_ioc_init_fusion(struct megasas_instance *instance)
1051{
1052	struct megasas_init_frame *init_frame;
1053	struct MPI2_IOC_INIT_REQUEST *IOCInitMessage = NULL;
1054	dma_addr_t	ioc_init_handle;
1055	struct megasas_cmd *cmd;
1056	u8 ret, cur_rdpq_mode;
1057	struct fusion_context *fusion;
1058	union MEGASAS_REQUEST_DESCRIPTOR_UNION req_desc;
1059	int i;
1060	struct megasas_header *frame_hdr;
1061	const char *sys_info;
1062	MFI_CAPABILITIES *drv_ops;
1063	u32 scratch_pad_1;
1064	ktime_t time;
1065	bool cur_fw_64bit_dma_capable;
1066	bool cur_intr_coalescing;
1067
1068	fusion = instance->ctrl_context;
1069
1070	ioc_init_handle = fusion->ioc_init_request_phys;
1071	IOCInitMessage = fusion->ioc_init_request;
1072
1073	cmd = fusion->ioc_init_cmd;
1074
1075	scratch_pad_1 = megasas_readl
1076		(instance, &instance->reg_set->outbound_scratch_pad_1);
1077
1078	cur_rdpq_mode = (scratch_pad_1 & MR_RDPQ_MODE_OFFSET) ? 1 : 0;
1079
1080	if (instance->adapter_type == INVADER_SERIES) {
1081		cur_fw_64bit_dma_capable =
1082			(scratch_pad_1 & MR_CAN_HANDLE_64_BIT_DMA_OFFSET) ? true : false;
1083
1084		if (instance->consistent_mask_64bit && !cur_fw_64bit_dma_capable) {
1085			dev_err(&instance->pdev->dev, "Driver was operating on 64bit "
1086				"DMA mask, but upcoming FW does not support 64bit DMA mask\n");
1087			megaraid_sas_kill_hba(instance);
1088			ret = 1;
1089			goto fail_fw_init;
1090		}
1091	}
1092
1093	if (instance->is_rdpq && !cur_rdpq_mode) {
1094		dev_err(&instance->pdev->dev, "Firmware downgrade *NOT SUPPORTED*"
1095			" from RDPQ mode to non RDPQ mode\n");
1096		ret = 1;
1097		goto fail_fw_init;
1098	}
1099
1100	cur_intr_coalescing = (scratch_pad_1 & MR_INTR_COALESCING_SUPPORT_OFFSET) ?
1101							true : false;
1102
1103	if ((instance->low_latency_index_start ==
1104		MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
1105		instance->perf_mode = MR_BALANCED_PERF_MODE;
1106
1107	dev_info(&instance->pdev->dev, "Performance mode :%s (latency index = %d)\n",
1108		MEGASAS_PERF_MODE_2STR(instance->perf_mode),
1109		instance->low_latency_index_start);
1110
1111	instance->fw_sync_cache_support = (scratch_pad_1 &
1112		MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
1113	dev_info(&instance->pdev->dev, "FW supports sync cache\t: %s\n",
1114		 instance->fw_sync_cache_support ? "Yes" : "No");
1115
1116	memset(IOCInitMessage, 0, sizeof(struct MPI2_IOC_INIT_REQUEST));
1117
1118	IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT;
1119	IOCInitMessage->WhoInit	= MPI2_WHOINIT_HOST_DRIVER;
1120	IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION);
1121	IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
1122	IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4);
1123
1124	IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth);
1125	IOCInitMessage->ReplyDescriptorPostQueueAddress = instance->is_rdpq ?
1126			cpu_to_le64(fusion->rdpq_phys) :
1127			cpu_to_le64(fusion->reply_frames_desc_phys[0]);
1128	IOCInitMessage->MsgFlags = instance->is_rdpq ?
1129			MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE : 0;
1130	IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys);
1131	IOCInitMessage->SenseBufferAddressHigh = cpu_to_le32(upper_32_bits(fusion->sense_phys_addr));
1132	IOCInitMessage->HostMSIxVectors = instance->msix_vectors;
1133	IOCInitMessage->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
1134
1135	time = ktime_get_real();
1136	/* Convert to milliseconds as per FW requirement */
1137	IOCInitMessage->TimeStamp = cpu_to_le64(ktime_to_ms(time));
1138
1139	init_frame = (struct megasas_init_frame *)cmd->frame;
1140	memset(init_frame, 0, IOC_INIT_FRAME_SIZE);
1141
1142	frame_hdr = &cmd->frame->hdr;
1143	frame_hdr->cmd_status = 0xFF;
1144	frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);
1145
1146	init_frame->cmd	= MFI_CMD_INIT;
1147	init_frame->cmd_status = 0xFF;
1148
1149	drv_ops = (MFI_CAPABILITIES *) &(init_frame->driver_operations);
1150
1151	/* driver support Extended MSIX */
1152	if (instance->adapter_type >= INVADER_SERIES)
1153		drv_ops->mfi_capabilities.support_additional_msix = 1;
1154	/* driver supports HA / Remote LUN over Fast Path interface */
1155	drv_ops->mfi_capabilities.support_fp_remote_lun = 1;
1156
1157	drv_ops->mfi_capabilities.support_max_255lds = 1;
1158	drv_ops->mfi_capabilities.support_ndrive_r1_lb = 1;
1159	drv_ops->mfi_capabilities.security_protocol_cmds_fw = 1;
1160
1161	if (instance->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
1162		drv_ops->mfi_capabilities.support_ext_io_size = 1;
1163
1164	drv_ops->mfi_capabilities.support_fp_rlbypass = 1;
1165	if (!dual_qdepth_disable)
1166		drv_ops->mfi_capabilities.support_ext_queue_depth = 1;
1167
1168	drv_ops->mfi_capabilities.support_qd_throttling = 1;
1169	drv_ops->mfi_capabilities.support_pd_map_target_id = 1;
1170	drv_ops->mfi_capabilities.support_nvme_passthru = 1;
1171	drv_ops->mfi_capabilities.support_fw_exposed_dev_list = 1;
1172
1173	if (instance->consistent_mask_64bit)
1174		drv_ops->mfi_capabilities.support_64bit_mode = 1;
1175
1176	/* Convert capability to LE32 */
1177	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
1178
1179	sys_info = dmi_get_system_info(DMI_PRODUCT_UUID);
1180	if (instance->system_info_buf && sys_info) {
1181		memcpy(instance->system_info_buf->systemId, sys_info,
1182			strlen(sys_info) > 64 ? 64 : strlen(sys_info));
1183		instance->system_info_buf->systemIdLength =
1184			strlen(sys_info) > 64 ? 64 : strlen(sys_info);
1185		init_frame->system_info_lo = cpu_to_le32(lower_32_bits(instance->system_info_h));
1186		init_frame->system_info_hi = cpu_to_le32(upper_32_bits(instance->system_info_h));
1187	}
1188
1189	init_frame->queue_info_new_phys_addr_hi =
1190		cpu_to_le32(upper_32_bits(ioc_init_handle));
1191	init_frame->queue_info_new_phys_addr_lo =
1192		cpu_to_le32(lower_32_bits(ioc_init_handle));
1193	init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST));
1194
1195	/*
1196	 * Each bit in replyqueue_mask represents one group of MSI-x vectors
1197	 * (each group has 8 vectors)
1198	 */
1199	switch (instance->perf_mode) {
1200	case MR_BALANCED_PERF_MODE:
1201		init_frame->replyqueue_mask =
1202		       cpu_to_le16(~(~0 << instance->low_latency_index_start/8));
1203		break;
1204	case MR_IOPS_PERF_MODE:
1205		init_frame->replyqueue_mask =
1206		       cpu_to_le16(~(~0 << instance->msix_vectors/8));
1207		break;
1208	}
1209
1210
1211	req_desc.u.low = cpu_to_le32(lower_32_bits(cmd->frame_phys_addr));
1212	req_desc.u.high = cpu_to_le32(upper_32_bits(cmd->frame_phys_addr));
1213	req_desc.MFAIo.RequestFlags =
1214		(MEGASAS_REQ_DESCRIPT_FLAGS_MFA <<
1215		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
1216
1217	/*
1218	 * disable the intr before firing the init frame
1219	 */
1220	instance->instancet->disable_intr(instance);
1221
1222	for (i = 0; i < (10 * 1000); i += 20) {
1223		if (megasas_readl(instance, &instance->reg_set->doorbell) & 1)
1224			msleep(20);
1225		else
1226			break;
1227	}
1228
1229	/* For AERO also, IOC_INIT requires 64 bit descriptor write */
1230	megasas_write_64bit_req_desc(instance, &req_desc);
1231
1232	wait_and_poll(instance, cmd, MFI_IO_TIMEOUT_SECS);
1233
1234	frame_hdr = &cmd->frame->hdr;
1235	if (frame_hdr->cmd_status != 0) {
1236		ret = 1;
1237		goto fail_fw_init;
1238	}
1239
1240	if (instance->adapter_type >= AERO_SERIES) {
1241		scratch_pad_1 = megasas_readl
1242			(instance, &instance->reg_set->outbound_scratch_pad_1);
1243
1244		instance->atomic_desc_support =
1245			(scratch_pad_1 & MR_ATOMIC_DESCRIPTOR_SUPPORT_OFFSET) ? 1 : 0;
1246
1247		dev_info(&instance->pdev->dev, "FW supports atomic descriptor\t: %s\n",
1248			instance->atomic_desc_support ? "Yes" : "No");
1249	}
1250
1251	return 0;
1252
1253fail_fw_init:
1254	dev_err(&instance->pdev->dev,
1255		"Init cmd return status FAILED for SCSI host %d\n",
1256		instance->host->host_no);
1257
1258	return ret;
1259}
1260
1261/**
1262 * megasas_sync_pd_seq_num -	JBOD SEQ MAP
1263 * @instance:		Adapter soft state
1264 * @pend:		set to 1, if it is pended jbod map.
1265 *
1266 * Issue Jbod map to the firmware. If it is pended command,
1267 * issue command and return. If it is first instance of jbod map
1268 * issue and receive command.
1269 */
1270int
1271megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) {
1272	int ret = 0;
1273	size_t pd_seq_map_sz;
1274	struct megasas_cmd *cmd;
1275	struct megasas_dcmd_frame *dcmd;
1276	struct fusion_context *fusion = instance->ctrl_context;
1277	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
1278	dma_addr_t pd_seq_h;
1279
1280	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id & 1)];
1281	pd_seq_h = fusion->pd_seq_phys[(instance->pd_seq_map_id & 1)];
1282	pd_seq_map_sz = struct_size(pd_sync, seq, MAX_PHYSICAL_DEVICES - 1);
1283
1284	cmd = megasas_get_cmd(instance);
1285	if (!cmd) {
1286		dev_err(&instance->pdev->dev,
1287			"Could not get mfi cmd. Fail from %s %d\n",
1288			__func__, __LINE__);
1289		return -ENOMEM;
1290	}
1291
1292	dcmd = &cmd->frame->dcmd;
1293
1294	memset(pd_sync, 0, pd_seq_map_sz);
1295	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1296
1297	if (pend) {
1298		dcmd->mbox.b[0] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1299		dcmd->flags = MFI_FRAME_DIR_WRITE;
1300		instance->jbod_seq_cmd = cmd;
1301	} else {
1302		dcmd->flags = MFI_FRAME_DIR_READ;
1303	}
1304
1305	dcmd->cmd = MFI_CMD_DCMD;
1306	dcmd->cmd_status = 0xFF;
1307	dcmd->sge_count = 1;
1308	dcmd->timeout = 0;
1309	dcmd->pad_0 = 0;
1310	dcmd->data_xfer_len = cpu_to_le32(pd_seq_map_sz);
1311	dcmd->opcode = cpu_to_le32(MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
1312
1313	megasas_set_dma_settings(instance, dcmd, pd_seq_h, pd_seq_map_sz);
1314
1315	if (pend) {
1316		instance->instancet->issue_dcmd(instance, cmd);
1317		return 0;
1318	}
1319
1320	/* Below code is only for non pended DCMD */
1321	if (!instance->mask_interrupts)
1322		ret = megasas_issue_blocked_cmd(instance, cmd,
1323			MFI_IO_TIMEOUT_SECS);
1324	else
1325		ret = megasas_issue_polled(instance, cmd);
1326
1327	if (le32_to_cpu(pd_sync->count) > MAX_PHYSICAL_DEVICES) {
1328		dev_warn(&instance->pdev->dev,
1329			"driver supports max %d JBOD, but FW reports %d\n",
1330			MAX_PHYSICAL_DEVICES, le32_to_cpu(pd_sync->count));
1331		ret = -EINVAL;
1332	}
1333
1334	if (ret == DCMD_TIMEOUT)
1335		dev_warn(&instance->pdev->dev,
1336			 "%s DCMD timed out, continue without JBOD sequence map\n",
1337			 __func__);
1338
1339	if (ret == DCMD_SUCCESS)
1340		instance->pd_seq_map_id++;
1341
1342	megasas_return_cmd(instance, cmd);
1343	return ret;
1344}
1345
1346/*
1347 * megasas_get_ld_map_info -	Returns FW's ld_map structure
1348 * @instance:				Adapter soft state
1349 * @pend:				Pend the command or not
1350 * Issues an internal command (DCMD) to get the FW's controller PD
1351 * list structure.  This information is mainly used to find out SYSTEM
1352 * supported by the FW.
1353 * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
1354 * dcmd.mbox.b[0]	- number of LDs being sync'd
1355 * dcmd.mbox.b[1]	- 0 - complete command immediately.
1356 *			- 1 - pend till config change
1357 * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
1358 *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
1359 *				uses extended struct MR_FW_RAID_MAP_EXT
1360 */
1361static int
1362megasas_get_ld_map_info(struct megasas_instance *instance)
1363{
1364	int ret = 0;
1365	struct megasas_cmd *cmd;
1366	struct megasas_dcmd_frame *dcmd;
1367	void *ci;
1368	dma_addr_t ci_h = 0;
1369	u32 size_map_info;
1370	struct fusion_context *fusion;
1371
1372	cmd = megasas_get_cmd(instance);
1373
1374	if (!cmd) {
1375		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for map info\n");
1376		return -ENOMEM;
1377	}
1378
1379	fusion = instance->ctrl_context;
1380
1381	if (!fusion) {
1382		megasas_return_cmd(instance, cmd);
1383		return -ENXIO;
1384	}
1385
1386	dcmd = &cmd->frame->dcmd;
1387
1388	size_map_info = fusion->current_map_sz;
1389
1390	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
1391	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
1392
1393	if (!ci) {
1394		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to alloc mem for ld_map_info\n");
1395		megasas_return_cmd(instance, cmd);
1396		return -ENOMEM;
1397	}
1398
1399	memset(ci, 0, fusion->max_map_sz);
1400	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1401	dcmd->cmd = MFI_CMD_DCMD;
1402	dcmd->cmd_status = 0xFF;
1403	dcmd->sge_count = 1;
1404	dcmd->flags = MFI_FRAME_DIR_READ;
1405	dcmd->timeout = 0;
1406	dcmd->pad_0 = 0;
1407	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1408	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1409
1410	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
1411
1412	if (!instance->mask_interrupts)
1413		ret = megasas_issue_blocked_cmd(instance, cmd,
1414			MFI_IO_TIMEOUT_SECS);
1415	else
1416		ret = megasas_issue_polled(instance, cmd);
1417
1418	if (ret == DCMD_TIMEOUT)
1419		dev_warn(&instance->pdev->dev,
1420			 "%s DCMD timed out, RAID map is disabled\n",
1421			 __func__);
1422
1423	megasas_return_cmd(instance, cmd);
1424
1425	return ret;
1426}
1427
1428u8
1429megasas_get_map_info(struct megasas_instance *instance)
1430{
1431	struct fusion_context *fusion = instance->ctrl_context;
1432
1433	fusion->fast_path_io = 0;
1434	if (!megasas_get_ld_map_info(instance)) {
1435		if (MR_ValidateMapInfo(instance, instance->map_id)) {
1436			fusion->fast_path_io = 1;
1437			return 0;
1438		}
1439	}
1440	return 1;
1441}
1442
1443/*
1444 * megasas_sync_map_info -	Returns FW's ld_map structure
1445 * @instance:				Adapter soft state
1446 *
1447 * Issues an internal command (DCMD) to get the FW's controller PD
1448 * list structure.  This information is mainly used to find out SYSTEM
1449 * supported by the FW.
1450 */
1451int
1452megasas_sync_map_info(struct megasas_instance *instance)
1453{
1454	int i;
1455	struct megasas_cmd *cmd;
1456	struct megasas_dcmd_frame *dcmd;
1457	u16 num_lds;
1458	struct fusion_context *fusion;
1459	struct MR_LD_TARGET_SYNC *ci = NULL;
1460	struct MR_DRV_RAID_MAP_ALL *map;
1461	struct MR_LD_RAID  *raid;
1462	struct MR_LD_TARGET_SYNC *ld_sync;
1463	dma_addr_t ci_h = 0;
1464	u32 size_map_info;
1465
1466	cmd = megasas_get_cmd(instance);
1467
1468	if (!cmd) {
1469		dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to get cmd for sync info\n");
1470		return -ENOMEM;
1471	}
1472
1473	fusion = instance->ctrl_context;
1474
1475	if (!fusion) {
1476		megasas_return_cmd(instance, cmd);
1477		return 1;
1478	}
1479
1480	map = fusion->ld_drv_map[instance->map_id & 1];
1481
1482	num_lds = le16_to_cpu(map->raidMap.ldCount);
1483
1484	dcmd = &cmd->frame->dcmd;
1485
1486	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
1487
1488	ci = (struct MR_LD_TARGET_SYNC *)
1489	  fusion->ld_map[(instance->map_id - 1) & 1];
1490	memset(ci, 0, fusion->max_map_sz);
1491
1492	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
1493
1494	ld_sync = (struct MR_LD_TARGET_SYNC *)ci;
1495
1496	for (i = 0; i < num_lds; i++, ld_sync++) {
1497		raid = MR_LdRaidGet(i, map);
1498		ld_sync->targetId = MR_GetLDTgtId(i, map);
1499		ld_sync->seqNum = raid->seqNum;
1500	}
1501
1502	size_map_info = fusion->current_map_sz;
1503
1504	dcmd->cmd = MFI_CMD_DCMD;
1505	dcmd->cmd_status = 0xFF;
1506	dcmd->sge_count = 1;
1507	dcmd->flags = MFI_FRAME_DIR_WRITE;
1508	dcmd->timeout = 0;
1509	dcmd->pad_0 = 0;
1510	dcmd->data_xfer_len = cpu_to_le32(size_map_info);
1511	dcmd->mbox.b[0] = num_lds;
1512	dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG;
1513	dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO);
1514
1515	megasas_set_dma_settings(instance, dcmd, ci_h, size_map_info);
1516
1517	instance->map_update_cmd = cmd;
1518
1519	instance->instancet->issue_dcmd(instance, cmd);
1520
1521	return 0;
1522}
1523
1524/*
1525 * meagasas_display_intel_branding - Display branding string
1526 * @instance: per adapter object
1527 *
1528 * Return nothing.
1529 */
1530static void
1531megasas_display_intel_branding(struct megasas_instance *instance)
1532{
1533	if (instance->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
1534		return;
1535
1536	switch (instance->pdev->device) {
1537	case PCI_DEVICE_ID_LSI_INVADER:
1538		switch (instance->pdev->subsystem_device) {
1539		case MEGARAID_INTEL_RS3DC080_SSDID:
1540			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1541				instance->host->host_no,
1542				MEGARAID_INTEL_RS3DC080_BRANDING);
1543			break;
1544		case MEGARAID_INTEL_RS3DC040_SSDID:
1545			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1546				instance->host->host_no,
1547				MEGARAID_INTEL_RS3DC040_BRANDING);
1548			break;
1549		case MEGARAID_INTEL_RS3SC008_SSDID:
1550			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1551				instance->host->host_no,
1552				MEGARAID_INTEL_RS3SC008_BRANDING);
1553			break;
1554		case MEGARAID_INTEL_RS3MC044_SSDID:
1555			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1556				instance->host->host_no,
1557				MEGARAID_INTEL_RS3MC044_BRANDING);
1558			break;
1559		default:
1560			break;
1561		}
1562		break;
1563	case PCI_DEVICE_ID_LSI_FURY:
1564		switch (instance->pdev->subsystem_device) {
1565		case MEGARAID_INTEL_RS3WC080_SSDID:
1566			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1567				instance->host->host_no,
1568				MEGARAID_INTEL_RS3WC080_BRANDING);
1569			break;
1570		case MEGARAID_INTEL_RS3WC040_SSDID:
1571			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1572				instance->host->host_no,
1573				MEGARAID_INTEL_RS3WC040_BRANDING);
1574			break;
1575		default:
1576			break;
1577		}
1578		break;
1579	case PCI_DEVICE_ID_LSI_CUTLASS_52:
1580	case PCI_DEVICE_ID_LSI_CUTLASS_53:
1581		switch (instance->pdev->subsystem_device) {
1582		case MEGARAID_INTEL_RMS3BC160_SSDID:
1583			dev_info(&instance->pdev->dev, "scsi host %d: %s\n",
1584				instance->host->host_no,
1585				MEGARAID_INTEL_RMS3BC160_BRANDING);
1586			break;
1587		default:
1588			break;
1589		}
1590		break;
1591	default:
1592		break;
1593	}
1594}
1595
1596/**
1597 * megasas_allocate_raid_maps -	Allocate memory for RAID maps
1598 * @instance:				Adapter soft state
1599 *
1600 * return:				if success: return 0
1601 *					failed:  return -ENOMEM
1602 */
1603static inline int megasas_allocate_raid_maps(struct megasas_instance *instance)
1604{
1605	struct fusion_context *fusion;
1606	int i = 0;
1607
1608	fusion = instance->ctrl_context;
1609
1610	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
1611
1612	for (i = 0; i < 2; i++) {
1613		fusion->ld_map[i] = NULL;
1614
1615		fusion->ld_drv_map[i] = (void *)
1616			__get_free_pages(__GFP_ZERO | GFP_KERNEL,
1617					 fusion->drv_map_pages);
1618
1619		if (!fusion->ld_drv_map[i]) {
1620			fusion->ld_drv_map[i] = vzalloc(fusion->drv_map_sz);
1621
1622			if (!fusion->ld_drv_map[i]) {
1623				dev_err(&instance->pdev->dev,
1624					"Could not allocate memory for local map"
1625					" size requested: %d\n",
1626					fusion->drv_map_sz);
1627				goto ld_drv_map_alloc_fail;
1628			}
1629		}
1630	}
1631
1632	for (i = 0; i < 2; i++) {
1633		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
1634						       fusion->max_map_sz,
1635						       &fusion->ld_map_phys[i],
1636						       GFP_KERNEL);
1637		if (!fusion->ld_map[i]) {
1638			dev_err(&instance->pdev->dev,
1639				"Could not allocate memory for map info %s:%d\n",
1640				__func__, __LINE__);
1641			goto ld_map_alloc_fail;
1642		}
1643	}
1644
1645	return 0;
1646
1647ld_map_alloc_fail:
1648	for (i = 0; i < 2; i++) {
1649		if (fusion->ld_map[i])
1650			dma_free_coherent(&instance->pdev->dev,
1651					  fusion->max_map_sz,
1652					  fusion->ld_map[i],
1653					  fusion->ld_map_phys[i]);
1654	}
1655
1656ld_drv_map_alloc_fail:
1657	for (i = 0; i < 2; i++) {
1658		if (fusion->ld_drv_map[i]) {
1659			if (is_vmalloc_addr(fusion->ld_drv_map[i]))
1660				vfree(fusion->ld_drv_map[i]);
1661			else
1662				free_pages((ulong)fusion->ld_drv_map[i],
1663					   fusion->drv_map_pages);
1664		}
1665	}
1666
1667	return -ENOMEM;
1668}
1669
1670/**
1671 * megasas_configure_queue_sizes -	Calculate size of request desc queue,
1672 *					reply desc queue,
1673 *					IO request frame queue, set can_queue.
1674 * @instance:				Adapter soft state
1675 * @return:				void
1676 */
1677static inline
1678void megasas_configure_queue_sizes(struct megasas_instance *instance)
1679{
1680	struct fusion_context *fusion;
1681	u16 max_cmd;
1682
1683	fusion = instance->ctrl_context;
1684	max_cmd = instance->max_fw_cmds;
1685
1686	if (instance->adapter_type >= VENTURA_SERIES)
1687		instance->max_mpt_cmds = instance->max_fw_cmds * RAID_1_PEER_CMDS;
1688	else
1689		instance->max_mpt_cmds = instance->max_fw_cmds;
1690
1691	instance->max_scsi_cmds = instance->max_fw_cmds - instance->max_mfi_cmds;
1692	instance->cur_can_queue = instance->max_scsi_cmds;
1693	instance->host->can_queue = instance->cur_can_queue;
1694
1695	fusion->reply_q_depth = 2 * ((max_cmd + 1 + 15) / 16) * 16;
1696
1697	fusion->request_alloc_sz = sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *
1698					  instance->max_mpt_cmds;
1699	fusion->reply_alloc_sz = sizeof(union MPI2_REPLY_DESCRIPTORS_UNION) *
1700					(fusion->reply_q_depth);
1701	fusion->io_frames_alloc_sz = MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE +
1702		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1703		 * (instance->max_mpt_cmds + 1)); /* Extra 1 for SMID 0 */
1704}
1705
1706static int megasas_alloc_ioc_init_frame(struct megasas_instance *instance)
1707{
1708	struct fusion_context *fusion;
1709	struct megasas_cmd *cmd;
1710
1711	fusion = instance->ctrl_context;
1712
1713	cmd = kzalloc(sizeof(struct megasas_cmd), GFP_KERNEL);
1714
1715	if (!cmd) {
1716		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
1717			__func__, __LINE__);
1718		return -ENOMEM;
1719	}
1720
1721	cmd->frame = dma_alloc_coherent(&instance->pdev->dev,
1722					IOC_INIT_FRAME_SIZE,
1723					&cmd->frame_phys_addr, GFP_KERNEL);
1724
1725	if (!cmd->frame) {
1726		dev_err(&instance->pdev->dev, "Failed from func: %s line: %d\n",
1727			__func__, __LINE__);
1728		kfree(cmd);
1729		return -ENOMEM;
1730	}
1731
1732	fusion->ioc_init_cmd = cmd;
1733	return 0;
1734}
1735
1736/**
1737 * megasas_free_ioc_init_cmd -	Free IOC INIT command frame
1738 * @instance:		Adapter soft state
1739 */
1740static inline void megasas_free_ioc_init_cmd(struct megasas_instance *instance)
1741{
1742	struct fusion_context *fusion;
1743
1744	fusion = instance->ctrl_context;
1745
1746	if (fusion->ioc_init_cmd && fusion->ioc_init_cmd->frame)
1747		dma_free_coherent(&instance->pdev->dev,
1748				  IOC_INIT_FRAME_SIZE,
1749				  fusion->ioc_init_cmd->frame,
1750				  fusion->ioc_init_cmd->frame_phys_addr);
1751
1752	kfree(fusion->ioc_init_cmd);
1753}
1754
1755/**
1756 * megasas_init_adapter_fusion -	Initializes the FW
1757 * @instance:		Adapter soft state
1758 *
1759 * This is the main function for initializing firmware.
1760 */
1761static u32
1762megasas_init_adapter_fusion(struct megasas_instance *instance)
1763{
1764	struct fusion_context *fusion;
1765	u32 scratch_pad_1;
1766	int i = 0, count;
1767	u32 status_reg;
1768
1769	fusion = instance->ctrl_context;
1770
1771	megasas_fusion_update_can_queue(instance, PROBE_CONTEXT);
1772
1773	/*
1774	 * Only Driver's internal DCMDs and IOCTL DCMDs needs to have MFI frames
1775	 */
1776	instance->max_mfi_cmds =
1777		MEGASAS_FUSION_INTERNAL_CMDS + MEGASAS_FUSION_IOCTL_CMDS;
1778
1779	megasas_configure_queue_sizes(instance);
1780
1781	scratch_pad_1 = megasas_readl(instance,
1782				      &instance->reg_set->outbound_scratch_pad_1);
1783	/* If scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
1784	 * Firmware support extended IO chain frame which is 4 times more than
1785	 * legacy Firmware.
1786	 * Legacy Firmware - Frame size is (8 * 128) = 1K
1787	 * 1M IO Firmware  - Frame size is (8 * 128 * 4)  = 4K
1788	 */
1789	if (scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
1790		instance->max_chain_frame_sz =
1791			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1792			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_1MB_IO;
1793	else
1794		instance->max_chain_frame_sz =
1795			((scratch_pad_1 & MEGASAS_MAX_CHAIN_SIZE_MASK) >>
1796			MEGASAS_MAX_CHAIN_SHIFT) * MEGASAS_256K_IO;
1797
1798	if (instance->max_chain_frame_sz < MEGASAS_CHAIN_FRAME_SZ_MIN) {
1799		dev_warn(&instance->pdev->dev, "frame size %d invalid, fall back to legacy max frame size %d\n",
1800			instance->max_chain_frame_sz,
1801			MEGASAS_CHAIN_FRAME_SZ_MIN);
1802		instance->max_chain_frame_sz = MEGASAS_CHAIN_FRAME_SZ_MIN;
1803	}
1804
1805	fusion->max_sge_in_main_msg =
1806		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE
1807			- offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL))/16;
1808
1809	fusion->max_sge_in_chain =
1810		instance->max_chain_frame_sz
1811			/ sizeof(union MPI2_SGE_IO_UNION);
1812
1813	instance->max_num_sge =
1814		rounddown_pow_of_two(fusion->max_sge_in_main_msg
1815			+ fusion->max_sge_in_chain - 2);
1816
1817	/* Used for pass thru MFI frame (DCMD) */
1818	fusion->chain_offset_mfi_pthru =
1819		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL)/16;
1820
1821	fusion->chain_offset_io_request =
1822		(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
1823		 sizeof(union MPI2_SGE_IO_UNION))/16;
1824
1825	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
1826	for (i = 0 ; i < count; i++)
1827		fusion->last_reply_idx[i] = 0;
1828
1829	/*
1830	 * For fusion adapters, 3 commands for IOCTL and 8 commands
1831	 * for driver's internal DCMDs.
1832	 */
1833	instance->max_scsi_cmds = instance->max_fw_cmds -
1834				(MEGASAS_FUSION_INTERNAL_CMDS +
1835				MEGASAS_FUSION_IOCTL_CMDS);
1836	sema_init(&instance->ioctl_sem, MEGASAS_FUSION_IOCTL_CMDS);
1837
1838	if (megasas_alloc_ioc_init_frame(instance))
1839		return 1;
1840
1841	/*
1842	 * Allocate memory for descriptors
1843	 * Create a pool of commands
1844	 */
1845	if (megasas_alloc_cmds(instance))
1846		goto fail_alloc_mfi_cmds;
1847	if (megasas_alloc_cmds_fusion(instance))
1848		goto fail_alloc_cmds;
1849
1850	if (megasas_ioc_init_fusion(instance)) {
1851		status_reg = instance->instancet->read_fw_status_reg(instance);
1852		if (((status_reg & MFI_STATE_MASK) == MFI_STATE_FAULT) &&
1853		    (status_reg & MFI_RESET_ADAPTER)) {
1854			/* Do a chip reset and then retry IOC INIT once */
1855			if (megasas_adp_reset_wait_for_ready
1856				(instance, true, 0) == FAILED)
1857				goto fail_ioc_init;
1858
1859			if (megasas_ioc_init_fusion(instance))
1860				goto fail_ioc_init;
1861		} else {
1862			goto fail_ioc_init;
1863		}
1864	}
1865
1866	megasas_display_intel_branding(instance);
1867	if (megasas_get_ctrl_info(instance)) {
1868		dev_err(&instance->pdev->dev,
1869			"Could not get controller info. Fail from %s %d\n",
1870			__func__, __LINE__);
1871		goto fail_ioc_init;
1872	}
1873
1874	instance->flag_ieee = 1;
1875	instance->r1_ldio_hint_default =  MR_R1_LDIO_PIGGYBACK_DEFAULT;
1876	instance->threshold_reply_count = instance->max_fw_cmds / 4;
1877	fusion->fast_path_io = 0;
1878
1879	if (megasas_allocate_raid_maps(instance))
1880		goto fail_ioc_init;
1881
1882	if (!megasas_get_map_info(instance))
1883		megasas_sync_map_info(instance);
1884
1885	return 0;
1886
1887fail_ioc_init:
1888	megasas_free_cmds_fusion(instance);
1889fail_alloc_cmds:
1890	megasas_free_cmds(instance);
1891fail_alloc_mfi_cmds:
1892	megasas_free_ioc_init_cmd(instance);
1893	return 1;
1894}
1895
1896/**
1897 * megasas_fault_detect_work	-	Worker function of
1898 *					FW fault handling workqueue.
1899 * @work:	FW fault work struct
1900 */
1901static void
1902megasas_fault_detect_work(struct work_struct *work)
1903{
1904	struct megasas_instance *instance =
1905		container_of(work, struct megasas_instance,
1906			     fw_fault_work.work);
1907	u32 fw_state, dma_state, status;
1908
1909	/* Check the fw state */
1910	fw_state = instance->instancet->read_fw_status_reg(instance) &
1911			MFI_STATE_MASK;
1912
1913	if (fw_state == MFI_STATE_FAULT) {
1914		dma_state = instance->instancet->read_fw_status_reg(instance) &
1915				MFI_STATE_DMADONE;
1916		/* Start collecting crash, if DMA bit is done */
1917		if (instance->crash_dump_drv_support &&
1918		    instance->crash_dump_app_support && dma_state) {
1919			megasas_fusion_crash_dump(instance);
1920		} else {
1921			if (instance->unload == 0) {
1922				status = megasas_reset_fusion(instance->host, 0);
1923				if (status != SUCCESS) {
1924					dev_err(&instance->pdev->dev,
1925						"Failed from %s %d, do not re-arm timer\n",
1926						__func__, __LINE__);
1927					return;
1928				}
1929			}
1930		}
1931	}
1932
1933	if (instance->fw_fault_work_q)
1934		queue_delayed_work(instance->fw_fault_work_q,
1935			&instance->fw_fault_work,
1936			msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
1937}
1938
1939int
1940megasas_fusion_start_watchdog(struct megasas_instance *instance)
1941{
1942	/* Check if the Fault WQ is already started */
1943	if (instance->fw_fault_work_q)
1944		return SUCCESS;
1945
1946	INIT_DELAYED_WORK(&instance->fw_fault_work, megasas_fault_detect_work);
1947
1948	snprintf(instance->fault_handler_work_q_name,
1949		 sizeof(instance->fault_handler_work_q_name),
1950		 "poll_megasas%d_status", instance->host->host_no);
1951
1952	instance->fw_fault_work_q =
1953		create_singlethread_workqueue(instance->fault_handler_work_q_name);
1954	if (!instance->fw_fault_work_q) {
1955		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
1956			__func__, __LINE__);
1957		return FAILED;
1958	}
1959
1960	queue_delayed_work(instance->fw_fault_work_q,
1961			   &instance->fw_fault_work,
1962			   msecs_to_jiffies(MEGASAS_WATCHDOG_THREAD_INTERVAL));
1963
1964	return SUCCESS;
1965}
1966
1967void
1968megasas_fusion_stop_watchdog(struct megasas_instance *instance)
1969{
1970	struct workqueue_struct *wq;
1971
1972	if (instance->fw_fault_work_q) {
1973		wq = instance->fw_fault_work_q;
1974		instance->fw_fault_work_q = NULL;
1975		if (!cancel_delayed_work_sync(&instance->fw_fault_work))
1976			flush_workqueue(wq);
1977		destroy_workqueue(wq);
1978	}
1979}
1980
1981/**
1982 * map_cmd_status -	Maps FW cmd status to OS cmd status
1983 * @fusion:		fusion context
1984 * @scmd:		Pointer to cmd
1985 * @status:		status of cmd returned by FW
1986 * @ext_status:		ext status of cmd returned by FW
1987 * @data_length:	command data length
1988 * @sense:		command sense data
1989 */
1990static void
1991map_cmd_status(struct fusion_context *fusion,
1992		struct scsi_cmnd *scmd, u8 status, u8 ext_status,
1993		u32 data_length, u8 *sense)
1994{
1995	u8 cmd_type;
1996	int resid;
1997
1998	cmd_type = megasas_cmd_type(scmd);
1999	switch (status) {
2000
2001	case MFI_STAT_OK:
2002		scmd->result = DID_OK << 16;
2003		break;
2004
2005	case MFI_STAT_SCSI_IO_FAILED:
2006	case MFI_STAT_LD_INIT_IN_PROGRESS:
2007		scmd->result = (DID_ERROR << 16) | ext_status;
2008		break;
2009
2010	case MFI_STAT_SCSI_DONE_WITH_ERROR:
2011
2012		scmd->result = (DID_OK << 16) | ext_status;
2013		if (ext_status == SAM_STAT_CHECK_CONDITION) {
2014			memset(scmd->sense_buffer, 0,
2015			       SCSI_SENSE_BUFFERSIZE);
2016			memcpy(scmd->sense_buffer, sense,
2017			       SCSI_SENSE_BUFFERSIZE);
2018			scmd->result |= DRIVER_SENSE << 24;
2019		}
2020
2021		/*
2022		 * If the  IO request is partially completed, then MR FW will
2023		 * update "io_request->DataLength" field with actual number of
2024		 * bytes transferred.Driver will set residual bytes count in
2025		 * SCSI command structure.
2026		 */
2027		resid = (scsi_bufflen(scmd) - data_length);
2028		scsi_set_resid(scmd, resid);
2029
2030		if (resid &&
2031			((cmd_type == READ_WRITE_LDIO) ||
2032			(cmd_type == READ_WRITE_SYSPDIO)))
2033			scmd_printk(KERN_INFO, scmd, "BRCM Debug mfi stat 0x%x, data len"
2034				" requested/completed 0x%x/0x%x\n",
2035				status, scsi_bufflen(scmd), data_length);
2036		break;
2037
2038	case MFI_STAT_LD_OFFLINE:
2039	case MFI_STAT_DEVICE_NOT_FOUND:
2040		scmd->result = DID_BAD_TARGET << 16;
2041		break;
2042	case MFI_STAT_CONFIG_SEQ_MISMATCH:
2043		scmd->result = DID_IMM_RETRY << 16;
2044		break;
2045	default:
2046		scmd->result = DID_ERROR << 16;
2047		break;
2048	}
2049}
2050
2051/**
2052 * megasas_is_prp_possible -
2053 * Checks if native NVMe PRPs can be built for the IO
2054 *
2055 * @instance:		Adapter soft state
2056 * @scmd:		SCSI command from the mid-layer
2057 * @sge_count:		scatter gather element count.
2058 *
2059 * Returns:		true: PRPs can be built
2060 *			false: IEEE SGLs needs to be built
2061 */
2062static bool
2063megasas_is_prp_possible(struct megasas_instance *instance,
2064			struct scsi_cmnd *scmd, int sge_count)
2065{
2066	u32 data_length = 0;
2067	struct scatterlist *sg_scmd;
2068	bool build_prp = false;
2069	u32 mr_nvme_pg_size;
2070
2071	mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
2072				MR_DEFAULT_NVME_PAGE_SIZE);
2073	data_length = scsi_bufflen(scmd);
2074	sg_scmd = scsi_sglist(scmd);
2075
2076	/*
2077	 * NVMe uses one PRP for each page (or part of a page)
2078	 * look at the data length - if 4 pages or less then IEEE is OK
2079	 * if  > 5 pages then we need to build a native SGL
2080	 * if > 4 and <= 5 pages, then check physical address of 1st SG entry
2081	 * if this first size in the page is >= the residual beyond 4 pages
2082	 * then use IEEE, otherwise use native SGL
2083	 */
2084
2085	if (data_length > (mr_nvme_pg_size * 5)) {
2086		build_prp = true;
2087	} else if ((data_length > (mr_nvme_pg_size * 4)) &&
2088			(data_length <= (mr_nvme_pg_size * 5)))  {
2089		/* check if 1st SG entry size is < residual beyond 4 pages */
2090		if (sg_dma_len(sg_scmd) < (data_length - (mr_nvme_pg_size * 4)))
2091			build_prp = true;
2092	}
2093
2094	return build_prp;
2095}
2096
2097/**
2098 * megasas_make_prp_nvme -
2099 * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
2100 *
2101 * @instance:		Adapter soft state
2102 * @scmd:		SCSI command from the mid-layer
2103 * @sgl_ptr:		SGL to be filled in
2104 * @cmd:		Fusion command frame
2105 * @sge_count:		scatter gather element count.
2106 *
2107 * Returns:		true: PRPs are built
2108 *			false: IEEE SGLs needs to be built
2109 */
2110static bool
2111megasas_make_prp_nvme(struct megasas_instance *instance, struct scsi_cmnd *scmd,
2112		      struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
2113		      struct megasas_cmd_fusion *cmd, int sge_count)
2114{
2115	int sge_len, offset, num_prp_in_chain = 0;
2116	struct MPI25_IEEE_SGE_CHAIN64 *main_chain_element, *ptr_first_sgl;
2117	u64 *ptr_sgl;
2118	dma_addr_t ptr_sgl_phys;
2119	u64 sge_addr;
2120	u32 page_mask, page_mask_result;
2121	struct scatterlist *sg_scmd;
2122	u32 first_prp_len;
2123	bool build_prp = false;
2124	int data_len = scsi_bufflen(scmd);
2125	u32 mr_nvme_pg_size = max_t(u32, instance->nvme_page_size,
2126					MR_DEFAULT_NVME_PAGE_SIZE);
2127
2128	build_prp = megasas_is_prp_possible(instance, scmd, sge_count);
2129
2130	if (!build_prp)
2131		return false;
2132
2133	/*
2134	 * Nvme has a very convoluted prp format.  One prp is required
2135	 * for each page or partial page. Driver need to split up OS sg_list
2136	 * entries if it is longer than one page or cross a page
2137	 * boundary.  Driver also have to insert a PRP list pointer entry as
2138	 * the last entry in each physical page of the PRP list.
2139	 *
2140	 * NOTE: The first PRP "entry" is actually placed in the first
2141	 * SGL entry in the main message as IEEE 64 format.  The 2nd
2142	 * entry in the main message is the chain element, and the rest
2143	 * of the PRP entries are built in the contiguous pcie buffer.
2144	 */
2145	page_mask = mr_nvme_pg_size - 1;
2146	ptr_sgl = (u64 *)cmd->sg_frame;
2147	ptr_sgl_phys = cmd->sg_frame_phys_addr;
2148	memset(ptr_sgl, 0, instance->max_chain_frame_sz);
2149
2150	/* Build chain frame element which holds all prps except first*/
2151	main_chain_element = (struct MPI25_IEEE_SGE_CHAIN64 *)
2152	    ((u8 *)sgl_ptr + sizeof(struct MPI25_IEEE_SGE_CHAIN64));
2153
2154	main_chain_element->Address = cpu_to_le64(ptr_sgl_phys);
2155	main_chain_element->NextChainOffset = 0;
2156	main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2157					IEEE_SGE_FLAGS_SYSTEM_ADDR |
2158					MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2159
2160	/* Build first prp, sge need not to be page aligned*/
2161	ptr_first_sgl = sgl_ptr;
2162	sg_scmd = scsi_sglist(scmd);
2163	sge_addr = sg_dma_address(sg_scmd);
2164	sge_len = sg_dma_len(sg_scmd);
2165
2166	offset = (u32)(sge_addr & page_mask);
2167	first_prp_len = mr_nvme_pg_size - offset;
2168
2169	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2170	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2171
2172	data_len -= first_prp_len;
2173
2174	if (sge_len > first_prp_len) {
2175		sge_addr += first_prp_len;
2176		sge_len -= first_prp_len;
2177	} else if (sge_len == first_prp_len) {
2178		sg_scmd = sg_next(sg_scmd);
2179		sge_addr = sg_dma_address(sg_scmd);
2180		sge_len = sg_dma_len(sg_scmd);
2181	}
2182
2183	for (;;) {
2184		offset = (u32)(sge_addr & page_mask);
2185
2186		/* Put PRP pointer due to page boundary*/
2187		page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
2188		if (unlikely(!page_mask_result)) {
2189			scmd_printk(KERN_NOTICE,
2190				    scmd, "page boundary ptr_sgl: 0x%p\n",
2191				    ptr_sgl);
2192			ptr_sgl_phys += 8;
2193			*ptr_sgl = cpu_to_le64(ptr_sgl_phys);
2194			ptr_sgl++;
2195			num_prp_in_chain++;
2196		}
2197
2198		*ptr_sgl = cpu_to_le64(sge_addr);
2199		ptr_sgl++;
2200		ptr_sgl_phys += 8;
2201		num_prp_in_chain++;
2202
2203		sge_addr += mr_nvme_pg_size;
2204		sge_len -= mr_nvme_pg_size;
2205		data_len -= mr_nvme_pg_size;
2206
2207		if (data_len <= 0)
2208			break;
2209
2210		if (sge_len > 0)
2211			continue;
2212
2213		sg_scmd = sg_next(sg_scmd);
2214		sge_addr = sg_dma_address(sg_scmd);
2215		sge_len = sg_dma_len(sg_scmd);
2216	}
2217
2218	main_chain_element->Length =
2219			cpu_to_le32(num_prp_in_chain * sizeof(u64));
2220
2221	return build_prp;
2222}
2223
2224/**
2225 * megasas_make_sgl_fusion -	Prepares 32-bit SGL
2226 * @instance:		Adapter soft state
2227 * @scp:		SCSI command from the mid-layer
2228 * @sgl_ptr:		SGL to be filled in
2229 * @cmd:		cmd we are working on
2230 * @sge_count:		sge count
2231 *
2232 */
2233static void
2234megasas_make_sgl_fusion(struct megasas_instance *instance,
2235			struct scsi_cmnd *scp,
2236			struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr,
2237			struct megasas_cmd_fusion *cmd, int sge_count)
2238{
2239	int i, sg_processed;
2240	struct scatterlist *os_sgl;
2241	struct fusion_context *fusion;
2242
2243	fusion = instance->ctrl_context;
2244
2245	if (instance->adapter_type >= INVADER_SERIES) {
2246		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr;
2247		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
2248		sgl_ptr_end->Flags = 0;
2249	}
2250
2251	scsi_for_each_sg(scp, os_sgl, sge_count, i) {
2252		sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl));
2253		sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl));
2254		sgl_ptr->Flags = 0;
2255		if (instance->adapter_type >= INVADER_SERIES)
2256			if (i == sge_count - 1)
2257				sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST;
2258		sgl_ptr++;
2259		sg_processed = i + 1;
2260
2261		if ((sg_processed ==  (fusion->max_sge_in_main_msg - 1)) &&
2262		    (sge_count > fusion->max_sge_in_main_msg)) {
2263
2264			struct MPI25_IEEE_SGE_CHAIN64 *sg_chain;
2265			if (instance->adapter_type >= INVADER_SERIES) {
2266				if ((le16_to_cpu(cmd->io_request->IoFlags) &
2267					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) !=
2268					MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
2269					cmd->io_request->ChainOffset =
2270						fusion->
2271						chain_offset_io_request;
2272				else
2273					cmd->io_request->ChainOffset = 0;
2274			} else
2275				cmd->io_request->ChainOffset =
2276					fusion->chain_offset_io_request;
2277
2278			sg_chain = sgl_ptr;
2279			/* Prepare chain element */
2280			sg_chain->NextChainOffset = 0;
2281			if (instance->adapter_type >= INVADER_SERIES)
2282				sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT;
2283			else
2284				sg_chain->Flags =
2285					(IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2286					 MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR);
2287			sg_chain->Length =  cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed)));
2288			sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr);
2289
2290			sgl_ptr =
2291			  (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame;
2292			memset(sgl_ptr, 0, instance->max_chain_frame_sz);
2293		}
2294	}
2295}
2296
2297/**
2298 * megasas_make_sgl -	Build Scatter Gather List(SGLs)
2299 * @scp:		SCSI command pointer
2300 * @instance:		Soft instance of controller
2301 * @cmd:		Fusion command pointer
2302 *
2303 * This function will build sgls based on device type.
2304 * For nvme drives, there is different way of building sgls in nvme native
2305 * format- PRPs(Physical Region Page).
2306 *
2307 * Returns the number of sg lists actually used, zero if the sg lists
2308 * is NULL, or -ENOMEM if the mapping failed
2309 */
2310static
2311int megasas_make_sgl(struct megasas_instance *instance, struct scsi_cmnd *scp,
2312		     struct megasas_cmd_fusion *cmd)
2313{
2314	int sge_count;
2315	bool build_prp = false;
2316	struct MPI25_IEEE_SGE_CHAIN64 *sgl_chain64;
2317
2318	sge_count = scsi_dma_map(scp);
2319
2320	if ((sge_count > instance->max_num_sge) || (sge_count <= 0))
2321		return sge_count;
2322
2323	sgl_chain64 = (struct MPI25_IEEE_SGE_CHAIN64 *)&cmd->io_request->SGL;
2324	if ((le16_to_cpu(cmd->io_request->IoFlags) &
2325	    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
2326	    (cmd->pd_interface == NVME_PD))
2327		build_prp = megasas_make_prp_nvme(instance, scp, sgl_chain64,
2328						  cmd, sge_count);
2329
2330	if (!build_prp)
2331		megasas_make_sgl_fusion(instance, scp, sgl_chain64,
2332					cmd, sge_count);
2333
2334	return sge_count;
2335}
2336
2337/**
2338 * megasas_set_pd_lba -	Sets PD LBA
2339 * @io_request:		IO request
2340 * @cdb_len:		cdb length
2341 * @io_info:		IO information
2342 * @scp:		SCSI command
2343 * @local_map_ptr:	Raid map
2344 * @ref_tag:		Primary reference tag
2345 *
2346 * Used to set the PD LBA in CDB for FP IOs
2347 */
2348static void
2349megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
2350		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
2351		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
2352{
2353	struct MR_LD_RAID *raid;
2354	u16 ld;
2355	u64 start_blk = io_info->pdBlock;
2356	u8 *cdb = io_request->CDB.CDB32;
2357	u32 num_blocks = io_info->numBlocks;
2358	u8 opcode = 0, flagvals = 0, groupnum = 0, control = 0;
2359
2360	/* Check if T10 PI (DIF) is enabled for this LD */
2361	ld = MR_TargetIdToLdGet(io_info->ldTgtId, local_map_ptr);
2362	raid = MR_LdRaidGet(ld, local_map_ptr);
2363	if (raid->capability.ldPiMode == MR_PROT_INFO_TYPE_CONTROLLER) {
2364		memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2365		cdb[0] =  MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
2366		cdb[7] =  MEGASAS_SCSI_ADDL_CDB_LEN;
2367
2368		if (scp->sc_data_direction == DMA_FROM_DEVICE)
2369			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
2370		else
2371			cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
2372		cdb[10] = MEGASAS_RD_WR_PROTECT_CHECK_ALL;
2373
2374		/* LBA */
2375		cdb[12] = (u8)((start_blk >> 56) & 0xff);
2376		cdb[13] = (u8)((start_blk >> 48) & 0xff);
2377		cdb[14] = (u8)((start_blk >> 40) & 0xff);
2378		cdb[15] = (u8)((start_blk >> 32) & 0xff);
2379		cdb[16] = (u8)((start_blk >> 24) & 0xff);
2380		cdb[17] = (u8)((start_blk >> 16) & 0xff);
2381		cdb[18] = (u8)((start_blk >> 8) & 0xff);
2382		cdb[19] = (u8)(start_blk & 0xff);
2383
2384		/* Logical block reference tag */
2385		io_request->CDB.EEDP32.PrimaryReferenceTag =
2386			cpu_to_be32(ref_tag);
2387		io_request->CDB.EEDP32.PrimaryApplicationTagMask = cpu_to_be16(0xffff);
2388		io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */
2389
2390		/* Transfer length */
2391		cdb[28] = (u8)((num_blocks >> 24) & 0xff);
2392		cdb[29] = (u8)((num_blocks >> 16) & 0xff);
2393		cdb[30] = (u8)((num_blocks >> 8) & 0xff);
2394		cdb[31] = (u8)(num_blocks & 0xff);
2395
2396		/* set SCSI IO EEDPFlags */
2397		if (scp->sc_data_direction == DMA_FROM_DEVICE) {
2398			io_request->EEDPFlags = cpu_to_le16(
2399				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG  |
2400				MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
2401				MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP |
2402				MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG |
2403				MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE |
2404				MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD);
2405		} else {
2406			io_request->EEDPFlags = cpu_to_le16(
2407				MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
2408				MPI2_SCSIIO_EEDPFLAGS_INSERT_OP);
2409		}
2410		io_request->Control |= cpu_to_le32((0x4 << 26));
2411		io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size);
2412	} else {
2413		/* Some drives don't support 16/12 byte CDB's, convert to 10 */
2414		if (((cdb_len == 12) || (cdb_len == 16)) &&
2415		    (start_blk <= 0xffffffff)) {
2416			if (cdb_len == 16) {
2417				opcode = cdb[0] == READ_16 ? READ_10 : WRITE_10;
2418				flagvals = cdb[1];
2419				groupnum = cdb[14];
2420				control = cdb[15];
2421			} else {
2422				opcode = cdb[0] == READ_12 ? READ_10 : WRITE_10;
2423				flagvals = cdb[1];
2424				groupnum = cdb[10];
2425				control = cdb[11];
2426			}
2427
2428			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2429
2430			cdb[0] = opcode;
2431			cdb[1] = flagvals;
2432			cdb[6] = groupnum;
2433			cdb[9] = control;
2434
2435			/* Transfer length */
2436			cdb[8] = (u8)(num_blocks & 0xff);
2437			cdb[7] = (u8)((num_blocks >> 8) & 0xff);
2438
2439			io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */
2440			cdb_len = 10;
2441		} else if ((cdb_len < 16) && (start_blk > 0xffffffff)) {
2442			/* Convert to 16 byte CDB for large LBA's */
2443			switch (cdb_len) {
2444			case 6:
2445				opcode = cdb[0] == READ_6 ? READ_16 : WRITE_16;
2446				control = cdb[5];
2447				break;
2448			case 10:
2449				opcode =
2450					cdb[0] == READ_10 ? READ_16 : WRITE_16;
2451				flagvals = cdb[1];
2452				groupnum = cdb[6];
2453				control = cdb[9];
2454				break;
2455			case 12:
2456				opcode =
2457					cdb[0] == READ_12 ? READ_16 : WRITE_16;
2458				flagvals = cdb[1];
2459				groupnum = cdb[10];
2460				control = cdb[11];
2461				break;
2462			}
2463
2464			memset(cdb, 0, sizeof(io_request->CDB.CDB32));
2465
2466			cdb[0] = opcode;
2467			cdb[1] = flagvals;
2468			cdb[14] = groupnum;
2469			cdb[15] = control;
2470
2471			/* Transfer length */
2472			cdb[13] = (u8)(num_blocks & 0xff);
2473			cdb[12] = (u8)((num_blocks >> 8) & 0xff);
2474			cdb[11] = (u8)((num_blocks >> 16) & 0xff);
2475			cdb[10] = (u8)((num_blocks >> 24) & 0xff);
2476
2477			io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */
2478			cdb_len = 16;
2479		}
2480
2481		/* Normal case, just load LBA here */
2482		switch (cdb_len) {
2483		case 6:
2484		{
2485			u8 val = cdb[1] & 0xE0;
2486			cdb[3] = (u8)(start_blk & 0xff);
2487			cdb[2] = (u8)((start_blk >> 8) & 0xff);
2488			cdb[1] = val | ((u8)(start_blk >> 16) & 0x1f);
2489			break;
2490		}
2491		case 10:
2492			cdb[5] = (u8)(start_blk & 0xff);
2493			cdb[4] = (u8)((start_blk >> 8) & 0xff);
2494			cdb[3] = (u8)((start_blk >> 16) & 0xff);
2495			cdb[2] = (u8)((start_blk >> 24) & 0xff);
2496			break;
2497		case 12:
2498			cdb[5]    = (u8)(start_blk & 0xff);
2499			cdb[4]    = (u8)((start_blk >> 8) & 0xff);
2500			cdb[3]    = (u8)((start_blk >> 16) & 0xff);
2501			cdb[2]    = (u8)((start_blk >> 24) & 0xff);
2502			break;
2503		case 16:
2504			cdb[9]    = (u8)(start_blk & 0xff);
2505			cdb[8]    = (u8)((start_blk >> 8) & 0xff);
2506			cdb[7]    = (u8)((start_blk >> 16) & 0xff);
2507			cdb[6]    = (u8)((start_blk >> 24) & 0xff);
2508			cdb[5]    = (u8)((start_blk >> 32) & 0xff);
2509			cdb[4]    = (u8)((start_blk >> 40) & 0xff);
2510			cdb[3]    = (u8)((start_blk >> 48) & 0xff);
2511			cdb[2]    = (u8)((start_blk >> 56) & 0xff);
2512			break;
2513		}
2514	}
2515}
2516
2517/**
2518 * megasas_stream_detect -	stream detection on read and and write IOs
2519 * @instance:		Adapter soft state
2520 * @cmd:		    Command to be prepared
2521 * @io_info:		IO Request info
2522 *
2523 */
2524
2525/** stream detection on read and and write IOs */
2526static void megasas_stream_detect(struct megasas_instance *instance,
2527				  struct megasas_cmd_fusion *cmd,
2528				  struct IO_REQUEST_INFO *io_info)
2529{
2530	struct fusion_context *fusion = instance->ctrl_context;
2531	u32 device_id = io_info->ldTgtId;
2532	struct LD_STREAM_DETECT *current_ld_sd
2533		= fusion->stream_detect_by_ld[device_id];
2534	u32 *track_stream = &current_ld_sd->mru_bit_map, stream_num;
2535	u32 shifted_values, unshifted_values;
2536	u32 index_value_mask, shifted_values_mask;
2537	int i;
2538	bool is_read_ahead = false;
2539	struct STREAM_DETECT *current_sd;
2540	/* find possible stream */
2541	for (i = 0; i < MAX_STREAMS_TRACKED; ++i) {
2542		stream_num = (*track_stream >>
2543			(i * BITS_PER_INDEX_STREAM)) &
2544			STREAM_MASK;
2545		current_sd = &current_ld_sd->stream_track[stream_num];
2546		/* if we found a stream, update the raid
2547		 *  context and also update the mruBitMap
2548		 */
2549		/*	boundary condition */
2550		if ((current_sd->next_seq_lba) &&
2551		    (io_info->ldStartBlock >= current_sd->next_seq_lba) &&
2552		    (io_info->ldStartBlock <= (current_sd->next_seq_lba + 32)) &&
2553		    (current_sd->is_read == io_info->isRead)) {
2554
2555			if ((io_info->ldStartBlock != current_sd->next_seq_lba)	&&
2556			    ((!io_info->isRead) || (!is_read_ahead)))
2557				/*
2558				 * Once the API availible we need to change this.
2559				 * At this point we are not allowing any gap
2560				 */
2561				continue;
2562
2563			SET_STREAM_DETECTED(cmd->io_request->RaidContext.raid_context_g35);
2564			current_sd->next_seq_lba =
2565			io_info->ldStartBlock + io_info->numBlocks;
2566			/*
2567			 *	update the mruBitMap LRU
2568			 */
2569			shifted_values_mask =
2570				(1 <<  i * BITS_PER_INDEX_STREAM) - 1;
2571			shifted_values = ((*track_stream & shifted_values_mask)
2572						<< BITS_PER_INDEX_STREAM);
2573			index_value_mask =
2574				STREAM_MASK << i * BITS_PER_INDEX_STREAM;
2575			unshifted_values =
2576				*track_stream & ~(shifted_values_mask |
2577				index_value_mask);
2578			*track_stream =
2579				unshifted_values | shifted_values | stream_num;
2580			return;
2581		}
2582	}
2583	/*
2584	 * if we did not find any stream, create a new one
2585	 * from the least recently used
2586	 */
2587	stream_num = (*track_stream >>
2588		((MAX_STREAMS_TRACKED - 1) * BITS_PER_INDEX_STREAM)) &
2589		STREAM_MASK;
2590	current_sd = &current_ld_sd->stream_track[stream_num];
2591	current_sd->is_read = io_info->isRead;
2592	current_sd->next_seq_lba = io_info->ldStartBlock + io_info->numBlocks;
2593	*track_stream = (((*track_stream & ZERO_LAST_STREAM) << 4) | stream_num);
2594	return;
2595}
2596
2597/**
2598 * megasas_set_raidflag_cpu_affinity - This function sets the cpu
2599 * affinity (cpu of the controller) and raid_flags in the raid context
2600 * based on IO type.
2601 *
2602 * @fusion:		Fusion context
2603 * @praid_context:	IO RAID context
2604 * @raid:		LD raid map
2605 * @fp_possible:	Is fast path possible?
2606 * @is_read:		Is read IO?
2607 * @scsi_buff_len:	SCSI command buffer length
2608 *
2609 */
2610static void
2611megasas_set_raidflag_cpu_affinity(struct fusion_context *fusion,
2612				union RAID_CONTEXT_UNION *praid_context,
2613				struct MR_LD_RAID *raid, bool fp_possible,
2614				u8 is_read, u32 scsi_buff_len)
2615{
2616	u8 cpu_sel = MR_RAID_CTX_CPUSEL_0;
2617	struct RAID_CONTEXT_G35 *rctx_g35;
2618
2619	rctx_g35 = &praid_context->raid_context_g35;
2620	if (fp_possible) {
2621		if (is_read) {
2622			if ((raid->cpuAffinity.pdRead.cpu0) &&
2623			    (raid->cpuAffinity.pdRead.cpu1))
2624				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2625			else if (raid->cpuAffinity.pdRead.cpu1)
2626				cpu_sel = MR_RAID_CTX_CPUSEL_1;
2627		} else {
2628			if ((raid->cpuAffinity.pdWrite.cpu0) &&
2629			    (raid->cpuAffinity.pdWrite.cpu1))
2630				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2631			else if (raid->cpuAffinity.pdWrite.cpu1)
2632				cpu_sel = MR_RAID_CTX_CPUSEL_1;
2633			/* Fast path cache by pass capable R0/R1 VD */
2634			if ((raid->level <= 1) &&
2635			    (raid->capability.fp_cache_bypass_capable)) {
2636				rctx_g35->routing_flags |=
2637					(1 << MR_RAID_CTX_ROUTINGFLAGS_SLD_SHIFT);
2638				rctx_g35->raid_flags =
2639					(MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS
2640					<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2641			}
2642		}
2643	} else {
2644		if (is_read) {
2645			if ((raid->cpuAffinity.ldRead.cpu0) &&
2646			    (raid->cpuAffinity.ldRead.cpu1))
2647				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2648			else if (raid->cpuAffinity.ldRead.cpu1)
2649				cpu_sel = MR_RAID_CTX_CPUSEL_1;
2650		} else {
2651			if ((raid->cpuAffinity.ldWrite.cpu0) &&
2652			    (raid->cpuAffinity.ldWrite.cpu1))
2653				cpu_sel = MR_RAID_CTX_CPUSEL_FCFS;
2654			else if (raid->cpuAffinity.ldWrite.cpu1)
2655				cpu_sel = MR_RAID_CTX_CPUSEL_1;
2656
2657			if (is_stream_detected(rctx_g35) &&
2658			    ((raid->level == 5) || (raid->level == 6)) &&
2659			    (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) &&
2660			    (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS))
2661				cpu_sel = MR_RAID_CTX_CPUSEL_0;
2662		}
2663	}
2664
2665	rctx_g35->routing_flags |=
2666		(cpu_sel << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2667
2668	/* Always give priority to MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2669	 * vs MR_RAID_FLAGS_IO_SUB_TYPE_CACHE_BYPASS.
2670	 * IO Subtype is not bitmap.
2671	 */
2672	if ((fusion->pcie_bw_limitation) && (raid->level == 1) && (!is_read) &&
2673			(scsi_buff_len > MR_LARGE_IO_MIN_SIZE)) {
2674		praid_context->raid_context_g35.raid_flags =
2675			(MR_RAID_FLAGS_IO_SUB_TYPE_LDIO_BW_LIMIT
2676			<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT);
2677	}
2678}
2679
2680/**
2681 * megasas_build_ldio_fusion -	Prepares IOs to devices
2682 * @instance:		Adapter soft state
2683 * @scp:		SCSI command
2684 * @cmd:		Command to be prepared
2685 *
2686 * Prepares the io_request and chain elements (sg_frame) for IO
2687 * The IO can be for PD (Fast Path) or LD
2688 */
2689static void
2690megasas_build_ldio_fusion(struct megasas_instance *instance,
2691			  struct scsi_cmnd *scp,
2692			  struct megasas_cmd_fusion *cmd)
2693{
2694	bool fp_possible;
2695	u16 ld;
2696	u32 start_lba_lo, start_lba_hi, device_id, datalength = 0;
2697	u32 scsi_buff_len;
2698	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2699	struct IO_REQUEST_INFO io_info;
2700	struct fusion_context *fusion;
2701	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2702	u8 *raidLUN;
2703	unsigned long spinlock_flags;
2704	struct MR_LD_RAID *raid = NULL;
2705	struct MR_PRIV_DEVICE *mrdev_priv;
2706	struct RAID_CONTEXT *rctx;
2707	struct RAID_CONTEXT_G35 *rctx_g35;
2708
2709	device_id = MEGASAS_DEV_INDEX(scp);
2710
2711	fusion = instance->ctrl_context;
2712
2713	io_request = cmd->io_request;
2714	rctx = &io_request->RaidContext.raid_context;
2715	rctx_g35 = &io_request->RaidContext.raid_context_g35;
2716
2717	rctx->virtual_disk_tgt_id = cpu_to_le16(device_id);
2718	rctx->status = 0;
2719	rctx->ex_status = 0;
2720
2721	start_lba_lo = 0;
2722	start_lba_hi = 0;
2723	fp_possible = false;
2724
2725	/*
2726	 * 6-byte READ(0x08) or WRITE(0x0A) cdb
2727	 */
2728	if (scp->cmd_len == 6) {
2729		datalength = (u32) scp->cmnd[4];
2730		start_lba_lo = ((u32) scp->cmnd[1] << 16) |
2731			((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3];
2732
2733		start_lba_lo &= 0x1FFFFF;
2734	}
2735
2736	/*
2737	 * 10-byte READ(0x28) or WRITE(0x2A) cdb
2738	 */
2739	else if (scp->cmd_len == 10) {
2740		datalength = (u32) scp->cmnd[8] |
2741			((u32) scp->cmnd[7] << 8);
2742		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
2743			((u32) scp->cmnd[3] << 16) |
2744			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2745	}
2746
2747	/*
2748	 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
2749	 */
2750	else if (scp->cmd_len == 12) {
2751		datalength = ((u32) scp->cmnd[6] << 24) |
2752			((u32) scp->cmnd[7] << 16) |
2753			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
2754		start_lba_lo = ((u32) scp->cmnd[2] << 24) |
2755			((u32) scp->cmnd[3] << 16) |
2756			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2757	}
2758
2759	/*
2760	 * 16-byte READ(0x88) or WRITE(0x8A) cdb
2761	 */
2762	else if (scp->cmd_len == 16) {
2763		datalength = ((u32) scp->cmnd[10] << 24) |
2764			((u32) scp->cmnd[11] << 16) |
2765			((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13];
2766		start_lba_lo = ((u32) scp->cmnd[6] << 24) |
2767			((u32) scp->cmnd[7] << 16) |
2768			((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9];
2769
2770		start_lba_hi = ((u32) scp->cmnd[2] << 24) |
2771			((u32) scp->cmnd[3] << 16) |
2772			((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5];
2773	}
2774
2775	memset(&io_info, 0, sizeof(struct IO_REQUEST_INFO));
2776	io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo;
2777	io_info.numBlocks = datalength;
2778	io_info.ldTgtId = device_id;
2779	io_info.r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2780	scsi_buff_len = scsi_bufflen(scp);
2781	io_request->DataLength = cpu_to_le32(scsi_buff_len);
2782	io_info.data_arms = 1;
2783
2784	if (scp->sc_data_direction == DMA_FROM_DEVICE)
2785		io_info.isRead = 1;
2786
2787	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2788	ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2789
2790	if (ld < instance->fw_supported_vd_count)
2791		raid = MR_LdRaidGet(ld, local_map_ptr);
2792
2793	if (!raid || (!fusion->fast_path_io)) {
2794		rctx->reg_lock_flags  = 0;
2795		fp_possible = false;
2796	} else {
2797		if (MR_BuildRaidContext(instance, &io_info, rctx,
2798					local_map_ptr, &raidLUN))
2799			fp_possible = (io_info.fpOkForIo > 0) ? true : false;
2800	}
2801
2802	megasas_get_msix_index(instance, scp, cmd, io_info.data_arms);
2803
2804	if (instance->adapter_type >= VENTURA_SERIES) {
2805		/* FP for Optimal raid level 1.
2806		 * All large RAID-1 writes (> 32 KiB, both WT and WB modes)
2807		 * are built by the driver as LD I/Os.
2808		 * All small RAID-1 WT writes (<= 32 KiB) are built as FP I/Os
2809		 * (there is never a reason to process these as buffered writes)
2810		 * All small RAID-1 WB writes (<= 32 KiB) are built as FP I/Os
2811		 * with the SLD bit asserted.
2812		 */
2813		if (io_info.r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
2814			mrdev_priv = scp->device->hostdata;
2815
2816			if (atomic_inc_return(&instance->fw_outstanding) >
2817				(instance->host->can_queue)) {
2818				fp_possible = false;
2819				atomic_dec(&instance->fw_outstanding);
2820			} else if (fusion->pcie_bw_limitation &&
2821				((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) ||
2822				   (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0))) {
2823				fp_possible = false;
2824				atomic_dec(&instance->fw_outstanding);
2825				if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE)
2826					atomic_set(&mrdev_priv->r1_ldio_hint,
2827						   instance->r1_ldio_hint_default);
2828			}
2829		}
2830
2831		if (!fp_possible ||
2832		    (io_info.isRead && io_info.ra_capable)) {
2833			spin_lock_irqsave(&instance->stream_lock,
2834					  spinlock_flags);
2835			megasas_stream_detect(instance, cmd, &io_info);
2836			spin_unlock_irqrestore(&instance->stream_lock,
2837					       spinlock_flags);
2838			/* In ventura if stream detected for a read and it is
2839			 * read ahead capable make this IO as LDIO
2840			 */
2841			if (is_stream_detected(rctx_g35))
2842				fp_possible = false;
2843		}
2844
2845		/* If raid is NULL, set CPU affinity to default CPU0 */
2846		if (raid)
2847			megasas_set_raidflag_cpu_affinity(fusion, &io_request->RaidContext,
2848				raid, fp_possible, io_info.isRead,
2849				scsi_buff_len);
2850		else
2851			rctx_g35->routing_flags |=
2852				(MR_RAID_CTX_CPUSEL_0 << MR_RAID_CTX_ROUTINGFLAGS_CPUSEL_SHIFT);
2853	}
2854
2855	if (fp_possible) {
2856		megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
2857				   local_map_ptr, start_lba_lo);
2858		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
2859		cmd->request_desc->SCSIIO.RequestFlags =
2860			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO
2861			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2862		if (instance->adapter_type == INVADER_SERIES) {
2863			rctx->type = MPI2_TYPE_CUDA;
2864			rctx->nseg = 0x1;
2865			io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2866			rctx->reg_lock_flags |=
2867			  (MR_RL_FLAGS_GRANT_DESTINATION_CUDA |
2868			   MR_RL_FLAGS_SEQ_NUM_ENABLE);
2869		} else if (instance->adapter_type >= VENTURA_SERIES) {
2870			rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
2871			rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2872			rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2873			io_request->IoFlags |=
2874				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
2875		}
2876		if (fusion->load_balance_info &&
2877			(fusion->load_balance_info[device_id].loadBalanceFlag) &&
2878			(io_info.isRead)) {
2879			io_info.devHandle =
2880				get_updated_dev_handle(instance,
2881					&fusion->load_balance_info[device_id],
2882					&io_info, local_map_ptr);
2883			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
2884			cmd->pd_r1_lb = io_info.pd_after_lb;
2885			if (instance->adapter_type >= VENTURA_SERIES)
2886				rctx_g35->span_arm = io_info.span_arm;
2887			else
2888				rctx->span_arm = io_info.span_arm;
2889
2890		} else
2891			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
2892
2893		if (instance->adapter_type >= VENTURA_SERIES)
2894			cmd->r1_alt_dev_handle = io_info.r1_alt_dev_handle;
2895		else
2896			cmd->r1_alt_dev_handle = MR_DEVHANDLE_INVALID;
2897
2898		if ((raidLUN[0] == 1) &&
2899			(local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].validHandles > 1)) {
2900			instance->dev_handle = !(instance->dev_handle);
2901			io_info.devHandle =
2902				local_map_ptr->raidMap.devHndlInfo[io_info.pd_after_lb].devHandle[instance->dev_handle];
2903		}
2904
2905		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
2906		io_request->DevHandle = io_info.devHandle;
2907		cmd->pd_interface = io_info.pd_interface;
2908		/* populate the LUN field */
2909		memcpy(io_request->LUN, raidLUN, 8);
2910	} else {
2911		rctx->timeout_value =
2912			cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec);
2913		cmd->request_desc->SCSIIO.RequestFlags =
2914			(MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO
2915			 << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2916		if (instance->adapter_type == INVADER_SERIES) {
2917			if (io_info.do_fp_rlbypass ||
2918			(rctx->reg_lock_flags == REGION_TYPE_UNUSED))
2919				cmd->request_desc->SCSIIO.RequestFlags =
2920					(MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK <<
2921					MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2922			rctx->type = MPI2_TYPE_CUDA;
2923			rctx->reg_lock_flags |=
2924				(MR_RL_FLAGS_GRANT_DESTINATION_CPU0 |
2925					MR_RL_FLAGS_SEQ_NUM_ENABLE);
2926			rctx->nseg = 0x1;
2927		} else if (instance->adapter_type >= VENTURA_SERIES) {
2928			rctx_g35->routing_flags |= (1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
2929			rctx_g35->nseg_type |= (1 << RAID_CONTEXT_NSEG_SHIFT);
2930			rctx_g35->nseg_type |= (MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
2931		}
2932		io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2933		io_request->DevHandle = cpu_to_le16(device_id);
2934
2935	} /* Not FP */
2936}
2937
2938/**
2939 * megasas_build_ld_nonrw_fusion - prepares non rw ios for virtual disk
2940 * @instance:		Adapter soft state
2941 * @scmd:		SCSI command
2942 * @cmd:		Command to be prepared
2943 *
2944 * Prepares the io_request frame for non-rw io cmds for vd.
2945 */
2946static void megasas_build_ld_nonrw_fusion(struct megasas_instance *instance,
2947			  struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd)
2948{
2949	u32 device_id;
2950	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
2951	u16 ld;
2952	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
2953	struct fusion_context *fusion = instance->ctrl_context;
2954	u8                          span, physArm;
2955	__le16                      devHandle;
2956	u32                         arRef, pd;
2957	struct MR_LD_RAID                  *raid;
2958	struct RAID_CONTEXT                *pRAID_Context;
2959	u8 fp_possible = 1;
2960
2961	io_request = cmd->io_request;
2962	device_id = MEGASAS_DEV_INDEX(scmd);
2963	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
2964	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
2965	/* get RAID_Context pointer */
2966	pRAID_Context = &io_request->RaidContext.raid_context;
2967	/* Check with FW team */
2968	pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
2969	pRAID_Context->reg_lock_row_lba    = 0;
2970	pRAID_Context->reg_lock_length    = 0;
2971
2972	if (fusion->fast_path_io && (
2973		device_id < instance->fw_supported_vd_count)) {
2974
2975		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
2976		if (ld >= instance->fw_supported_vd_count - 1)
2977			fp_possible = 0;
2978		else {
2979			raid = MR_LdRaidGet(ld, local_map_ptr);
2980			if (!(raid->capability.fpNonRWCapable))
2981				fp_possible = 0;
2982		}
2983	} else
2984		fp_possible = 0;
2985
2986	if (!fp_possible) {
2987		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
2988		io_request->DevHandle = cpu_to_le16(device_id);
2989		io_request->LUN[1] = scmd->device->lun;
2990		pRAID_Context->timeout_value =
2991			cpu_to_le16 (scmd->request->timeout / HZ);
2992		cmd->request_desc->SCSIIO.RequestFlags =
2993			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
2994			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
2995	} else {
2996
2997		/* set RAID context values */
2998		pRAID_Context->config_seq_num = raid->seqNum;
2999		if (instance->adapter_type < VENTURA_SERIES)
3000			pRAID_Context->reg_lock_flags = REGION_TYPE_SHARED_READ;
3001		pRAID_Context->timeout_value =
3002			cpu_to_le16(raid->fpIoTimeoutForLd);
3003
3004		/* get the DevHandle for the PD (since this is
3005		   fpNonRWCapable, this is a single disk RAID0) */
3006		span = physArm = 0;
3007		arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr);
3008		pd = MR_ArPdGet(arRef, physArm, local_map_ptr);
3009		devHandle = MR_PdDevHandleGet(pd, local_map_ptr);
3010
3011		/* build request descriptor */
3012		cmd->request_desc->SCSIIO.RequestFlags =
3013			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
3014			MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3015		cmd->request_desc->SCSIIO.DevHandle = devHandle;
3016
3017		/* populate the LUN field */
3018		memcpy(io_request->LUN, raid->LUN, 8);
3019
3020		/* build the raidScsiIO structure */
3021		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
3022		io_request->DevHandle = devHandle;
3023	}
3024}
3025
3026/**
3027 * megasas_build_syspd_fusion - prepares rw/non-rw ios for syspd
3028 * @instance:		Adapter soft state
3029 * @scmd:		SCSI command
3030 * @cmd:		Command to be prepared
3031 * @fp_possible:	parameter to detect fast path or firmware path io.
3032 *
3033 * Prepares the io_request frame for rw/non-rw io cmds for syspds
3034 */
3035static void
3036megasas_build_syspd_fusion(struct megasas_instance *instance,
3037	struct scsi_cmnd *scmd, struct megasas_cmd_fusion *cmd,
3038	bool fp_possible)
3039{
3040	u32 device_id;
3041	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
3042	u16 pd_index = 0;
3043	u16 os_timeout_value;
3044	u16 timeout_limit;
3045	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
3046	struct RAID_CONTEXT	*pRAID_Context;
3047	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
3048	struct MR_PRIV_DEVICE *mr_device_priv_data;
3049	struct fusion_context *fusion = instance->ctrl_context;
3050	pd_sync = (void *)fusion->pd_seq_sync[(instance->pd_seq_map_id - 1) & 1];
3051
3052	device_id = MEGASAS_DEV_INDEX(scmd);
3053	pd_index = MEGASAS_PD_INDEX(scmd);
3054	os_timeout_value = scmd->request->timeout / HZ;
3055	mr_device_priv_data = scmd->device->hostdata;
3056	cmd->pd_interface = mr_device_priv_data->interface_type;
3057
3058	io_request = cmd->io_request;
3059	/* get RAID_Context pointer */
3060	pRAID_Context = &io_request->RaidContext.raid_context;
3061	pRAID_Context->reg_lock_flags = 0;
3062	pRAID_Context->reg_lock_row_lba = 0;
3063	pRAID_Context->reg_lock_length = 0;
3064	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
3065	io_request->LUN[1] = scmd->device->lun;
3066	pRAID_Context->raid_flags = MR_RAID_FLAGS_IO_SUB_TYPE_SYSTEM_PD
3067		<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
3068
3069	/* If FW supports PD sequence number */
3070	if (instance->support_seqnum_jbod_fp) {
3071		if (instance->use_seqnum_jbod_fp &&
3072			instance->pd_list[pd_index].driveType == TYPE_DISK) {
3073
3074			/* More than 256 PD/JBOD support for Ventura */
3075			if (instance->support_morethan256jbod)
3076				pRAID_Context->virtual_disk_tgt_id =
3077					pd_sync->seq[pd_index].pd_target_id;
3078			else
3079				pRAID_Context->virtual_disk_tgt_id =
3080					cpu_to_le16(device_id +
3081					(MAX_PHYSICAL_DEVICES - 1));
3082			pRAID_Context->config_seq_num =
3083				pd_sync->seq[pd_index].seqNum;
3084			io_request->DevHandle =
3085				pd_sync->seq[pd_index].devHandle;
3086			if (instance->adapter_type >= VENTURA_SERIES) {
3087				io_request->RaidContext.raid_context_g35.routing_flags |=
3088					(1 << MR_RAID_CTX_ROUTINGFLAGS_SQN_SHIFT);
3089				io_request->RaidContext.raid_context_g35.nseg_type |=
3090					(1 << RAID_CONTEXT_NSEG_SHIFT);
3091				io_request->RaidContext.raid_context_g35.nseg_type |=
3092					(MPI2_TYPE_CUDA << RAID_CONTEXT_TYPE_SHIFT);
3093			} else {
3094				pRAID_Context->type = MPI2_TYPE_CUDA;
3095				pRAID_Context->nseg = 0x1;
3096				pRAID_Context->reg_lock_flags |=
3097					(MR_RL_FLAGS_SEQ_NUM_ENABLE |
3098					 MR_RL_FLAGS_GRANT_DESTINATION_CUDA);
3099			}
3100		} else {
3101			pRAID_Context->virtual_disk_tgt_id =
3102				cpu_to_le16(device_id +
3103				(MAX_PHYSICAL_DEVICES - 1));
3104			pRAID_Context->config_seq_num = 0;
3105			io_request->DevHandle = cpu_to_le16(0xFFFF);
3106		}
3107	} else {
3108		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
3109		pRAID_Context->config_seq_num = 0;
3110
3111		if (fusion->fast_path_io) {
3112			local_map_ptr =
3113				fusion->ld_drv_map[(instance->map_id & 1)];
3114			io_request->DevHandle =
3115				local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
3116		} else {
3117			io_request->DevHandle = cpu_to_le16(0xFFFF);
3118		}
3119	}
3120
3121	cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
3122
3123	megasas_get_msix_index(instance, scmd, cmd, 1);
3124
3125	if (!fp_possible) {
3126		/* system pd firmware path */
3127		io_request->Function  = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST;
3128		cmd->request_desc->SCSIIO.RequestFlags =
3129			(MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3130				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3131		pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value);
3132		pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id);
3133	} else {
3134		if (os_timeout_value)
3135			os_timeout_value++;
3136
3137		/* system pd Fast Path */
3138		io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
3139		timeout_limit = (scmd->device->type == TYPE_DISK) ?
3140				255 : 0xFFFF;
3141		pRAID_Context->timeout_value =
3142			cpu_to_le16((os_timeout_value > timeout_limit) ?
3143			timeout_limit : os_timeout_value);
3144		if (instance->adapter_type >= INVADER_SERIES)
3145			io_request->IoFlags |=
3146				cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH);
3147
3148		cmd->request_desc->SCSIIO.RequestFlags =
3149			(MPI2_REQ_DESCRIPT_FLAGS_FP_IO <<
3150				MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3151	}
3152}
3153
3154/**
3155 * megasas_build_io_fusion -	Prepares IOs to devices
3156 * @instance:		Adapter soft state
3157 * @scp:		SCSI command
3158 * @cmd:		Command to be prepared
3159 *
3160 * Invokes helper functions to prepare request frames
3161 * and sets flags appropriate for IO/Non-IO cmd
3162 */
3163static int
3164megasas_build_io_fusion(struct megasas_instance *instance,
3165			struct scsi_cmnd *scp,
3166			struct megasas_cmd_fusion *cmd)
3167{
3168	int sge_count;
3169	u8  cmd_type;
3170	struct MPI2_RAID_SCSI_IO_REQUEST *io_request = cmd->io_request;
3171	struct MR_PRIV_DEVICE *mr_device_priv_data;
3172	mr_device_priv_data = scp->device->hostdata;
3173
3174	/* Zero out some fields so they don't get reused */
3175	memset(io_request->LUN, 0x0, 8);
3176	io_request->CDB.EEDP32.PrimaryReferenceTag = 0;
3177	io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0;
3178	io_request->EEDPFlags = 0;
3179	io_request->Control = 0;
3180	io_request->EEDPBlockSize = 0;
3181	io_request->ChainOffset = 0;
3182	io_request->RaidContext.raid_context.raid_flags = 0;
3183	io_request->RaidContext.raid_context.type = 0;
3184	io_request->RaidContext.raid_context.nseg = 0;
3185
3186	memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len);
3187	/*
3188	 * Just the CDB length,rest of the Flags are zero
3189	 * This will be modified for FP in build_ldio_fusion
3190	 */
3191	io_request->IoFlags = cpu_to_le16(scp->cmd_len);
3192
3193	switch (cmd_type = megasas_cmd_type(scp)) {
3194	case READ_WRITE_LDIO:
3195		megasas_build_ldio_fusion(instance, scp, cmd);
3196		break;
3197	case NON_READ_WRITE_LDIO:
3198		megasas_build_ld_nonrw_fusion(instance, scp, cmd);
3199		break;
3200	case READ_WRITE_SYSPDIO:
3201		megasas_build_syspd_fusion(instance, scp, cmd, true);
3202		break;
3203	case NON_READ_WRITE_SYSPDIO:
3204		if (instance->secure_jbod_support ||
3205		    mr_device_priv_data->is_tm_capable)
3206			megasas_build_syspd_fusion(instance, scp, cmd, false);
3207		else
3208			megasas_build_syspd_fusion(instance, scp, cmd, true);
3209		break;
3210	default:
3211		break;
3212	}
3213
3214	/*
3215	 * Construct SGL
3216	 */
3217
3218	sge_count = megasas_make_sgl(instance, scp, cmd);
3219
3220	if (sge_count > instance->max_num_sge || (sge_count < 0)) {
3221		dev_err(&instance->pdev->dev,
3222			"%s %d sge_count (%d) is out of range. Range is:  0-%d\n",
3223			__func__, __LINE__, sge_count, instance->max_num_sge);
3224		return 1;
3225	}
3226
3227	if (instance->adapter_type >= VENTURA_SERIES) {
3228		set_num_sge(&io_request->RaidContext.raid_context_g35, sge_count);
3229		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.routing_flags);
3230		cpu_to_le16s(&io_request->RaidContext.raid_context_g35.nseg_type);
3231	} else {
3232		/* numSGE store lower 8 bit of sge_count.
3233		 * numSGEExt store higher 8 bit of sge_count
3234		 */
3235		io_request->RaidContext.raid_context.num_sge = sge_count;
3236		io_request->RaidContext.raid_context.num_sge_ext =
3237			(u8)(sge_count >> 8);
3238	}
3239
3240	io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
3241
3242	if (scp->sc_data_direction == DMA_TO_DEVICE)
3243		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
3244	else if (scp->sc_data_direction == DMA_FROM_DEVICE)
3245		io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
3246
3247	io_request->SGLOffset0 =
3248		offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4;
3249
3250	io_request->SenseBufferLowAddress =
3251		cpu_to_le32(lower_32_bits(cmd->sense_phys_addr));
3252	io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
3253
3254	cmd->scmd = scp;
3255	scp->SCp.ptr = (char *)cmd;
3256
3257	return 0;
3258}
3259
3260static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3261megasas_get_request_descriptor(struct megasas_instance *instance, u16 index)
3262{
3263	u8 *p;
3264	struct fusion_context *fusion;
3265
3266	fusion = instance->ctrl_context;
3267	p = fusion->req_frames_desc +
3268		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) * index;
3269
3270	return (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)p;
3271}
3272
3273
3274/* megasas_prepate_secondRaid1_IO
3275 *  It prepares the raid 1 second IO
3276 */
3277static void megasas_prepare_secondRaid1_IO(struct megasas_instance *instance,
3278					   struct megasas_cmd_fusion *cmd,
3279					   struct megasas_cmd_fusion *r1_cmd)
3280{
3281	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc, *req_desc2 = NULL;
3282	struct fusion_context *fusion;
3283	fusion = instance->ctrl_context;
3284	req_desc = cmd->request_desc;
3285	/* copy the io request frame as well as 8 SGEs data for r1 command*/
3286	memcpy(r1_cmd->io_request, cmd->io_request,
3287	       (sizeof(struct MPI2_RAID_SCSI_IO_REQUEST)));
3288	memcpy(&r1_cmd->io_request->SGL, &cmd->io_request->SGL,
3289	       (fusion->max_sge_in_main_msg * sizeof(union MPI2_SGE_IO_UNION)));
3290	/*sense buffer is different for r1 command*/
3291	r1_cmd->io_request->SenseBufferLowAddress =
3292			cpu_to_le32(lower_32_bits(r1_cmd->sense_phys_addr));
3293	r1_cmd->scmd = cmd->scmd;
3294	req_desc2 = megasas_get_request_descriptor(instance,
3295						   (r1_cmd->index - 1));
3296	req_desc2->Words = 0;
3297	r1_cmd->request_desc = req_desc2;
3298	req_desc2->SCSIIO.SMID = cpu_to_le16(r1_cmd->index);
3299	req_desc2->SCSIIO.RequestFlags = req_desc->SCSIIO.RequestFlags;
3300	r1_cmd->request_desc->SCSIIO.DevHandle = cmd->r1_alt_dev_handle;
3301	r1_cmd->io_request->DevHandle = cmd->r1_alt_dev_handle;
3302	r1_cmd->r1_alt_dev_handle = cmd->io_request->DevHandle;
3303	cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
3304			cpu_to_le16(r1_cmd->index);
3305	r1_cmd->io_request->RaidContext.raid_context_g35.flow_specific.peer_smid =
3306			cpu_to_le16(cmd->index);
3307	/*MSIxIndex of both commands request descriptors should be same*/
3308	r1_cmd->request_desc->SCSIIO.MSIxIndex =
3309			cmd->request_desc->SCSIIO.MSIxIndex;
3310	/*span arm is different for r1 cmd*/
3311	r1_cmd->io_request->RaidContext.raid_context_g35.span_arm =
3312			cmd->io_request->RaidContext.raid_context_g35.span_arm + 1;
3313}
3314
3315/**
3316 * megasas_build_and_issue_cmd_fusion -Main routine for building and
3317 *                                     issuing non IOCTL cmd
3318 * @instance:			Adapter soft state
3319 * @scmd:			pointer to scsi cmd from OS
3320 */
3321static u32
3322megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance,
3323				   struct scsi_cmnd *scmd)
3324{
3325	struct megasas_cmd_fusion *cmd, *r1_cmd = NULL;
3326	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3327	u32 index;
3328
3329	if ((megasas_cmd_type(scmd) == READ_WRITE_LDIO) &&
3330		instance->ldio_threshold &&
3331		(atomic_inc_return(&instance->ldio_outstanding) >
3332		instance->ldio_threshold)) {
3333		atomic_dec(&instance->ldio_outstanding);
3334		return SCSI_MLQUEUE_DEVICE_BUSY;
3335	}
3336
3337	if (atomic_inc_return(&instance->fw_outstanding) >
3338			instance->host->can_queue) {
3339		atomic_dec(&instance->fw_outstanding);
3340		return SCSI_MLQUEUE_HOST_BUSY;
3341	}
3342
3343	cmd = megasas_get_cmd_fusion(instance, scmd->request->tag);
3344
3345	if (!cmd) {
3346		atomic_dec(&instance->fw_outstanding);
3347		return SCSI_MLQUEUE_HOST_BUSY;
3348	}
3349
3350	index = cmd->index;
3351
3352	req_desc = megasas_get_request_descriptor(instance, index-1);
3353
3354	req_desc->Words = 0;
3355	cmd->request_desc = req_desc;
3356
3357	if (megasas_build_io_fusion(instance, scmd, cmd)) {
3358		megasas_return_cmd_fusion(instance, cmd);
3359		dev_err(&instance->pdev->dev, "Error building command\n");
3360		cmd->request_desc = NULL;
3361		atomic_dec(&instance->fw_outstanding);
3362		return SCSI_MLQUEUE_HOST_BUSY;
3363	}
3364
3365	req_desc = cmd->request_desc;
3366	req_desc->SCSIIO.SMID = cpu_to_le16(index);
3367
3368	if (cmd->io_request->ChainOffset != 0 &&
3369	    cmd->io_request->ChainOffset != 0xF)
3370		dev_err(&instance->pdev->dev, "The chain offset value is not "
3371		       "correct : %x\n", cmd->io_request->ChainOffset);
3372	/*
3373	 *	if it is raid 1/10 fp write capable.
3374	 *	try to get second command from pool and construct it.
3375	 *	From FW, it has confirmed that lba values of two PDs
3376	 *	corresponds to single R1/10 LD are always same
3377	 *
3378	 */
3379	/*	driver side count always should be less than max_fw_cmds
3380	 *	to get new command
3381	 */
3382	if (cmd->r1_alt_dev_handle != MR_DEVHANDLE_INVALID) {
3383		r1_cmd = megasas_get_cmd_fusion(instance,
3384				(scmd->request->tag + instance->max_fw_cmds));
3385		megasas_prepare_secondRaid1_IO(instance, cmd, r1_cmd);
3386	}
3387
3388
3389	/*
3390	 * Issue the command to the FW
3391	 */
3392
3393	megasas_fire_cmd_fusion(instance, req_desc);
3394
3395	if (r1_cmd)
3396		megasas_fire_cmd_fusion(instance, r1_cmd->request_desc);
3397
3398
3399	return 0;
3400}
3401
3402/**
3403 * megasas_complete_r1_command -
3404 * completes R1 FP write commands which has valid peer smid
3405 * @instance:			Adapter soft state
3406 * @cmd:			MPT command frame
3407 *
3408 */
3409static inline void
3410megasas_complete_r1_command(struct megasas_instance *instance,
3411			    struct megasas_cmd_fusion *cmd)
3412{
3413	u8 *sense, status, ex_status;
3414	u32 data_length;
3415	u16 peer_smid;
3416	struct fusion_context *fusion;
3417	struct megasas_cmd_fusion *r1_cmd = NULL;
3418	struct scsi_cmnd *scmd_local = NULL;
3419	struct RAID_CONTEXT_G35 *rctx_g35;
3420
3421	rctx_g35 = &cmd->io_request->RaidContext.raid_context_g35;
3422	fusion = instance->ctrl_context;
3423	peer_smid = le16_to_cpu(rctx_g35->flow_specific.peer_smid);
3424
3425	r1_cmd = fusion->cmd_list[peer_smid - 1];
3426	scmd_local = cmd->scmd;
3427	status = rctx_g35->status;
3428	ex_status = rctx_g35->ex_status;
3429	data_length = cmd->io_request->DataLength;
3430	sense = cmd->sense;
3431
3432	cmd->cmd_completed = true;
3433
3434	/* Check if peer command is completed or not*/
3435	if (r1_cmd->cmd_completed) {
3436		rctx_g35 = &r1_cmd->io_request->RaidContext.raid_context_g35;
3437		if (rctx_g35->status != MFI_STAT_OK) {
3438			status = rctx_g35->status;
3439			ex_status = rctx_g35->ex_status;
3440			data_length = r1_cmd->io_request->DataLength;
3441			sense = r1_cmd->sense;
3442		}
3443
3444		megasas_return_cmd_fusion(instance, r1_cmd);
3445		map_cmd_status(fusion, scmd_local, status, ex_status,
3446			       le32_to_cpu(data_length), sense);
3447		if (instance->ldio_threshold &&
3448		    megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
3449			atomic_dec(&instance->ldio_outstanding);
3450		scmd_local->SCp.ptr = NULL;
3451		megasas_return_cmd_fusion(instance, cmd);
3452		scsi_dma_unmap(scmd_local);
3453		scmd_local->scsi_done(scmd_local);
3454	}
3455}
3456
3457/**
3458 * complete_cmd_fusion -	Completes command
3459 * @instance:			Adapter soft state
3460 * @MSIxIndex:			MSI number
3461 * @irq_context:		IRQ context
3462 *
3463 * Completes all commands that is in reply descriptor queue
3464 */
3465static int
3466complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex,
3467		    struct megasas_irq_context *irq_context)
3468{
3469	union MPI2_REPLY_DESCRIPTORS_UNION *desc;
3470	struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc;
3471	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
3472	struct fusion_context *fusion;
3473	struct megasas_cmd *cmd_mfi;
3474	struct megasas_cmd_fusion *cmd_fusion;
3475	u16 smid, num_completed;
3476	u8 reply_descript_type, *sense, status, extStatus;
3477	u32 device_id, data_length;
3478	union desc_value d_val;
3479	struct LD_LOAD_BALANCE_INFO *lbinfo;
3480	int threshold_reply_count = 0;
3481	struct scsi_cmnd *scmd_local = NULL;
3482	struct MR_TASK_MANAGE_REQUEST *mr_tm_req;
3483	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req;
3484
3485	fusion = instance->ctrl_context;
3486
3487	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3488		return IRQ_HANDLED;
3489
3490	desc = fusion->reply_frames_desc[MSIxIndex] +
3491				fusion->last_reply_idx[MSIxIndex];
3492
3493	reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3494
3495	d_val.word = desc->Words;
3496
3497	reply_descript_type = reply_desc->ReplyFlags &
3498		MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3499
3500	if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3501		return IRQ_NONE;
3502
3503	num_completed = 0;
3504
3505	while (d_val.u.low != cpu_to_le32(UINT_MAX) &&
3506	       d_val.u.high != cpu_to_le32(UINT_MAX)) {
3507
3508		smid = le16_to_cpu(reply_desc->SMID);
3509		cmd_fusion = fusion->cmd_list[smid - 1];
3510		scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
3511						cmd_fusion->io_request;
3512
3513		scmd_local = cmd_fusion->scmd;
3514		status = scsi_io_req->RaidContext.raid_context.status;
3515		extStatus = scsi_io_req->RaidContext.raid_context.ex_status;
3516		sense = cmd_fusion->sense;
3517		data_length = scsi_io_req->DataLength;
3518
3519		switch (scsi_io_req->Function) {
3520		case MPI2_FUNCTION_SCSI_TASK_MGMT:
3521			mr_tm_req = (struct MR_TASK_MANAGE_REQUEST *)
3522						cmd_fusion->io_request;
3523			mpi_tm_req = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *)
3524						&mr_tm_req->TmRequest;
3525			dev_dbg(&instance->pdev->dev, "TM completion:"
3526				"type: 0x%x TaskMID: 0x%x\n",
3527				mpi_tm_req->TaskType, mpi_tm_req->TaskMID);
3528			complete(&cmd_fusion->done);
3529			break;
3530		case MPI2_FUNCTION_SCSI_IO_REQUEST:  /*Fast Path IO.*/
3531			/* Update load balancing info */
3532			if (fusion->load_balance_info &&
3533			    (cmd_fusion->scmd->SCp.Status &
3534			    MEGASAS_LOAD_BALANCE_FLAG)) {
3535				device_id = MEGASAS_DEV_INDEX(scmd_local);
3536				lbinfo = &fusion->load_balance_info[device_id];
3537				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
3538				cmd_fusion->scmd->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
3539			}
3540			fallthrough;	/* and complete IO */
3541		case MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST: /* LD-IO Path */
3542			atomic_dec(&instance->fw_outstanding);
3543			if (cmd_fusion->r1_alt_dev_handle == MR_DEVHANDLE_INVALID) {
3544				map_cmd_status(fusion, scmd_local, status,
3545					       extStatus, le32_to_cpu(data_length),
3546					       sense);
3547				if (instance->ldio_threshold &&
3548				    (megasas_cmd_type(scmd_local) == READ_WRITE_LDIO))
3549					atomic_dec(&instance->ldio_outstanding);
3550				scmd_local->SCp.ptr = NULL;
3551				megasas_return_cmd_fusion(instance, cmd_fusion);
3552				scsi_dma_unmap(scmd_local);
3553				scmd_local->scsi_done(scmd_local);
3554			} else	/* Optimal VD - R1 FP command completion. */
3555				megasas_complete_r1_command(instance, cmd_fusion);
3556			break;
3557		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
3558			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
3559			/* Poll mode. Dummy free.
3560			 * In case of Interrupt mode, caller has reverse check.
3561			 */
3562			if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
3563				cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
3564				megasas_return_cmd(instance, cmd_mfi);
3565			} else
3566				megasas_complete_cmd(instance, cmd_mfi, DID_OK);
3567			break;
3568		}
3569
3570		fusion->last_reply_idx[MSIxIndex]++;
3571		if (fusion->last_reply_idx[MSIxIndex] >=
3572		    fusion->reply_q_depth)
3573			fusion->last_reply_idx[MSIxIndex] = 0;
3574
3575		desc->Words = cpu_to_le64(ULLONG_MAX);
3576		num_completed++;
3577		threshold_reply_count++;
3578
3579		/* Get the next reply descriptor */
3580		if (!fusion->last_reply_idx[MSIxIndex])
3581			desc = fusion->reply_frames_desc[MSIxIndex];
3582		else
3583			desc++;
3584
3585		reply_desc =
3586		  (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc;
3587
3588		d_val.word = desc->Words;
3589
3590		reply_descript_type = reply_desc->ReplyFlags &
3591			MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
3592
3593		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
3594			break;
3595		/*
3596		 * Write to reply post host index register after completing threshold
3597		 * number of reply counts and still there are more replies in reply queue
3598		 * pending to be completed
3599		 */
3600		if (threshold_reply_count >= instance->threshold_reply_count) {
3601			if (instance->msix_combined)
3602				writel(((MSIxIndex & 0x7) << 24) |
3603					fusion->last_reply_idx[MSIxIndex],
3604					instance->reply_post_host_index_addr[MSIxIndex/8]);
3605			else
3606				writel((MSIxIndex << 24) |
3607					fusion->last_reply_idx[MSIxIndex],
3608					instance->reply_post_host_index_addr[0]);
3609			threshold_reply_count = 0;
3610			if (irq_context) {
3611				if (!irq_context->irq_poll_scheduled) {
3612					irq_context->irq_poll_scheduled = true;
3613					irq_context->irq_line_enable = true;
3614					irq_poll_sched(&irq_context->irqpoll);
3615				}
3616				return num_completed;
3617			}
3618		}
3619	}
3620
3621	if (num_completed) {
3622		wmb();
3623		if (instance->msix_combined)
3624			writel(((MSIxIndex & 0x7) << 24) |
3625				fusion->last_reply_idx[MSIxIndex],
3626				instance->reply_post_host_index_addr[MSIxIndex/8]);
3627		else
3628			writel((MSIxIndex << 24) |
3629				fusion->last_reply_idx[MSIxIndex],
3630				instance->reply_post_host_index_addr[0]);
3631		megasas_check_and_restore_queue_depth(instance);
3632	}
3633	return num_completed;
3634}
3635
3636/**
3637 * megasas_enable_irq_poll() - enable irqpoll
3638 * @instance:			Adapter soft state
3639 */
3640static void megasas_enable_irq_poll(struct megasas_instance *instance)
3641{
3642	u32 count, i;
3643	struct megasas_irq_context *irq_ctx;
3644
3645	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3646
3647	for (i = 0; i < count; i++) {
3648		irq_ctx = &instance->irq_context[i];
3649		irq_poll_enable(&irq_ctx->irqpoll);
3650	}
3651}
3652
3653/**
3654 * megasas_sync_irqs -	Synchronizes all IRQs owned by adapter
3655 * @instance_addr:			Adapter soft state address
3656 */
3657static void megasas_sync_irqs(unsigned long instance_addr)
3658{
3659	u32 count, i;
3660	struct megasas_instance *instance =
3661		(struct megasas_instance *)instance_addr;
3662	struct megasas_irq_context *irq_ctx;
3663
3664	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3665
3666	for (i = 0; i < count; i++) {
3667		synchronize_irq(pci_irq_vector(instance->pdev, i));
3668		irq_ctx = &instance->irq_context[i];
3669		irq_poll_disable(&irq_ctx->irqpoll);
3670		if (irq_ctx->irq_poll_scheduled) {
3671			irq_ctx->irq_poll_scheduled = false;
3672			enable_irq(irq_ctx->os_irq);
3673			complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
3674		}
3675	}
3676}
3677
3678/**
3679 * megasas_irqpoll() - process a queue for completed reply descriptors
3680 * @irqpoll:	IRQ poll structure associated with queue to poll.
3681 * @budget:	Threshold of reply descriptors to process per poll.
3682 *
3683 * Return: The number of entries processed.
3684 */
3685
3686int megasas_irqpoll(struct irq_poll *irqpoll, int budget)
3687{
3688	struct megasas_irq_context *irq_ctx;
3689	struct megasas_instance *instance;
3690	int num_entries;
3691
3692	irq_ctx = container_of(irqpoll, struct megasas_irq_context, irqpoll);
3693	instance = irq_ctx->instance;
3694
3695	if (irq_ctx->irq_line_enable) {
3696		disable_irq_nosync(irq_ctx->os_irq);
3697		irq_ctx->irq_line_enable = false;
3698	}
3699
3700	num_entries = complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
3701	if (num_entries < budget) {
3702		irq_poll_complete(irqpoll);
3703		irq_ctx->irq_poll_scheduled = false;
3704		enable_irq(irq_ctx->os_irq);
3705		complete_cmd_fusion(instance, irq_ctx->MSIxIndex, irq_ctx);
3706	}
3707
3708	return num_entries;
3709}
3710
3711/**
3712 * megasas_complete_cmd_dpc_fusion -	Completes command
3713 * @instance_addr:			Adapter soft state address
3714 *
3715 * Tasklet to complete cmds
3716 */
3717static void
3718megasas_complete_cmd_dpc_fusion(unsigned long instance_addr)
3719{
3720	struct megasas_instance *instance =
3721		(struct megasas_instance *)instance_addr;
3722	struct megasas_irq_context *irq_ctx = NULL;
3723	u32 count, MSIxIndex;
3724
3725	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
3726
3727	/* If we have already declared adapter dead, donot complete cmds */
3728	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR)
3729		return;
3730
3731	for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) {
3732		irq_ctx = &instance->irq_context[MSIxIndex];
3733		complete_cmd_fusion(instance, MSIxIndex, irq_ctx);
3734	}
3735}
3736
3737/**
3738 * megasas_isr_fusion - isr entry point
3739 * @irq:	IRQ number
3740 * @devp:	IRQ context
3741 */
3742static irqreturn_t megasas_isr_fusion(int irq, void *devp)
3743{
3744	struct megasas_irq_context *irq_context = devp;
3745	struct megasas_instance *instance = irq_context->instance;
3746	u32 mfiStatus;
3747
3748	if (instance->mask_interrupts)
3749		return IRQ_NONE;
3750
3751	if (irq_context->irq_poll_scheduled)
3752		return IRQ_HANDLED;
3753
3754	if (!instance->msix_vectors) {
3755		mfiStatus = instance->instancet->clear_intr(instance);
3756		if (!mfiStatus)
3757			return IRQ_NONE;
3758	}
3759
3760	/* If we are resetting, bail */
3761	if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) {
3762		instance->instancet->clear_intr(instance);
3763		return IRQ_HANDLED;
3764	}
3765
3766	return complete_cmd_fusion(instance, irq_context->MSIxIndex, irq_context)
3767			? IRQ_HANDLED : IRQ_NONE;
3768}
3769
3770/**
3771 * build_mpt_mfi_pass_thru - builds a cmd fo MFI Pass thru
3772 * @instance:			Adapter soft state
3773 * @mfi_cmd:			megasas_cmd pointer
3774 *
3775 */
3776static void
3777build_mpt_mfi_pass_thru(struct megasas_instance *instance,
3778			struct megasas_cmd *mfi_cmd)
3779{
3780	struct MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
3781	struct MPI2_RAID_SCSI_IO_REQUEST *io_req;
3782	struct megasas_cmd_fusion *cmd;
3783	struct fusion_context *fusion;
3784	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
3785
3786	fusion = instance->ctrl_context;
3787
3788	cmd = megasas_get_cmd_fusion(instance,
3789			instance->max_scsi_cmds + mfi_cmd->index);
3790
3791	/*  Save the smid. To be used for returning the cmd */
3792	mfi_cmd->context.smid = cmd->index;
3793
3794	/*
3795	 * For cmds where the flag is set, store the flag and check
3796	 * on completion. For cmds with this flag, don't call
3797	 * megasas_complete_cmd
3798	 */
3799
3800	if (frame_hdr->flags & cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE))
3801		mfi_cmd->flags |= DRV_DCMD_POLLED_MODE;
3802
3803	io_req = cmd->io_request;
3804
3805	if (instance->adapter_type >= INVADER_SERIES) {
3806		struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end =
3807			(struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL;
3808		sgl_ptr_end += fusion->max_sge_in_main_msg - 1;
3809		sgl_ptr_end->Flags = 0;
3810	}
3811
3812	mpi25_ieee_chain =
3813	  (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;
3814
3815	io_req->Function    = MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
3816	io_req->SGLOffset0  = offsetof(struct MPI2_RAID_SCSI_IO_REQUEST,
3817				       SGL) / 4;
3818	io_req->ChainOffset = fusion->chain_offset_mfi_pthru;
3819
3820	mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr);
3821
3822	mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
3823		MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;
3824
3825	mpi25_ieee_chain->Length = cpu_to_le32(instance->mfi_frame_size);
3826}
3827
3828/**
3829 * build_mpt_cmd - Calls helper function to build a cmd MFI Pass thru cmd
3830 * @instance:			Adapter soft state
3831 * @cmd:			mfi cmd to build
3832 *
3833 */
3834static union MEGASAS_REQUEST_DESCRIPTOR_UNION *
3835build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
3836{
3837	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL;
3838	u16 index;
3839
3840	build_mpt_mfi_pass_thru(instance, cmd);
3841	index = cmd->context.smid;
3842
3843	req_desc = megasas_get_request_descriptor(instance, index - 1);
3844
3845	req_desc->Words = 0;
3846	req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO <<
3847					 MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
3848
3849	req_desc->SCSIIO.SMID = cpu_to_le16(index);
3850
3851	return req_desc;
3852}
3853
3854/**
3855 * megasas_issue_dcmd_fusion - Issues a MFI Pass thru cmd
3856 * @instance:			Adapter soft state
3857 * @cmd:			mfi cmd pointer
3858 *
3859 */
3860static void
3861megasas_issue_dcmd_fusion(struct megasas_instance *instance,
3862			  struct megasas_cmd *cmd)
3863{
3864	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
3865
3866	req_desc = build_mpt_cmd(instance, cmd);
3867
3868	megasas_fire_cmd_fusion(instance, req_desc);
3869	return;
3870}
3871
3872/**
3873 * megasas_release_fusion -	Reverses the FW initialization
3874 * @instance:			Adapter soft state
3875 */
3876void
3877megasas_release_fusion(struct megasas_instance *instance)
3878{
3879	megasas_free_ioc_init_cmd(instance);
3880	megasas_free_cmds(instance);
3881	megasas_free_cmds_fusion(instance);
3882
3883	iounmap(instance->reg_set);
3884
3885	pci_release_selected_regions(instance->pdev, 1<<instance->bar);
3886}
3887
3888/**
3889 * megasas_read_fw_status_reg_fusion - returns the current FW status value
3890 * @instance:			Adapter soft state
3891 */
3892static u32
3893megasas_read_fw_status_reg_fusion(struct megasas_instance *instance)
3894{
3895	return megasas_readl(instance, &instance->reg_set->outbound_scratch_pad_0);
3896}
3897
3898/**
3899 * megasas_alloc_host_crash_buffer -	Host buffers for Crash dump collection from Firmware
3900 * @instance:				Controller's soft instance
3901 * @return:			        Number of allocated host crash buffers
3902 */
3903static void
3904megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
3905{
3906	unsigned int i;
3907
3908	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
3909		instance->crash_buf[i] = vzalloc(CRASH_DMA_BUF_SIZE);
3910		if (!instance->crash_buf[i]) {
3911			dev_info(&instance->pdev->dev, "Firmware crash dump "
3912				"memory allocation failed at index %d\n", i);
3913			break;
3914		}
3915	}
3916	instance->drv_buf_alloc = i;
3917}
3918
3919/**
3920 * megasas_free_host_crash_buffer -	Host buffers for Crash dump collection from Firmware
3921 * @instance:				Controller's soft instance
3922 */
3923void
3924megasas_free_host_crash_buffer(struct megasas_instance *instance)
3925{
3926	unsigned int i;
3927	for (i = 0; i < instance->drv_buf_alloc; i++) {
3928		if (instance->crash_buf[i])
3929			vfree(instance->crash_buf[i]);
3930	}
3931	instance->drv_buf_index = 0;
3932	instance->drv_buf_alloc = 0;
3933	instance->fw_crash_state = UNAVAILABLE;
3934	instance->fw_crash_buffer_size = 0;
3935}
3936
3937/**
3938 * megasas_adp_reset_fusion -	For controller reset
3939 * @instance:				Controller's soft instance
3940 * @regs:				MFI register set
3941 */
3942static int
3943megasas_adp_reset_fusion(struct megasas_instance *instance,
3944			 struct megasas_register_set __iomem *regs)
3945{
3946	u32 host_diag, abs_state, retry;
3947
3948	/* Now try to reset the chip */
3949	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3950	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3951	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3952	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3953	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3954	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3955	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &instance->reg_set->fusion_seq_offset);
3956
3957	/* Check that the diag write enable (DRWE) bit is on */
3958	host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
3959	retry = 0;
3960	while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
3961		msleep(100);
3962		host_diag = megasas_readl(instance,
3963					  &instance->reg_set->fusion_host_diag);
3964		if (retry++ == 100) {
3965			dev_warn(&instance->pdev->dev,
3966				"Host diag unlock failed from %s %d\n",
3967				__func__, __LINE__);
3968			break;
3969		}
3970	}
3971	if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
3972		return -1;
3973
3974	/* Send chip reset command */
3975	writel(host_diag | HOST_DIAG_RESET_ADAPTER,
3976		&instance->reg_set->fusion_host_diag);
3977	msleep(3000);
3978
3979	/* Make sure reset adapter bit is cleared */
3980	host_diag = megasas_readl(instance, &instance->reg_set->fusion_host_diag);
3981	retry = 0;
3982	while (host_diag & HOST_DIAG_RESET_ADAPTER) {
3983		msleep(100);
3984		host_diag = megasas_readl(instance,
3985					  &instance->reg_set->fusion_host_diag);
3986		if (retry++ == 1000) {
3987			dev_warn(&instance->pdev->dev,
3988				"Diag reset adapter never cleared %s %d\n",
3989				__func__, __LINE__);
3990			break;
3991		}
3992	}
3993	if (host_diag & HOST_DIAG_RESET_ADAPTER)
3994		return -1;
3995
3996	abs_state = instance->instancet->read_fw_status_reg(instance)
3997			& MFI_STATE_MASK;
3998	retry = 0;
3999
4000	while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
4001		msleep(100);
4002		abs_state = instance->instancet->
4003			read_fw_status_reg(instance) & MFI_STATE_MASK;
4004	}
4005	if (abs_state <= MFI_STATE_FW_INIT) {
4006		dev_warn(&instance->pdev->dev,
4007			"fw state < MFI_STATE_FW_INIT, state = 0x%x %s %d\n",
4008			abs_state, __func__, __LINE__);
4009		return -1;
4010	}
4011
4012	return 0;
4013}
4014
4015/**
4016 * megasas_check_reset_fusion -	For controller reset check
4017 * @instance:				Controller's soft instance
4018 * @regs:				MFI register set
4019 */
4020static int
4021megasas_check_reset_fusion(struct megasas_instance *instance,
4022			   struct megasas_register_set __iomem *regs)
4023{
4024	return 0;
4025}
4026
4027/**
4028 * megasas_trigger_snap_dump -	Trigger snap dump in FW
4029 * @instance:			Soft instance of adapter
4030 */
4031static inline void megasas_trigger_snap_dump(struct megasas_instance *instance)
4032{
4033	int j;
4034	u32 fw_state, abs_state;
4035
4036	if (!instance->disableOnlineCtrlReset) {
4037		dev_info(&instance->pdev->dev, "Trigger snap dump\n");
4038		writel(MFI_ADP_TRIGGER_SNAP_DUMP,
4039		       &instance->reg_set->doorbell);
4040		readl(&instance->reg_set->doorbell);
4041	}
4042
4043	for (j = 0; j < instance->snapdump_wait_time; j++) {
4044		abs_state = instance->instancet->read_fw_status_reg(instance);
4045		fw_state = abs_state & MFI_STATE_MASK;
4046		if (fw_state == MFI_STATE_FAULT) {
4047			dev_printk(KERN_ERR, &instance->pdev->dev,
4048				   "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
4049				   abs_state & MFI_STATE_FAULT_CODE,
4050				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4051			return;
4052		}
4053		msleep(1000);
4054	}
4055}
4056
4057/* This function waits for outstanding commands on fusion to complete */
4058static int
4059megasas_wait_for_outstanding_fusion(struct megasas_instance *instance,
4060				    int reason, int *convert)
4061{
4062	int i, outstanding, retval = 0, hb_seconds_missed = 0;
4063	u32 fw_state, abs_state;
4064	u32 waittime_for_io_completion;
4065
4066	waittime_for_io_completion =
4067		min_t(u32, resetwaittime,
4068			(resetwaittime - instance->snapdump_wait_time));
4069
4070	if (reason == MFI_IO_TIMEOUT_OCR) {
4071		dev_info(&instance->pdev->dev,
4072			"MFI command is timed out\n");
4073		megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4074		if (instance->snapdump_wait_time)
4075			megasas_trigger_snap_dump(instance);
4076		retval = 1;
4077		goto out;
4078	}
4079
4080	for (i = 0; i < waittime_for_io_completion; i++) {
4081		/* Check if firmware is in fault state */
4082		abs_state = instance->instancet->read_fw_status_reg(instance);
4083		fw_state = abs_state & MFI_STATE_MASK;
4084		if (fw_state == MFI_STATE_FAULT) {
4085			dev_printk(KERN_ERR, &instance->pdev->dev,
4086				   "FW in FAULT state Fault code:0x%x subcode:0x%x func:%s\n",
4087				   abs_state & MFI_STATE_FAULT_CODE,
4088				   abs_state & MFI_STATE_FAULT_SUBCODE, __func__);
4089			megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4090			if (instance->requestorId && reason) {
4091				dev_warn(&instance->pdev->dev, "SR-IOV Found FW in FAULT"
4092				" state while polling during"
4093				" I/O timeout handling for %d\n",
4094				instance->host->host_no);
4095				*convert = 1;
4096			}
4097
4098			retval = 1;
4099			goto out;
4100		}
4101
4102
4103		/* If SR-IOV VF mode & heartbeat timeout, don't wait */
4104		if (instance->requestorId && !reason) {
4105			retval = 1;
4106			goto out;
4107		}
4108
4109		/* If SR-IOV VF mode & I/O timeout, check for HB timeout */
4110		if (instance->requestorId && (reason == SCSIIO_TIMEOUT_OCR)) {
4111			if (instance->hb_host_mem->HB.fwCounter !=
4112			    instance->hb_host_mem->HB.driverCounter) {
4113				instance->hb_host_mem->HB.driverCounter =
4114					instance->hb_host_mem->HB.fwCounter;
4115				hb_seconds_missed = 0;
4116			} else {
4117				hb_seconds_missed++;
4118				if (hb_seconds_missed ==
4119				    (MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF/HZ)) {
4120					dev_warn(&instance->pdev->dev, "SR-IOV:"
4121					       " Heartbeat never completed "
4122					       " while polling during I/O "
4123					       " timeout handling for "
4124					       "scsi%d.\n",
4125					       instance->host->host_no);
4126					       *convert = 1;
4127					       retval = 1;
4128					       goto out;
4129				}
4130			}
4131		}
4132
4133		megasas_complete_cmd_dpc_fusion((unsigned long)instance);
4134		outstanding = atomic_read(&instance->fw_outstanding);
4135		if (!outstanding)
4136			goto out;
4137
4138		if (!(i % MEGASAS_RESET_NOTICE_INTERVAL)) {
4139			dev_notice(&instance->pdev->dev, "[%2d]waiting for %d "
4140			       "commands to complete for scsi%d\n", i,
4141			       outstanding, instance->host->host_no);
4142		}
4143		msleep(1000);
4144	}
4145
4146	if (instance->snapdump_wait_time) {
4147		megasas_trigger_snap_dump(instance);
4148		retval = 1;
4149		goto out;
4150	}
4151
4152	if (atomic_read(&instance->fw_outstanding)) {
4153		dev_err(&instance->pdev->dev, "pending commands remain after waiting, "
4154		       "will reset adapter scsi%d.\n",
4155		       instance->host->host_no);
4156		*convert = 1;
4157		retval = 1;
4158	}
4159
4160out:
4161	return retval;
4162}
4163
4164void  megasas_reset_reply_desc(struct megasas_instance *instance)
4165{
4166	int i, j, count;
4167	struct fusion_context *fusion;
4168	union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc;
4169
4170	fusion = instance->ctrl_context;
4171	count = instance->msix_vectors > 0 ? instance->msix_vectors : 1;
4172	for (i = 0 ; i < count ; i++) {
4173		fusion->last_reply_idx[i] = 0;
4174		reply_desc = fusion->reply_frames_desc[i];
4175		for (j = 0 ; j < fusion->reply_q_depth; j++, reply_desc++)
4176			reply_desc->Words = cpu_to_le64(ULLONG_MAX);
4177	}
4178}
4179
4180/*
4181 * megasas_refire_mgmt_cmd :	Re-fire management commands
4182 * @instance:				Controller's soft instance
4183*/
4184static void megasas_refire_mgmt_cmd(struct megasas_instance *instance,
4185			     bool return_ioctl)
4186{
4187	int j;
4188	struct megasas_cmd_fusion *cmd_fusion;
4189	struct fusion_context *fusion;
4190	struct megasas_cmd *cmd_mfi;
4191	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
4192	struct MPI2_RAID_SCSI_IO_REQUEST *scsi_io_req;
4193	u16 smid;
4194	bool refire_cmd = false;
4195	u8 result;
4196	u32 opcode = 0;
4197
4198	fusion = instance->ctrl_context;
4199
4200	/* Re-fire management commands.
4201	 * Do not traverse complet MPT frame pool. Start from max_scsi_cmds.
4202	 */
4203	for (j = instance->max_scsi_cmds ; j < instance->max_fw_cmds; j++) {
4204		cmd_fusion = fusion->cmd_list[j];
4205		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
4206		smid = le16_to_cpu(cmd_mfi->context.smid);
4207		result = REFIRE_CMD;
4208
4209		if (!smid)
4210			continue;
4211
4212		req_desc = megasas_get_request_descriptor(instance, smid - 1);
4213
4214		switch (cmd_mfi->frame->hdr.cmd) {
4215		case MFI_CMD_DCMD:
4216			opcode = le32_to_cpu(cmd_mfi->frame->dcmd.opcode);
4217			 /* Do not refire shutdown command */
4218			if (opcode == MR_DCMD_CTRL_SHUTDOWN) {
4219				cmd_mfi->frame->dcmd.cmd_status = MFI_STAT_OK;
4220				result = COMPLETE_CMD;
4221				break;
4222			}
4223
4224			refire_cmd = ((opcode != MR_DCMD_LD_MAP_GET_INFO)) &&
4225				      (opcode != MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
4226				      !(cmd_mfi->flags & DRV_DCMD_SKIP_REFIRE);
4227
4228			if (!refire_cmd)
4229				result = RETURN_CMD;
4230
4231			break;
4232		case MFI_CMD_NVME:
4233			if (!instance->support_nvme_passthru) {
4234				cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
4235				result = COMPLETE_CMD;
4236			}
4237
4238			break;
4239		case MFI_CMD_TOOLBOX:
4240			if (!instance->support_pci_lane_margining) {
4241				cmd_mfi->frame->hdr.cmd_status = MFI_STAT_INVALID_CMD;
4242				result = COMPLETE_CMD;
4243			}
4244
4245			break;
4246		default:
4247			break;
4248		}
4249
4250		if (return_ioctl && cmd_mfi->sync_cmd &&
4251		    cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
4252			dev_err(&instance->pdev->dev,
4253				"return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n",
4254				__func__, __LINE__, cmd_mfi->frame->hdr.cmd,
4255				le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
4256			cmd_mfi->cmd_status_drv = DCMD_BUSY;
4257			result = COMPLETE_CMD;
4258		}
4259
4260		scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *)
4261				cmd_fusion->io_request;
4262		if (scsi_io_req->Function == MPI2_FUNCTION_SCSI_TASK_MGMT)
4263			result = RETURN_CMD;
4264
4265		switch (result) {
4266		case REFIRE_CMD:
4267			megasas_fire_cmd_fusion(instance, req_desc);
4268			break;
4269		case RETURN_CMD:
4270			megasas_return_cmd(instance, cmd_mfi);
4271			break;
4272		case COMPLETE_CMD:
4273			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
4274			break;
4275		}
4276	}
4277}
4278
4279/*
4280 * megasas_return_polled_cmds: Return polled mode commands back to the pool
4281 *			       before initiating an OCR.
4282 * @instance:                  Controller's soft instance
4283 */
4284static void
4285megasas_return_polled_cmds(struct megasas_instance *instance)
4286{
4287	int i;
4288	struct megasas_cmd_fusion *cmd_fusion;
4289	struct fusion_context *fusion;
4290	struct megasas_cmd *cmd_mfi;
4291
4292	fusion = instance->ctrl_context;
4293
4294	for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) {
4295		cmd_fusion = fusion->cmd_list[i];
4296		cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
4297
4298		if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) {
4299			if (megasas_dbg_lvl & OCR_DEBUG)
4300				dev_info(&instance->pdev->dev,
4301					 "%s %d return cmd 0x%x opcode 0x%x\n",
4302					 __func__, __LINE__, cmd_mfi->frame->hdr.cmd,
4303					 le32_to_cpu(cmd_mfi->frame->dcmd.opcode));
4304			cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE;
4305			megasas_return_cmd(instance, cmd_mfi);
4306		}
4307	}
4308}
4309
4310/*
4311 * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device
4312 * @instance: per adapter struct
4313 * @channel: the channel assigned by the OS
4314 * @id: the id assigned by the OS
4315 *
4316 * Returns SUCCESS if no IOs pending to SCSI device, else return FAILED
4317 */
4318
4319static int megasas_track_scsiio(struct megasas_instance *instance,
4320		int id, int channel)
4321{
4322	int i, found = 0;
4323	struct megasas_cmd_fusion *cmd_fusion;
4324	struct fusion_context *fusion;
4325	fusion = instance->ctrl_context;
4326
4327	for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4328		cmd_fusion = fusion->cmd_list[i];
4329		if (cmd_fusion->scmd &&
4330			(cmd_fusion->scmd->device->id == id &&
4331			cmd_fusion->scmd->device->channel == channel)) {
4332			dev_info(&instance->pdev->dev,
4333				"SCSI commands pending to target"
4334				"channel %d id %d \tSMID: 0x%x\n",
4335				channel, id, cmd_fusion->index);
4336			scsi_print_command(cmd_fusion->scmd);
4337			found = 1;
4338			break;
4339		}
4340	}
4341
4342	return found ? FAILED : SUCCESS;
4343}
4344
4345/**
4346 * megasas_tm_response_code - translation of device response code
4347 * @instance:	Controller's soft instance
4348 * @mpi_reply:	MPI reply returned by firmware
4349 *
4350 * Return nothing.
4351 */
4352static void
4353megasas_tm_response_code(struct megasas_instance *instance,
4354		struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply)
4355{
4356	char *desc;
4357
4358	switch (mpi_reply->ResponseCode) {
4359	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
4360		desc = "task management request completed";
4361		break;
4362	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
4363		desc = "invalid frame";
4364		break;
4365	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
4366		desc = "task management request not supported";
4367		break;
4368	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
4369		desc = "task management request failed";
4370		break;
4371	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
4372		desc = "task management request succeeded";
4373		break;
4374	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
4375		desc = "invalid lun";
4376		break;
4377	case 0xA:
4378		desc = "overlapped tag attempted";
4379		break;
4380	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
4381		desc = "task queued, however not sent to target";
4382		break;
4383	default:
4384		desc = "unknown";
4385		break;
4386	}
4387	dev_dbg(&instance->pdev->dev, "response_code(%01x): %s\n",
4388		mpi_reply->ResponseCode, desc);
4389	dev_dbg(&instance->pdev->dev,
4390		"TerminationCount/DevHandle/Function/TaskType/IOCStat/IOCLoginfo"
4391		" 0x%x/0x%x/0x%x/0x%x/0x%x/0x%x\n",
4392		mpi_reply->TerminationCount, mpi_reply->DevHandle,
4393		mpi_reply->Function, mpi_reply->TaskType,
4394		mpi_reply->IOCStatus, mpi_reply->IOCLogInfo);
4395}
4396
4397/**
4398 * megasas_issue_tm - main routine for sending tm requests
4399 * @instance: per adapter struct
4400 * @device_handle: device handle
4401 * @channel: the channel assigned by the OS
4402 * @id: the id assigned by the OS
4403 * @smid_task: smid assigned to the task
4404 * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in megaraid_sas_fusion.c)
4405 * @mr_device_priv_data: private data
4406 * Context: user
4407 *
4408 * MegaRaid use MPT interface for Task Magement request.
4409 * A generic API for sending task management requests to firmware.
4410 *
4411 * Return SUCCESS or FAILED.
4412 */
4413static int
4414megasas_issue_tm(struct megasas_instance *instance, u16 device_handle,
4415	uint channel, uint id, u16 smid_task, u8 type,
4416	struct MR_PRIV_DEVICE *mr_device_priv_data)
4417{
4418	struct MR_TASK_MANAGE_REQUEST *mr_request;
4419	struct MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_request;
4420	unsigned long timeleft;
4421	struct megasas_cmd_fusion *cmd_fusion;
4422	struct megasas_cmd *cmd_mfi;
4423	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
4424	struct fusion_context *fusion = NULL;
4425	struct megasas_cmd_fusion *scsi_lookup;
4426	int rc;
4427	int timeout = MEGASAS_DEFAULT_TM_TIMEOUT;
4428	struct MPI2_SCSI_TASK_MANAGE_REPLY *mpi_reply;
4429
4430	fusion = instance->ctrl_context;
4431
4432	cmd_mfi = megasas_get_cmd(instance);
4433
4434	if (!cmd_mfi) {
4435		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
4436			__func__, __LINE__);
4437		return -ENOMEM;
4438	}
4439
4440	cmd_fusion = megasas_get_cmd_fusion(instance,
4441			instance->max_scsi_cmds + cmd_mfi->index);
4442
4443	/*  Save the smid. To be used for returning the cmd */
4444	cmd_mfi->context.smid = cmd_fusion->index;
4445
4446	req_desc = megasas_get_request_descriptor(instance,
4447			(cmd_fusion->index - 1));
4448
4449	cmd_fusion->request_desc = req_desc;
4450	req_desc->Words = 0;
4451
4452	mr_request = (struct MR_TASK_MANAGE_REQUEST *) cmd_fusion->io_request;
4453	memset(mr_request, 0, sizeof(struct MR_TASK_MANAGE_REQUEST));
4454	mpi_request = (struct MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_request->TmRequest;
4455	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
4456	mpi_request->DevHandle = cpu_to_le16(device_handle);
4457	mpi_request->TaskType = type;
4458	mpi_request->TaskMID = cpu_to_le16(smid_task);
4459	mpi_request->LUN[1] = 0;
4460
4461
4462	req_desc = cmd_fusion->request_desc;
4463	req_desc->HighPriority.SMID = cpu_to_le16(cmd_fusion->index);
4464	req_desc->HighPriority.RequestFlags =
4465		(MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY <<
4466		MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);
4467	req_desc->HighPriority.MSIxIndex =  0;
4468	req_desc->HighPriority.LMID = 0;
4469	req_desc->HighPriority.Reserved1 = 0;
4470
4471	if (channel < MEGASAS_MAX_PD_CHANNELS)
4472		mr_request->tmReqFlags.isTMForPD = 1;
4473	else
4474		mr_request->tmReqFlags.isTMForLD = 1;
4475
4476	init_completion(&cmd_fusion->done);
4477	megasas_fire_cmd_fusion(instance, req_desc);
4478
4479	switch (type) {
4480	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
4481		timeout = mr_device_priv_data->task_abort_tmo;
4482		break;
4483	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4484		timeout = mr_device_priv_data->target_reset_tmo;
4485		break;
4486	}
4487
4488	timeleft = wait_for_completion_timeout(&cmd_fusion->done, timeout * HZ);
4489
4490	if (!timeleft) {
4491		dev_err(&instance->pdev->dev,
4492			"task mgmt type 0x%x timed out\n", type);
4493		mutex_unlock(&instance->reset_mutex);
4494		rc = megasas_reset_fusion(instance->host, MFI_IO_TIMEOUT_OCR);
4495		mutex_lock(&instance->reset_mutex);
4496		return rc;
4497	}
4498
4499	mpi_reply = (struct MPI2_SCSI_TASK_MANAGE_REPLY *) &mr_request->TMReply;
4500	megasas_tm_response_code(instance, mpi_reply);
4501
4502	megasas_return_cmd(instance, cmd_mfi);
4503	rc = SUCCESS;
4504	switch (type) {
4505	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
4506		scsi_lookup = fusion->cmd_list[smid_task - 1];
4507
4508		if (scsi_lookup->scmd == NULL)
4509			break;
4510		else {
4511			instance->instancet->disable_intr(instance);
4512			megasas_sync_irqs((unsigned long)instance);
4513			instance->instancet->enable_intr(instance);
4514			megasas_enable_irq_poll(instance);
4515			if (scsi_lookup->scmd == NULL)
4516				break;
4517		}
4518		rc = FAILED;
4519		break;
4520
4521	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
4522		if ((channel == 0xFFFFFFFF) && (id == 0xFFFFFFFF))
4523			break;
4524		instance->instancet->disable_intr(instance);
4525		megasas_sync_irqs((unsigned long)instance);
4526		rc = megasas_track_scsiio(instance, id, channel);
4527		instance->instancet->enable_intr(instance);
4528		megasas_enable_irq_poll(instance);
4529
4530		break;
4531	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
4532	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
4533		break;
4534	default:
4535		rc = FAILED;
4536		break;
4537	}
4538
4539	return rc;
4540
4541}
4542
4543/*
4544 * megasas_fusion_smid_lookup : Look for fusion command correpspodning to SCSI
4545 * @instance: per adapter struct
4546 *
4547 * Return Non Zero index, if SMID found in outstanding commands
4548 */
4549static u16 megasas_fusion_smid_lookup(struct scsi_cmnd *scmd)
4550{
4551	int i, ret = 0;
4552	struct megasas_instance *instance;
4553	struct megasas_cmd_fusion *cmd_fusion;
4554	struct fusion_context *fusion;
4555
4556	instance = (struct megasas_instance *)scmd->device->host->hostdata;
4557
4558	fusion = instance->ctrl_context;
4559
4560	for (i = 0; i < instance->max_scsi_cmds; i++) {
4561		cmd_fusion = fusion->cmd_list[i];
4562		if (cmd_fusion->scmd && (cmd_fusion->scmd == scmd)) {
4563			scmd_printk(KERN_NOTICE, scmd, "Abort request is for"
4564				" SMID: %d\n", cmd_fusion->index);
4565			ret = cmd_fusion->index;
4566			break;
4567		}
4568	}
4569
4570	return ret;
4571}
4572
4573/*
4574* megasas_get_tm_devhandle - Get devhandle for TM request
4575* @sdev-		     OS provided scsi device
4576*
4577* Returns-		     devhandle/targetID of SCSI device
4578*/
4579static u16 megasas_get_tm_devhandle(struct scsi_device *sdev)
4580{
4581	u16 pd_index = 0;
4582	u32 device_id;
4583	struct megasas_instance *instance;
4584	struct fusion_context *fusion;
4585	struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
4586	u16 devhandle = (u16)ULONG_MAX;
4587
4588	instance = (struct megasas_instance *)sdev->host->hostdata;
4589	fusion = instance->ctrl_context;
4590
4591	if (!MEGASAS_IS_LOGICAL(sdev)) {
4592		if (instance->use_seqnum_jbod_fp) {
4593			pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
4594				    + sdev->id;
4595			pd_sync = (void *)fusion->pd_seq_sync
4596					[(instance->pd_seq_map_id - 1) & 1];
4597			devhandle = pd_sync->seq[pd_index].devHandle;
4598		} else
4599			sdev_printk(KERN_ERR, sdev, "Firmware expose tmCapable"
4600				" without JBOD MAP support from %s %d\n", __func__, __LINE__);
4601	} else {
4602		device_id = ((sdev->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL)
4603				+ sdev->id;
4604		devhandle = device_id;
4605	}
4606
4607	return devhandle;
4608}
4609
4610/*
4611 * megasas_task_abort_fusion : SCSI task abort function for fusion adapters
4612 * @scmd : pointer to scsi command object
4613 *
4614 * Return SUCCESS, if command aborted else FAILED
4615 */
4616
4617int megasas_task_abort_fusion(struct scsi_cmnd *scmd)
4618{
4619	struct megasas_instance *instance;
4620	u16 smid, devhandle;
4621	int ret;
4622	struct MR_PRIV_DEVICE *mr_device_priv_data;
4623	mr_device_priv_data = scmd->device->hostdata;
4624
4625	instance = (struct megasas_instance *)scmd->device->host->hostdata;
4626
4627	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4628		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
4629		"SCSI host:%d\n", instance->host->host_no);
4630		ret = FAILED;
4631		return ret;
4632	}
4633
4634	if (!mr_device_priv_data) {
4635		sdev_printk(KERN_INFO, scmd->device, "device been deleted! "
4636			"scmd(%p)\n", scmd);
4637		scmd->result = DID_NO_CONNECT << 16;
4638		ret = SUCCESS;
4639		goto out;
4640	}
4641
4642	if (!mr_device_priv_data->is_tm_capable) {
4643		ret = FAILED;
4644		goto out;
4645	}
4646
4647	mutex_lock(&instance->reset_mutex);
4648
4649	smid = megasas_fusion_smid_lookup(scmd);
4650
4651	if (!smid) {
4652		ret = SUCCESS;
4653		scmd_printk(KERN_NOTICE, scmd, "Command for which abort is"
4654			" issued is not found in outstanding commands\n");
4655		mutex_unlock(&instance->reset_mutex);
4656		goto out;
4657	}
4658
4659	devhandle = megasas_get_tm_devhandle(scmd->device);
4660
4661	if (devhandle == (u16)ULONG_MAX) {
4662		ret = FAILED;
4663		sdev_printk(KERN_INFO, scmd->device,
4664			"task abort issued for invalid devhandle\n");
4665		mutex_unlock(&instance->reset_mutex);
4666		goto out;
4667	}
4668	sdev_printk(KERN_INFO, scmd->device,
4669		"attempting task abort! scmd(0x%p) tm_dev_handle 0x%x\n",
4670		scmd, devhandle);
4671
4672	mr_device_priv_data->tm_busy = true;
4673	ret = megasas_issue_tm(instance, devhandle,
4674			scmd->device->channel, scmd->device->id, smid,
4675			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4676			mr_device_priv_data);
4677	mr_device_priv_data->tm_busy = false;
4678
4679	mutex_unlock(&instance->reset_mutex);
4680	scmd_printk(KERN_INFO, scmd, "task abort %s!! scmd(0x%p)\n",
4681			((ret == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
4682out:
4683	scsi_print_command(scmd);
4684	if (megasas_dbg_lvl & TM_DEBUG)
4685		megasas_dump_fusion_io(scmd);
4686
4687	return ret;
4688}
4689
4690/*
4691 * megasas_reset_target_fusion : target reset function for fusion adapters
4692 * scmd: SCSI command pointer
4693 *
4694 * Returns SUCCESS if all commands associated with target aborted else FAILED
4695 */
4696
4697int megasas_reset_target_fusion(struct scsi_cmnd *scmd)
4698{
4699
4700	struct megasas_instance *instance;
4701	int ret = FAILED;
4702	u16 devhandle;
4703	struct MR_PRIV_DEVICE *mr_device_priv_data;
4704	mr_device_priv_data = scmd->device->hostdata;
4705
4706	instance = (struct megasas_instance *)scmd->device->host->hostdata;
4707
4708	if (atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) {
4709		dev_err(&instance->pdev->dev, "Controller is not OPERATIONAL,"
4710		"SCSI host:%d\n", instance->host->host_no);
4711		ret = FAILED;
4712		return ret;
4713	}
4714
4715	if (!mr_device_priv_data) {
4716		sdev_printk(KERN_INFO, scmd->device,
4717			    "device been deleted! scmd: (0x%p)\n", scmd);
4718		scmd->result = DID_NO_CONNECT << 16;
4719		ret = SUCCESS;
4720		goto out;
4721	}
4722
4723	if (!mr_device_priv_data->is_tm_capable) {
4724		ret = FAILED;
4725		goto out;
4726	}
4727
4728	mutex_lock(&instance->reset_mutex);
4729	devhandle = megasas_get_tm_devhandle(scmd->device);
4730
4731	if (devhandle == (u16)ULONG_MAX) {
4732		ret = FAILED;
4733		sdev_printk(KERN_INFO, scmd->device,
4734			"target reset issued for invalid devhandle\n");
4735		mutex_unlock(&instance->reset_mutex);
4736		goto out;
4737	}
4738
4739	sdev_printk(KERN_INFO, scmd->device,
4740		"attempting target reset! scmd(0x%p) tm_dev_handle: 0x%x\n",
4741		scmd, devhandle);
4742	mr_device_priv_data->tm_busy = true;
4743	ret = megasas_issue_tm(instance, devhandle,
4744			scmd->device->channel, scmd->device->id, 0,
4745			MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
4746			mr_device_priv_data);
4747	mr_device_priv_data->tm_busy = false;
4748	mutex_unlock(&instance->reset_mutex);
4749	scmd_printk(KERN_NOTICE, scmd, "target reset %s!!\n",
4750		(ret == SUCCESS) ? "SUCCESS" : "FAILED");
4751
4752out:
4753	return ret;
4754}
4755
4756/*SRIOV get other instance in cluster if any*/
4757static struct
4758megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance)
4759{
4760	int i;
4761
4762	for (i = 0; i < MAX_MGMT_ADAPTERS; i++) {
4763		if (megasas_mgmt_info.instance[i] &&
4764			(megasas_mgmt_info.instance[i] != instance) &&
4765			 megasas_mgmt_info.instance[i]->requestorId &&
4766			 megasas_mgmt_info.instance[i]->peerIsPresent &&
4767			(memcmp((megasas_mgmt_info.instance[i]->clusterId),
4768			instance->clusterId, MEGASAS_CLUSTER_ID_SIZE) == 0))
4769			return megasas_mgmt_info.instance[i];
4770	}
4771	return NULL;
4772}
4773
4774/* Check for a second path that is currently UP */
4775int megasas_check_mpio_paths(struct megasas_instance *instance,
4776	struct scsi_cmnd *scmd)
4777{
4778	struct megasas_instance *peer_instance = NULL;
4779	int retval = (DID_REQUEUE << 16);
4780
4781	if (instance->peerIsPresent) {
4782		peer_instance = megasas_get_peer_instance(instance);
4783		if ((peer_instance) &&
4784			(atomic_read(&peer_instance->adprecovery) ==
4785			MEGASAS_HBA_OPERATIONAL))
4786			retval = (DID_NO_CONNECT << 16);
4787	}
4788	return retval;
4789}
4790
4791/* Core fusion reset function */
4792int megasas_reset_fusion(struct Scsi_Host *shost, int reason)
4793{
4794	int retval = SUCCESS, i, j, convert = 0;
4795	struct megasas_instance *instance;
4796	struct megasas_cmd_fusion *cmd_fusion, *r1_cmd;
4797	struct fusion_context *fusion;
4798	u32 abs_state, status_reg, reset_adapter, fpio_count = 0;
4799	u32 io_timeout_in_crash_mode = 0;
4800	struct scsi_cmnd *scmd_local = NULL;
4801	struct scsi_device *sdev;
4802	int ret_target_prop = DCMD_FAILED;
4803	bool is_target_prop = false;
4804	bool do_adp_reset = true;
4805	int max_reset_tries = MEGASAS_FUSION_MAX_RESET_TRIES;
4806
4807	instance = (struct megasas_instance *)shost->hostdata;
4808	fusion = instance->ctrl_context;
4809
4810	mutex_lock(&instance->reset_mutex);
4811
4812	if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) {
4813		dev_warn(&instance->pdev->dev, "Hardware critical error, "
4814		       "returning FAILED for scsi%d.\n",
4815			instance->host->host_no);
4816		mutex_unlock(&instance->reset_mutex);
4817		return FAILED;
4818	}
4819	status_reg = instance->instancet->read_fw_status_reg(instance);
4820	abs_state = status_reg & MFI_STATE_MASK;
4821
4822	/* IO timeout detected, forcibly put FW in FAULT state */
4823	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
4824		instance->crash_dump_app_support && reason) {
4825		dev_info(&instance->pdev->dev, "IO/DCMD timeout is detected, "
4826			"forcibly FAULT Firmware\n");
4827		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4828		status_reg = megasas_readl(instance, &instance->reg_set->doorbell);
4829		writel(status_reg | MFI_STATE_FORCE_OCR,
4830			&instance->reg_set->doorbell);
4831		readl(&instance->reg_set->doorbell);
4832		mutex_unlock(&instance->reset_mutex);
4833		do {
4834			ssleep(3);
4835			io_timeout_in_crash_mode++;
4836			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
4837				"seconds for crash dump collection and OCR "
4838				"to be done\n", (io_timeout_in_crash_mode * 3));
4839		} while ((atomic_read(&instance->adprecovery) != MEGASAS_HBA_OPERATIONAL) &&
4840			(io_timeout_in_crash_mode < 80));
4841
4842		if (atomic_read(&instance->adprecovery) == MEGASAS_HBA_OPERATIONAL) {
4843			dev_info(&instance->pdev->dev, "OCR done for IO "
4844				"timeout case\n");
4845			retval = SUCCESS;
4846		} else {
4847			dev_info(&instance->pdev->dev, "Controller is not "
4848				"operational after 240 seconds wait for IO "
4849				"timeout case in FW crash dump mode\n do "
4850				"OCR/kill adapter\n");
4851			retval = megasas_reset_fusion(shost, 0);
4852		}
4853		return retval;
4854	}
4855
4856	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
4857		del_timer_sync(&instance->sriov_heartbeat_timer);
4858	set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
4859	set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
4860	atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING);
4861	instance->instancet->disable_intr(instance);
4862	megasas_sync_irqs((unsigned long)instance);
4863
4864	/* First try waiting for commands to complete */
4865	if (megasas_wait_for_outstanding_fusion(instance, reason,
4866						&convert)) {
4867		atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_INFAULT);
4868		dev_warn(&instance->pdev->dev, "resetting fusion "
4869		       "adapter scsi%d.\n", instance->host->host_no);
4870		if (convert)
4871			reason = 0;
4872
4873		if (megasas_dbg_lvl & OCR_DEBUG)
4874			dev_info(&instance->pdev->dev, "\nPending SCSI commands:\n");
4875
4876		/* Now return commands back to the OS */
4877		for (i = 0 ; i < instance->max_scsi_cmds; i++) {
4878			cmd_fusion = fusion->cmd_list[i];
4879			/*check for extra commands issued by driver*/
4880			if (instance->adapter_type >= VENTURA_SERIES) {
4881				r1_cmd = fusion->cmd_list[i + instance->max_fw_cmds];
4882				megasas_return_cmd_fusion(instance, r1_cmd);
4883			}
4884			scmd_local = cmd_fusion->scmd;
4885			if (cmd_fusion->scmd) {
4886				if (megasas_dbg_lvl & OCR_DEBUG) {
4887					sdev_printk(KERN_INFO,
4888						cmd_fusion->scmd->device, "SMID: 0x%x\n",
4889						cmd_fusion->index);
4890					megasas_dump_fusion_io(cmd_fusion->scmd);
4891				}
4892
4893				if (cmd_fusion->io_request->Function ==
4894					MPI2_FUNCTION_SCSI_IO_REQUEST)
4895					fpio_count++;
4896
4897				scmd_local->result =
4898					megasas_check_mpio_paths(instance,
4899							scmd_local);
4900				if (instance->ldio_threshold &&
4901					megasas_cmd_type(scmd_local) == READ_WRITE_LDIO)
4902					atomic_dec(&instance->ldio_outstanding);
4903				megasas_return_cmd_fusion(instance, cmd_fusion);
4904				scsi_dma_unmap(scmd_local);
4905				scmd_local->scsi_done(scmd_local);
4906			}
4907		}
4908
4909		dev_info(&instance->pdev->dev, "Outstanding fastpath IOs: %d\n",
4910			fpio_count);
4911
4912		atomic_set(&instance->fw_outstanding, 0);
4913
4914		status_reg = instance->instancet->read_fw_status_reg(instance);
4915		abs_state = status_reg & MFI_STATE_MASK;
4916		reset_adapter = status_reg & MFI_RESET_ADAPTER;
4917		if (instance->disableOnlineCtrlReset ||
4918		    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
4919			/* Reset not supported, kill adapter */
4920			dev_warn(&instance->pdev->dev, "Reset not supported"
4921			       ", killing adapter scsi%d.\n",
4922				instance->host->host_no);
4923			goto kill_hba;
4924		}
4925
4926		/* Let SR-IOV VF & PF sync up if there was a HB failure */
4927		if (instance->requestorId && !reason) {
4928			msleep(MEGASAS_OCR_SETTLE_TIME_VF);
4929			do_adp_reset = false;
4930			max_reset_tries = MEGASAS_SRIOV_MAX_RESET_TRIES_VF;
4931		}
4932
4933		/* Now try to reset the chip */
4934		for (i = 0; i < max_reset_tries; i++) {
4935			/*
4936			 * Do adp reset and wait for
4937			 * controller to transition to ready
4938			 */
4939			if (megasas_adp_reset_wait_for_ready(instance,
4940				do_adp_reset, 1) == FAILED)
4941				continue;
4942
4943			/* Wait for FW to become ready */
4944			if (megasas_transition_to_ready(instance, 1)) {
4945				dev_warn(&instance->pdev->dev,
4946					"Failed to transition controller to ready for "
4947					"scsi%d.\n", instance->host->host_no);
4948				continue;
4949			}
4950			megasas_reset_reply_desc(instance);
4951			megasas_fusion_update_can_queue(instance, OCR_CONTEXT);
4952
4953			if (megasas_ioc_init_fusion(instance)) {
4954				continue;
4955			}
4956
4957			if (megasas_get_ctrl_info(instance)) {
4958				dev_info(&instance->pdev->dev,
4959					"Failed from %s %d\n",
4960					__func__, __LINE__);
4961				goto kill_hba;
4962			}
4963
4964			megasas_refire_mgmt_cmd(instance,
4965						(i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1)
4966							? 1 : 0));
4967
4968			/* Reset load balance info */
4969			if (fusion->load_balance_info)
4970				memset(fusion->load_balance_info, 0,
4971				       (sizeof(struct LD_LOAD_BALANCE_INFO) *
4972				       MAX_LOGICAL_DRIVES_EXT));
4973
4974			if (!megasas_get_map_info(instance)) {
4975				megasas_sync_map_info(instance);
4976			} else {
4977				/*
4978				 * Return pending polled mode cmds before
4979				 * retrying OCR
4980				 */
4981				megasas_return_polled_cmds(instance);
4982				continue;
4983			}
4984
4985			megasas_setup_jbod_map(instance);
4986
4987			/* reset stream detection array */
4988			if (instance->adapter_type >= VENTURA_SERIES) {
4989				for (j = 0; j < MAX_LOGICAL_DRIVES_EXT; ++j) {
4990					memset(fusion->stream_detect_by_ld[j],
4991					0, sizeof(struct LD_STREAM_DETECT));
4992				 fusion->stream_detect_by_ld[j]->mru_bit_map
4993						= MR_STREAM_BITMAP;
4994				}
4995			}
4996
4997			clear_bit(MEGASAS_FUSION_IN_RESET,
4998				  &instance->reset_flags);
4999			instance->instancet->enable_intr(instance);
5000			megasas_enable_irq_poll(instance);
5001			shost_for_each_device(sdev, shost) {
5002				if ((instance->tgt_prop) &&
5003				    (instance->nvme_page_size))
5004					ret_target_prop = megasas_get_target_prop(instance, sdev);
5005
5006				is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? true : false;
5007				megasas_set_dynamic_target_properties(sdev, is_target_prop);
5008			}
5009
5010			status_reg = instance->instancet->read_fw_status_reg
5011					(instance);
5012			abs_state = status_reg & MFI_STATE_MASK;
5013			if (abs_state != MFI_STATE_OPERATIONAL) {
5014				dev_info(&instance->pdev->dev,
5015					 "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n",
5016					 abs_state, instance->host->host_no);
5017				goto out;
5018			}
5019			atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5020
5021			dev_info(&instance->pdev->dev,
5022				 "Adapter is OPERATIONAL for scsi:%d\n",
5023				 instance->host->host_no);
5024
5025			/* Restart SR-IOV heartbeat */
5026			if (instance->requestorId) {
5027				if (!megasas_sriov_start_heartbeat(instance, 0))
5028					megasas_start_timer(instance);
5029				else
5030					instance->skip_heartbeat_timer_del = 1;
5031			}
5032
5033			if (instance->crash_dump_drv_support &&
5034				instance->crash_dump_app_support)
5035				megasas_set_crash_dump_params(instance,
5036					MR_CRASH_BUF_TURN_ON);
5037			else
5038				megasas_set_crash_dump_params(instance,
5039					MR_CRASH_BUF_TURN_OFF);
5040
5041			if (instance->snapdump_wait_time) {
5042				megasas_get_snapdump_properties(instance);
5043				dev_info(&instance->pdev->dev,
5044					 "Snap dump wait time\t: %d\n",
5045					 instance->snapdump_wait_time);
5046			}
5047
5048			retval = SUCCESS;
5049
5050			/* Adapter reset completed successfully */
5051			dev_warn(&instance->pdev->dev,
5052				 "Reset successful for scsi%d.\n",
5053				 instance->host->host_no);
5054
5055			goto out;
5056		}
5057		/* Reset failed, kill the adapter */
5058		dev_warn(&instance->pdev->dev, "Reset failed, killing "
5059		       "adapter scsi%d.\n", instance->host->host_no);
5060		goto kill_hba;
5061	} else {
5062		/* For VF: Restart HB timer if we didn't OCR */
5063		if (instance->requestorId) {
5064			megasas_start_timer(instance);
5065		}
5066		clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags);
5067		instance->instancet->enable_intr(instance);
5068		megasas_enable_irq_poll(instance);
5069		atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL);
5070		goto out;
5071	}
5072kill_hba:
5073	megaraid_sas_kill_hba(instance);
5074	megasas_enable_irq_poll(instance);
5075	instance->skip_heartbeat_timer_del = 1;
5076	retval = FAILED;
5077out:
5078	clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags);
5079	mutex_unlock(&instance->reset_mutex);
5080	return retval;
5081}
5082
5083/* Fusion Crash dump collection */
5084static void  megasas_fusion_crash_dump(struct megasas_instance *instance)
5085{
5086	u32 status_reg;
5087	u8 partial_copy = 0;
5088	int wait = 0;
5089
5090
5091	status_reg = instance->instancet->read_fw_status_reg(instance);
5092
5093	/*
5094	 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
5095	 * to host crash buffers
5096	 */
5097	if (instance->drv_buf_index == 0) {
5098		/* Buffer is already allocated for old Crash dump.
5099		 * Do OCR and do not wait for crash dump collection
5100		 */
5101		if (instance->drv_buf_alloc) {
5102			dev_info(&instance->pdev->dev, "earlier crash dump is "
5103				"not yet copied by application, ignoring this "
5104				"crash dump and initiating OCR\n");
5105			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
5106			writel(status_reg,
5107				&instance->reg_set->outbound_scratch_pad_0);
5108			readl(&instance->reg_set->outbound_scratch_pad_0);
5109			return;
5110		}
5111		megasas_alloc_host_crash_buffer(instance);
5112		dev_info(&instance->pdev->dev, "Number of host crash buffers "
5113			"allocated: %d\n", instance->drv_buf_alloc);
5114	}
5115
5116	while (!(status_reg & MFI_STATE_CRASH_DUMP_DONE) &&
5117	       (wait < MEGASAS_WATCHDOG_WAIT_COUNT)) {
5118		if (!(status_reg & MFI_STATE_DMADONE)) {
5119			/*
5120			 * Next crash dump buffer is not yet DMA'd by FW
5121			 * Check after 10ms. Wait for 1 second for FW to
5122			 * post the next buffer. If not bail out.
5123			 */
5124			wait++;
5125			msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
5126			status_reg = instance->instancet->read_fw_status_reg(
5127					instance);
5128			continue;
5129		}
5130
5131		wait = 0;
5132		if (instance->drv_buf_index >= instance->drv_buf_alloc) {
5133			dev_info(&instance->pdev->dev,
5134				 "Driver is done copying the buffer: %d\n",
5135				 instance->drv_buf_alloc);
5136			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
5137			partial_copy = 1;
5138			break;
5139		} else {
5140			memcpy(instance->crash_buf[instance->drv_buf_index],
5141			       instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
5142			instance->drv_buf_index++;
5143			status_reg &= ~MFI_STATE_DMADONE;
5144		}
5145
5146		writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
5147		readl(&instance->reg_set->outbound_scratch_pad_0);
5148
5149		msleep(MEGASAS_WAIT_FOR_NEXT_DMA_MSECS);
5150		status_reg = instance->instancet->read_fw_status_reg(instance);
5151	}
5152
5153	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
5154		dev_info(&instance->pdev->dev, "Crash Dump is available,number "
5155			"of copied buffers: %d\n", instance->drv_buf_index);
5156		instance->fw_crash_buffer_size =  instance->drv_buf_index;
5157		instance->fw_crash_state = AVAILABLE;
5158		instance->drv_buf_index = 0;
5159		writel(status_reg, &instance->reg_set->outbound_scratch_pad_0);
5160		readl(&instance->reg_set->outbound_scratch_pad_0);
5161		if (!partial_copy)
5162			megasas_reset_fusion(instance->host, 0);
5163	}
5164}
5165
5166
5167/* Fusion OCR work queue */
5168void megasas_fusion_ocr_wq(struct work_struct *work)
5169{
5170	struct megasas_instance *instance =
5171		container_of(work, struct megasas_instance, work_init);
5172
5173	megasas_reset_fusion(instance->host, 0);
5174}
5175
5176/* Allocate fusion context */
5177int
5178megasas_alloc_fusion_context(struct megasas_instance *instance)
5179{
5180	struct fusion_context *fusion;
5181
5182	instance->ctrl_context = kzalloc(sizeof(struct fusion_context),
5183					 GFP_KERNEL);
5184	if (!instance->ctrl_context) {
5185		dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5186			__func__, __LINE__);
5187		return -ENOMEM;
5188	}
5189
5190	fusion = instance->ctrl_context;
5191
5192	fusion->log_to_span_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5193					      sizeof(LD_SPAN_INFO));
5194	fusion->log_to_span =
5195		(PLD_SPAN_INFO)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
5196						fusion->log_to_span_pages);
5197	if (!fusion->log_to_span) {
5198		fusion->log_to_span =
5199			vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT,
5200					   sizeof(LD_SPAN_INFO)));
5201		if (!fusion->log_to_span) {
5202			dev_err(&instance->pdev->dev, "Failed from %s %d\n",
5203				__func__, __LINE__);
5204			return -ENOMEM;
5205		}
5206	}
5207
5208	fusion->load_balance_info_pages = get_order(MAX_LOGICAL_DRIVES_EXT *
5209		sizeof(struct LD_LOAD_BALANCE_INFO));
5210	fusion->load_balance_info =
5211		(struct LD_LOAD_BALANCE_INFO *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
5212		fusion->load_balance_info_pages);
5213	if (!fusion->load_balance_info) {
5214		fusion->load_balance_info =
5215			vzalloc(array_size(MAX_LOGICAL_DRIVES_EXT,
5216					   sizeof(struct LD_LOAD_BALANCE_INFO)));
5217		if (!fusion->load_balance_info)
5218			dev_err(&instance->pdev->dev, "Failed to allocate load_balance_info, "
5219				"continuing without Load Balance support\n");
5220	}
5221
5222	return 0;
5223}
5224
5225void
5226megasas_free_fusion_context(struct megasas_instance *instance)
5227{
5228	struct fusion_context *fusion = instance->ctrl_context;
5229
5230	if (fusion) {
5231		if (fusion->load_balance_info) {
5232			if (is_vmalloc_addr(fusion->load_balance_info))
5233				vfree(fusion->load_balance_info);
5234			else
5235				free_pages((ulong)fusion->load_balance_info,
5236					fusion->load_balance_info_pages);
5237		}
5238
5239		if (fusion->log_to_span) {
5240			if (is_vmalloc_addr(fusion->log_to_span))
5241				vfree(fusion->log_to_span);
5242			else
5243				free_pages((ulong)fusion->log_to_span,
5244					   fusion->log_to_span_pages);
5245		}
5246
5247		kfree(fusion);
5248	}
5249}
5250
5251struct megasas_instance_template megasas_instance_template_fusion = {
5252	.enable_intr = megasas_enable_intr_fusion,
5253	.disable_intr = megasas_disable_intr_fusion,
5254	.clear_intr = megasas_clear_intr_fusion,
5255	.read_fw_status_reg = megasas_read_fw_status_reg_fusion,
5256	.adp_reset = megasas_adp_reset_fusion,
5257	.check_reset = megasas_check_reset_fusion,
5258	.service_isr = megasas_isr_fusion,
5259	.tasklet = megasas_complete_cmd_dpc_fusion,
5260	.init_adapter = megasas_init_adapter_fusion,
5261	.build_and_issue_cmd = megasas_build_and_issue_cmd_fusion,
5262	.issue_dcmd = megasas_issue_dcmd_fusion,
5263};
5264