1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * QLogic iSCSI HBA Driver
4 * Copyright (c)   2003-2013 QLogic Corporation
5 */
6
7#include <linux/ratelimit.h>
8
9#include "ql4_def.h"
10#include "ql4_version.h"
11#include "ql4_glbl.h"
12#include "ql4_dbg.h"
13#include "ql4_inline.h"
14
15uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha, ulong addr)
16{
17	return readl((void __iomem *)(ha->nx_pcibase + addr));
18}
19
20void qla4_83xx_wr_reg(struct scsi_qla_host *ha, ulong addr, uint32_t val)
21{
22	writel(val, (void __iomem *)(ha->nx_pcibase + addr));
23}
24
25static int qla4_83xx_set_win_base(struct scsi_qla_host *ha, uint32_t addr)
26{
27	uint32_t val;
28	int ret_val = QLA_SUCCESS;
29
30	qla4_83xx_wr_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num), addr);
31	val = qla4_83xx_rd_reg(ha, QLA83XX_CRB_WIN_FUNC(ha->func_num));
32	if (val != addr) {
33		ql4_printk(KERN_ERR, ha, "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n",
34			   __func__, addr, val);
35		ret_val = QLA_ERROR;
36	}
37
38	return ret_val;
39}
40
41int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
42			      uint32_t *data)
43{
44	int ret_val;
45
46	ret_val = qla4_83xx_set_win_base(ha, addr);
47
48	if (ret_val == QLA_SUCCESS) {
49		*data = qla4_83xx_rd_reg(ha, QLA83XX_WILDCARD);
50	} else {
51		*data = 0xffffffff;
52		ql4_printk(KERN_ERR, ha, "%s: failed read of addr 0x%x!\n",
53			   __func__, addr);
54	}
55
56	return ret_val;
57}
58
59int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha, uint32_t addr,
60			      uint32_t data)
61{
62	int ret_val;
63
64	ret_val = qla4_83xx_set_win_base(ha, addr);
65
66	if (ret_val == QLA_SUCCESS)
67		qla4_83xx_wr_reg(ha, QLA83XX_WILDCARD, data);
68	else
69		ql4_printk(KERN_ERR, ha, "%s: failed wrt to addr 0x%x, data 0x%x\n",
70			   __func__, addr, data);
71
72	return ret_val;
73}
74
/**
 * qla4_83xx_flash_lock - Acquire the hardware flash semaphore
 * @ha: adapter structure
 *
 * Polls the FLASH_LOCK semaphore register in 20ms steps until it is
 * granted or QLA83XX_FLASH_LOCK_TIMEOUT expires.
 *
 * Returns QLA_SUCCESS when the lock is held, QLA_ERROR on timeout.
 */
static int qla4_83xx_flash_lock(struct scsi_qla_host *ha)
{
	int lock_owner;
	int timeout = 0;
	uint32_t lock_status = 0;
	int ret_val = QLA_SUCCESS;

	/* A non-zero read of FLASH_LOCK means the semaphore was granted. */
	while (lock_status == 0) {
		lock_status = qla4_83xx_rd_reg(ha, QLA83XX_FLASH_LOCK);
		if (lock_status)
			break;

		if (++timeout >= QLA83XX_FLASH_LOCK_TIMEOUT / 20) {
			lock_owner = qla4_83xx_rd_reg(ha,
						      QLA83XX_FLASH_LOCK_ID);
			ql4_printk(KERN_ERR, ha, "%s: flash lock by func %d failed, held by func %d\n",
				   __func__, ha->func_num, lock_owner);
			ret_val = QLA_ERROR;
			break;
		}
		msleep(20);
	}

	/* NOTE(review): LOCK_ID is stamped with our function number even on
	 * the timeout/error path above, overwriting the current owner's ID —
	 * confirm this is intentional. */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, ha->func_num);
	return ret_val;
}
101
/* Release the flash semaphore: clear the owner ID (0xFF = no owner), then
 * read FLASH_UNLOCK — the read side effect releases the hardware lock. */
static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha)
{
	/* Reading FLASH_UNLOCK register unlocks the Flash */
	qla4_83xx_wr_reg(ha, QLA83XX_FLASH_LOCK_ID, 0xFF);
	qla4_83xx_rd_reg(ha, QLA83XX_FLASH_UNLOCK);
}
108
109int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha, uint32_t flash_addr,
110			     uint8_t *p_data, int u32_word_count)
111{
112	int i;
113	uint32_t u32_word;
114	uint32_t addr = flash_addr;
115	int ret_val = QLA_SUCCESS;
116
117	ret_val = qla4_83xx_flash_lock(ha);
118	if (ret_val == QLA_ERROR)
119		goto exit_lock_error;
120
121	if (addr & 0x03) {
122		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
123			   __func__, addr);
124		ret_val = QLA_ERROR;
125		goto exit_flash_read;
126	}
127
128	for (i = 0; i < u32_word_count; i++) {
129		ret_val = qla4_83xx_wr_reg_indirect(ha,
130						    QLA83XX_FLASH_DIRECT_WINDOW,
131						    (addr & 0xFFFF0000));
132		if (ret_val == QLA_ERROR) {
133			ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!",
134				   __func__, addr);
135			goto exit_flash_read;
136		}
137
138		ret_val = qla4_83xx_rd_reg_indirect(ha,
139						QLA83XX_FLASH_DIRECT_DATA(addr),
140						&u32_word);
141		if (ret_val == QLA_ERROR) {
142			ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
143				   __func__, addr);
144			goto exit_flash_read;
145		}
146
147		*(__le32 *)p_data = le32_to_cpu(u32_word);
148		p_data = p_data + 4;
149		addr = addr + 4;
150	}
151
152exit_flash_read:
153	qla4_83xx_flash_unlock(ha);
154
155exit_lock_error:
156	return ret_val;
157}
158
/**
 * qla4_83xx_lockless_flash_read_u32 - Read dwords from flash without locking
 * @ha: adapter structure
 * @flash_addr: 4-byte-aligned flash address to read from
 * @p_data: destination buffer
 * @u32_word_count: number of 32-bit words to read
 *
 * Like qla4_83xx_flash_read_u32() but assumes the caller already
 * serializes flash access. The flash direct window is re-programmed each
 * time the read crosses a flash sector boundary.
 *
 * Returns QLA_SUCCESS on success, QLA_ERROR on failure.
 */
int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha,
				      uint32_t flash_addr, uint8_t *p_data,
				      int u32_word_count)
{
	uint32_t i;
	uint32_t u32_word;
	uint32_t flash_offset;
	uint32_t addr = flash_addr;
	int ret_val = QLA_SUCCESS;

	/* Offset of the starting address within its flash sector. */
	flash_offset = addr & (QLA83XX_FLASH_SECTOR_SIZE - 1);

	if (addr & 0x3) {
		ql4_printk(KERN_ERR, ha, "%s: Illegal addr = 0x%x\n",
			   __func__, addr);
		ret_val = QLA_ERROR;
		goto exit_lockless_read;
	}

	ret_val = qla4_83xx_wr_reg_indirect(ha, QLA83XX_FLASH_DIRECT_WINDOW,
					    addr);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
			   __func__, addr);
		goto exit_lockless_read;
	}

	/* Check if data is spread across multiple sectors  */
	if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
	    (QLA83XX_FLASH_SECTOR_SIZE - 1)) {

		/* Multi sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data  = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
			flash_offset = flash_offset + 4;

			/* Crossed into the next sector: move the window. */
			if (flash_offset > (QLA83XX_FLASH_SECTOR_SIZE - 1)) {
				/* This write is needed once for each sector */
				ret_val = qla4_83xx_wr_reg_indirect(ha,
						   QLA83XX_FLASH_DIRECT_WINDOW,
						   addr);
				if (ret_val == QLA_ERROR) {
					ql4_printk(KERN_ERR, ha, "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
						   __func__, addr);
					goto exit_lockless_read;
				}
				flash_offset = 0;
			}
		}
	} else {
		/* Single sector read */
		for (i = 0; i < u32_word_count; i++) {
			ret_val = qla4_83xx_rd_reg_indirect(ha,
						QLA83XX_FLASH_DIRECT_DATA(addr),
						&u32_word);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read addr 0x%x!\n",
					   __func__, addr);
				goto exit_lockless_read;
			}

			*(__le32 *)p_data = le32_to_cpu(u32_word);
			p_data = p_data + 4;
			addr = addr + 4;
		}
	}

exit_lockless_read:
	return ret_val;
}
240
/**
 * qla4_83xx_rom_lock_recovery - Forcefully release a stale ROM/flash lock
 * @ha: adapter structure
 */
void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_83xx_flash_lock(ha))
		ql4_printk(KERN_INFO, ha, "%s: Resetting rom lock\n", __func__);

	/*
	 * Either we got the lock, or someone else is holding the lock
	 * while we are resetting; in both cases forcefully unlock.
	 */
	qla4_83xx_flash_unlock(ha);
}
252
253#define INTENT_TO_RECOVER	0x01
254#define PROCEED_TO_RECOVER	0x02
255
/**
 * qla4_83xx_lock_recovery - Recover a stuck IDC driver lock
 * @ha: adapter structure
 *
 * Implements the two-phase (intent/proceed) IDC lock recovery protocol:
 * advertise intent in DRV_LOCKRECOVERY, wait, verify no other function
 * beat us to it, then force-unlock and try to take the lock ourselves.
 *
 * Returns QLA_SUCCESS if the lock was recovered and acquired, else
 * QLA_ERROR.
 */
static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha)
{

	uint32_t lock = 0, lockid;
	int ret_val = QLA_ERROR;

	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);

	/* Check for other Recovery in progress, go wait */
	if ((lockid & 0x3) != 0)
		goto exit_lock_recovery;

	/* Intent to Recover: our function number in bits 2-5, phase bit 0 */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | INTENT_TO_RECOVER);

	msleep(200);

	/* Check Intent to Recover is still advertised by us */
	lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY);
	if ((lockid & 0x3C) != (ha->func_num << 2))
		goto exit_lock_recovery;

	ql4_printk(KERN_INFO, ha, "%s: IDC Lock recovery initiated for func %d\n",
		   __func__, ha->func_num);

	/* Proceed to Recover */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY,
				   (ha->func_num << 2) | PROCEED_TO_RECOVER);

	/* Force Unlock: clear owner ID, then read-to-unlock */
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, 0xFF);
	ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_UNLOCK);

	/* Clear bits 0-5 in IDC_RECOVERY register*/
	ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCKRECOVERY, 0);

	/* Get lock: a non-zero read means the semaphore was granted */
	lock = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK);
	if (lock) {
		/* Bump the acquisition counter (bits 8-31) and record our
		 * function number (bits 0-7) as the owner. */
		lockid = ha->isp_ops->rd_reg_direct(ha, QLA83XX_DRV_LOCK_ID);
		lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->func_num;
		ha->isp_ops->wr_reg_direct(ha, QLA83XX_DRV_LOCK_ID, lockid);
		ret_val = QLA_SUCCESS;
	}

exit_lock_recovery:
	return ret_val;
}
305
306#define	QLA83XX_DRV_LOCK_MSLEEP		200
307
/**
 * qla4_83xx_drv_lock - Acquire the IDC driver lock
 * @ha: adapter structure
 *
 * Polls the DRV_LOCK semaphore every QLA83XX_DRV_LOCK_MSLEEP ms. On
 * success, DRV_LOCK_ID is stamped with an incremented acquisition counter
 * (bits 8-31) and this function's number (bits 0-7). If the very same
 * owner still holds the lock after the full timeout, recovery is forced
 * via qla4_83xx_lock_recovery(); otherwise the wait restarts.
 *
 * Returns QLA_SUCCESS when the lock is held, QLA_ERROR otherwise.
 */
int qla4_83xx_drv_lock(struct scsi_qla_host *ha)
{
	int timeout = 0;
	uint32_t status = 0;
	int ret_val = QLA_SUCCESS;
	uint32_t first_owner = 0;
	uint32_t tmo_owner = 0;
	uint32_t lock_id;
	uint32_t func_num;
	uint32_t lock_cnt;

	while (status == 0) {
		status = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK);
		if (status) {
			/* Increment Counter (8-31) and update func_num (0-7) on
			 * getting a successful lock  */
			lock_id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->func_num;
			qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, lock_id);
			break;
		}

		if (timeout == 0)
			/* Save counter + ID of function holding the lock for
			 * first failure */
			first_owner = ha->isp_ops->rd_reg_direct(ha,
							  QLA83XX_DRV_LOCK_ID);

		if (++timeout >=
		    (QLA83XX_DRV_LOCK_TIMEOUT / QLA83XX_DRV_LOCK_MSLEEP)) {
			tmo_owner = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);
			func_num = tmo_owner & 0xFF;
			lock_cnt = tmo_owner >> 8;
			ql4_printk(KERN_INFO, ha, "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n",
				   __func__, ha->func_num, func_num, lock_cnt,
				   (first_owner & 0xFF));

			if (first_owner != tmo_owner) {
				/* Some other driver got lock, OR same driver
				 * got lock again (counter value changed), when
				 * we were waiting for lock.
				 * Retry for another 2 sec */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock failed for func %d\n",
					   __func__, ha->func_num);
				timeout = 0;
			} else {
				/* Same driver holding lock > 2sec.
				 * Force Recovery */
				ret_val = qla4_83xx_lock_recovery(ha);
				if (ret_val == QLA_SUCCESS) {
					/* Recovered and got lock */
					ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d successful\n",
						   __func__, ha->func_num);
					break;
				}
				/* Recovery Failed, some other function
				 * has the lock, wait for 2secs and retry */
				ql4_printk(KERN_INFO, ha, "%s: IDC lock Recovery by %d failed, Retrying timeout\n",
					   __func__, ha->func_num);
				timeout = 0;
			}
		}
		msleep(QLA83XX_DRV_LOCK_MSLEEP);
	}

	return ret_val;
}
375
/**
 * qla4_83xx_drv_unlock - Release the IDC driver lock
 * @ha: adapter structure
 *
 * Only the current owner (low byte of DRV_LOCK_ID) may unlock; the
 * acquisition counter in the upper bits is preserved.
 */
void qla4_83xx_drv_unlock(struct scsi_qla_host *ha)
{
	int id;

	id = qla4_83xx_rd_reg(ha, QLA83XX_DRV_LOCK_ID);

	if ((id & 0xFF) != ha->func_num) {
		ql4_printk(KERN_ERR, ha, "%s: IDC Unlock by %d failed, lock owner is %d\n",
			   __func__, ha->func_num, (id & 0xFF));
		return;
	}

	/* Keep lock counter value, set the owner field to 0xFF (no owner),
	 * then release via the read side effect of DRV_UNLOCK. */
	qla4_83xx_wr_reg(ha, QLA83XX_DRV_LOCK_ID, (id | 0xFF));
	qla4_83xx_rd_reg(ha, QLA83XX_DRV_UNLOCK);
}
392
393void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha)
394{
395	uint32_t idc_ctrl;
396
397	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
398	idc_ctrl |= DONTRESET_BIT0;
399	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
400	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
401			  idc_ctrl));
402}
403
404void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha)
405{
406	uint32_t idc_ctrl;
407
408	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
409	idc_ctrl &= ~DONTRESET_BIT0;
410	qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL, idc_ctrl);
411	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: idc_ctrl = %d\n", __func__,
412			  idc_ctrl));
413}
414
415int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha)
416{
417	uint32_t idc_ctrl;
418
419	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
420	return idc_ctrl & DONTRESET_BIT0;
421}
422
423/*-------------------------IDC State Machine ---------------------*/
424
/* Device classes encoded in the low 2 bits of each function's nibble in
 * the CRB_DEV_PART_INFO registers. */
enum {
	UNKNOWN_CLASS = 0,
	NIC_CLASS,
	FCOE_CLASS,
	ISCSI_CLASS
};
431
/* Decoded per-function entry from the CRB_DEV_PART_INFO registers. */
struct device_info {
	int func_num;		/* PCI function number (0..15) */
	int device_type;	/* one of the *_CLASS values */
	int port_num;		/* port bits of the nibble (0xC mask, unshifted) */
};
437
438int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
439{
440	uint32_t drv_active;
441	uint32_t dev_part, dev_part1, dev_part2;
442	int i;
443	struct device_info device_map[16];
444	int func_nibble;
445	int nibble;
446	int nic_present = 0;
447	int iscsi_present = 0;
448	int iscsi_func_low = 0;
449
450	/* Use the dev_partition register to determine the PCI function number
451	 * and then check drv_active register to see which driver is loaded */
452	dev_part1 = qla4_83xx_rd_reg(ha,
453				     ha->reg_tbl[QLA8XXX_CRB_DEV_PART_INFO]);
454	dev_part2 = qla4_83xx_rd_reg(ha, QLA83XX_CRB_DEV_PART_INFO2);
455	drv_active = qla4_83xx_rd_reg(ha, ha->reg_tbl[QLA8XXX_CRB_DRV_ACTIVE]);
456
457	/* Each function has 4 bits in dev_partition Info register,
458	 * Lower 2 bits - device type, Upper 2 bits - physical port number */
459	dev_part = dev_part1;
460	for (i = nibble = 0; i <= 15; i++, nibble++) {
461		func_nibble = dev_part & (0xF << (nibble * 4));
462		func_nibble >>= (nibble * 4);
463		device_map[i].func_num = i;
464		device_map[i].device_type = func_nibble & 0x3;
465		device_map[i].port_num = func_nibble & 0xC;
466
467		if (device_map[i].device_type == NIC_CLASS) {
468			if (drv_active & (1 << device_map[i].func_num)) {
469				nic_present++;
470				break;
471			}
472		} else if (device_map[i].device_type == ISCSI_CLASS) {
473			if (drv_active & (1 << device_map[i].func_num)) {
474				if (!iscsi_present ||
475				    (iscsi_present &&
476				     (iscsi_func_low > device_map[i].func_num)))
477					iscsi_func_low = device_map[i].func_num;
478
479				iscsi_present++;
480			}
481		}
482
483		/* For function_num[8..15] get info from dev_part2 register */
484		if (nibble == 7) {
485			nibble = 0;
486			dev_part = dev_part2;
487		}
488	}
489
490	/* NIC, iSCSI and FCOE are the Reset owners based on order, NIC gets
491	 * precedence over iSCSI and FCOE and iSCSI over FCOE, based on drivers
492	 * present. */
493	if (!nic_present && (ha->func_num == iscsi_func_low)) {
494		DEBUG2(ql4_printk(KERN_INFO, ha,
495				  "%s: can reset - NIC not present and lower iSCSI function is %d\n",
496				  __func__, ha->func_num));
497		return 1;
498	}
499
500	return 0;
501}
502
503/**
504 * qla4_83xx_need_reset_handler - Code to start reset sequence
505 * @ha: pointer to adapter structure
506 *
507 * Note: IDC lock must be held upon entry
508 **/
void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	unsigned long reset_timeout, dev_init_timeout;

	ql4_printk(KERN_INFO, ha, "%s: Performing ISP error recovery\n",
		   __func__);

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: reset acknowledged\n",
				  __func__));
		qla4_8xxx_set_rst_ready(ha);

		/* Non-reset owners ACK Reset and wait for device INIT state
		 * as part of Reset Recovery by Reset Owner */
		dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

		do {
			if (time_after_eq(jiffies, dev_init_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: Non Reset owner dev init timeout\n",
					   __func__);
				break;
			}

			/* Drop the IDC lock across the 1s sleep so the reset
			 * owner can make progress, then re-poll DEV_STATE. */
			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			dev_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DEV_STATE);
		} while (dev_state == QLA8XXX_DEV_NEED_RESET);
	} else {
		/* Reset owner: wait for every active function to ACK (set its
		 * bit in DRV_STATE) before bootstrapping the device. */
		qla4_8xxx_set_rst_ready(ha);
		reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
		drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

		ql4_printk(KERN_INFO, ha, "%s: drv_state = 0x%x, drv_active = 0x%x\n",
			   __func__, drv_state, drv_active);

		while (drv_state != drv_active) {
			if (time_after_eq(jiffies, reset_timeout)) {
				ql4_printk(KERN_INFO, ha, "%s: %s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
					   __func__, DRIVER_NAME, drv_state,
					   drv_active);
				break;
			}

			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);

			drv_state = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_STATE);
			drv_active = qla4_8xxx_rd_direct(ha,
							QLA8XXX_CRB_DRV_ACTIVE);
		}

		/* Functions that never ACKed are dropped from drv_active so
		 * they cannot stall future recoveries. */
		if (drv_state != drv_active) {
			ql4_printk(KERN_INFO, ha, "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n",
				   __func__, (drv_active ^ drv_state));
			drv_active = drv_active & drv_state;
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE,
					    drv_active);
		}

		clear_bit(AF_8XXX_RST_OWNER, &ha->flags);
		/* Start Reset Recovery */
		qla4_8xxx_device_bootstrap(ha);
	}
}
580
581void qla4_83xx_get_idc_param(struct scsi_qla_host *ha)
582{
583	uint32_t idc_params, ret_val;
584
585	ret_val = qla4_83xx_flash_read_u32(ha, QLA83XX_IDC_PARAM_ADDR,
586					   (uint8_t *)&idc_params, 1);
587	if (ret_val == QLA_SUCCESS) {
588		ha->nx_dev_init_timeout = idc_params & 0xFFFF;
589		ha->nx_reset_timeout = (idc_params >> 16) & 0xFFFF;
590	} else {
591		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
592		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
593	}
594
595	DEBUG2(ql4_printk(KERN_DEBUG, ha,
596			  "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n",
597			  __func__, ha->nx_dev_init_timeout,
598			  ha->nx_reset_timeout));
599}
600
601/*-------------------------Reset Sequence Functions-----------------------*/
602
603static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha)
604{
605	uint8_t *phdr;
606
607	if (!ha->reset_tmplt.buff) {
608		ql4_printk(KERN_ERR, ha, "%s: Error: Invalid reset_seq_template\n",
609			   __func__);
610		return;
611	}
612
613	phdr = ha->reset_tmplt.buff;
614
615	DEBUG2(ql4_printk(KERN_INFO, ha,
616			  "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n",
617			  *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
618			  *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
619			  *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
620			  *(phdr+13), *(phdr+14), *(phdr+15)));
621}
622
/**
 * qla4_83xx_copy_bootloader - Copy the bootloader from flash to MS memory
 * @ha: adapter structure
 *
 * Reads the bootloader image from flash (destination address and size are
 * published by firmware in the BOOTLOADER_ADDR/SIZE registers) into a
 * temporary buffer, then writes it to MS memory in 128-bit chunks.
 *
 * Returns QLA_SUCCESS on success, QLA_ERROR on allocation or I/O failure.
 */
static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
{
	uint8_t *p_cache;
	uint32_t src, count, size;
	uint64_t dest;
	int ret_val = QLA_SUCCESS;

	src = QLA83XX_BOOTLOADER_FLASH_ADDR;
	dest = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_ADDR);
	size = qla4_83xx_rd_reg(ha, QLA83XX_BOOTLOADER_SIZE);

	/* 128 bit alignment check: round size up to a 16-byte multiple */
	if (size & 0xF)
		size = (size + 16) & ~0xF;

	/* 16 byte count */
	count = size/16;

	p_cache = vmalloc(size);
	if (p_cache == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate memory for boot loader cache\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_copy_bootloader;
	}

	/* Caller serializes flash access, hence the lockless read. */
	ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache,
						    size / sizeof(uint32_t));
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error reading firmware from flash\n",
			   __func__);
		goto exit_copy_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Read firmware from flash\n",
			  __func__));

	/* 128 bit/16 byte write to MS memory */
	ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
					      count);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
			   __func__);
		goto exit_copy_error;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Wrote firmware size %d to MS\n",
			  __func__, size));

exit_copy_error:
	vfree(p_cache);

exit_copy_bootloader:
	return ret_val;
}
677
678static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha)
679{
680	uint32_t val, ret_val = QLA_ERROR;
681	int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
682
683	do {
684		val = qla4_83xx_rd_reg(ha, QLA83XX_CMDPEG_STATE);
685		if (val == PHAN_INITIALIZE_COMPLETE) {
686			DEBUG2(ql4_printk(KERN_INFO, ha,
687					  "%s: Command Peg initialization complete. State=0x%x\n",
688					  __func__, val));
689			ret_val = QLA_SUCCESS;
690			break;
691		}
692		msleep(CRB_CMDPEG_CHECK_DELAY);
693	} while (--retries);
694
695	return ret_val;
696}
697
698/**
699 * qla4_83xx_poll_reg - Poll the given CRB addr for duration msecs till
700 * value read ANDed with test_mask is equal to test_result.
701 *
702 * @ha : Pointer to adapter structure
703 * @addr : CRB register address
704 * @duration : Poll for total of "duration" msecs
705 * @test_mask : Mask value read with "test_mask"
706 * @test_result : Compare (value&test_mask) with test_result.
707 **/
static int qla4_83xx_poll_reg(struct scsi_qla_host *ha, uint32_t addr,
			      int duration, uint32_t test_mask,
			      uint32_t test_result)
{
	uint32_t value;
	uint8_t retries;
	int ret_val = QLA_SUCCESS;

	/* Prime the first sample; a failed read aborts the poll. */
	ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
	if (ret_val == QLA_ERROR)
		goto exit_poll_reg;

	/* Poll in duration/10 steps: sleep, re-read, re-test on the next
	 * pass. ret_val stays QLA_ERROR if the loop exhausts retries
	 * without a match. */
	retries = duration / 10;
	do {
		if ((value & test_mask) != test_result) {
			msleep(duration / 10);
			ret_val = qla4_83xx_rd_reg_indirect(ha, addr, &value);
			if (ret_val == QLA_ERROR)
				goto exit_poll_reg;

			ret_val = QLA_ERROR;
		} else {
			ret_val = QLA_SUCCESS;
			break;
		}
	} while (retries--);

exit_poll_reg:
	/* Any failure counts against the running reset sequence. */
	if (ret_val == QLA_ERROR) {
		ha->reset_tmplt.seq_error++;
		ql4_printk(KERN_ERR, ha, "%s: Poll Failed:  0x%08x 0x%08x 0x%08x\n",
			   __func__, value, test_mask, test_result);
	}

	return ret_val;
}
744
/* Validate the reset template with a 16-bit one's-complement-style sum
 * over the whole buffer. Returns QLA_SUCCESS for a valid template. */
static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha)
{
	uint32_t sum =  0;
	uint16_t *buff = (uint16_t *)ha->reset_tmplt.buff;
	int u16_count =  ha->reset_tmplt.hdr->size / sizeof(uint16_t);
	int ret_val;

	while (u16_count-- > 0)
		sum += *buff++;

	/* Fold carries back into the low 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xFFFF) +  (sum >> 16);

	/* checksum of 0 indicates a valid template */
	/* NOTE(review): after folding, sum <= 0xFFFF, so the 32-bit ~sum is
	 * always non-zero and this check always takes the success branch —
	 * confirm whether (uint16_t)~sum == 0 was the intended test. */
	if (~sum) {
		ret_val = QLA_SUCCESS;
	} else {
		ql4_printk(KERN_ERR, ha, "%s: Reset seq checksum failed\n",
			   __func__);
		ret_val = QLA_ERROR;
	}

	return ret_val;
}
769
770/**
771 * qla4_83xx_read_reset_template - Read Reset Template from Flash
772 * @ha: Pointer to adapter structure
773 **/
void qla4_83xx_read_reset_template(struct scsi_qla_host *ha)
{
	uint8_t *p_buff;
	uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
	uint32_t ret_val;

	ha->reset_tmplt.seq_error = 0;
	ha->reset_tmplt.buff = vmalloc(QLA83XX_RESTART_TEMPLATE_SIZE);
	if (ha->reset_tmplt.buff == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to allocate reset template resources\n",
			   __func__);
		goto exit_read_reset_template;
	}

	p_buff = ha->reset_tmplt.buff;
	addr = QLA83XX_RESET_TEMPLATE_ADDR;

	/* Header size expressed in dwords for the flash read helper. */
	tmplt_hdr_def_size = sizeof(struct qla4_83xx_reset_template_hdr) /
				    sizeof(uint32_t);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Read template hdr size %d from Flash\n",
			  __func__, tmplt_hdr_def_size));

	/* Copy template header from flash */
	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
					   tmplt_hdr_def_size);
	if (ret_val != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
			   __func__);
		goto exit_read_template_error;
	}

	ha->reset_tmplt.hdr =
		(struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff;

	/* Validate the template header size and signature */
	tmplt_hdr_size = ha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
	if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
	    (ha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
		ql4_printk(KERN_ERR, ha, "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n",
			   __func__, tmplt_hdr_size, tmplt_hdr_def_size);
		goto exit_read_template_error;
	}

	/* Remaining payload follows the header in flash and in the buffer. */
	addr = QLA83XX_RESET_TEMPLATE_ADDR + ha->reset_tmplt.hdr->hdr_size;
	p_buff = ha->reset_tmplt.buff + ha->reset_tmplt.hdr->hdr_size;
	tmplt_hdr_def_size = (ha->reset_tmplt.hdr->size -
			      ha->reset_tmplt.hdr->hdr_size) / sizeof(uint32_t);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Read rest of the template size %d\n",
			  __func__, ha->reset_tmplt.hdr->size));

	/* Copy rest of the template */
	ret_val = qla4_83xx_flash_read_u32(ha, addr, p_buff,
					   tmplt_hdr_def_size);
	if (ret_val != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "%s: Failed to read reset template\n",
			   __func__);
		goto exit_read_template_error;
	}

	/* Integrity check */
	if (qla4_83xx_reset_seq_checksum_test(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Reset Seq checksum failed!\n",
			   __func__);
		goto exit_read_template_error;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n",
			  __func__));

	/* Get STOP, START, INIT sequence offsets */
	ha->reset_tmplt.init_offset = ha->reset_tmplt.buff +
				      ha->reset_tmplt.hdr->init_seq_offset;
	ha->reset_tmplt.start_offset = ha->reset_tmplt.buff +
				       ha->reset_tmplt.hdr->start_seq_offset;
	ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff +
				      ha->reset_tmplt.hdr->hdr_size;
	qla4_83xx_dump_reset_seq_hdr(ha);

	goto exit_read_reset_template;

exit_read_template_error:
	/* NOTE(review): buff is freed but not set to NULL, leaving
	 * reset_tmplt.buff (and .hdr) dangling — confirm no later path
	 * dereferences them after a failed read. */
	vfree(ha->reset_tmplt.buff);

exit_read_reset_template:
	return;
}
864
865/**
866 * qla4_83xx_read_write_crb_reg - Read from raddr and write value to waddr.
867 *
868 * @ha : Pointer to adapter structure
869 * @raddr : CRB address to read from
870 * @waddr : CRB address to write to
871 **/
872static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha,
873					 uint32_t raddr, uint32_t waddr)
874{
875	uint32_t value;
876
877	qla4_83xx_rd_reg_indirect(ha, raddr, &value);
878	qla4_83xx_wr_reg_indirect(ha, waddr, value);
879}
880
881/**
882 * qla4_83xx_rmw_crb_reg - Read Modify Write crb register
883 *
884 * This function read value from raddr, AND with test_mask,
885 * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
886 *
887 * @ha : Pointer to adapter structure
888 * @raddr : CRB address to read from
889 * @waddr : CRB address to write to
890 * @p_rmw_hdr : header with shift/or/xor values.
891 **/
892static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha, uint32_t raddr,
893				  uint32_t waddr,
894				  struct qla4_83xx_rmw *p_rmw_hdr)
895{
896	uint32_t value;
897
898	if (p_rmw_hdr->index_a)
899		value = ha->reset_tmplt.array[p_rmw_hdr->index_a];
900	else
901		qla4_83xx_rd_reg_indirect(ha, raddr, &value);
902
903	value &= p_rmw_hdr->test_mask;
904	value <<= p_rmw_hdr->shl;
905	value >>= p_rmw_hdr->shr;
906	value |= p_rmw_hdr->or_value;
907	value ^= p_rmw_hdr->xor_value;
908
909	qla4_83xx_wr_reg_indirect(ha, waddr, value);
910
911	return;
912}
913
914static void qla4_83xx_write_list(struct scsi_qla_host *ha,
915				 struct qla4_83xx_reset_entry_hdr *p_hdr)
916{
917	struct qla4_83xx_entry *p_entry;
918	uint32_t i;
919
920	p_entry = (struct qla4_83xx_entry *)
921		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
922
923	for (i = 0; i < p_hdr->count; i++, p_entry++) {
924		qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2);
925		if (p_hdr->delay)
926			udelay((uint32_t)(p_hdr->delay));
927	}
928}
929
930static void qla4_83xx_read_write_list(struct scsi_qla_host *ha,
931				      struct qla4_83xx_reset_entry_hdr *p_hdr)
932{
933	struct qla4_83xx_entry *p_entry;
934	uint32_t i;
935
936	p_entry = (struct qla4_83xx_entry *)
937		  ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
938
939	for (i = 0; i < p_hdr->count; i++, p_entry++) {
940		qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2);
941		if (p_hdr->delay)
942			udelay((uint32_t)(p_hdr->delay));
943	}
944}
945
/* Template POLL_LIST opcode: poll each listed register against the shared
 * test_mask/test_value from the poll header. With a delay, a failed poll
 * additionally reads arg1/arg2 (debug side effect only). */
static void qla4_83xx_poll_list(struct scsi_qla_host *ha,
				struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));

	/* Entries start after 8 byte qla4_83xx_poll, poll header contains
	 * the test_mask, test_value. */
	p_entry = (struct qla4_83xx_entry *)((char *)p_poll +
					     sizeof(struct qla4_83xx_poll));

	delay = (long)p_hdr->delay;
	if (!delay) {
		/* Zero delay: single-shot check per entry, result ignored. */
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					   p_poll->test_mask,
					   p_poll->test_value);
		}
	} else {
		for (i = 0; i < p_hdr->count; i++, p_entry++) {
			if (qla4_83xx_poll_reg(ha, p_entry->arg1, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				/* Poll timed out: read both registers; the
				 * values are discarded. */
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg1,
							  &value);
				qla4_83xx_rd_reg_indirect(ha, p_entry->arg2,
							  &value);
			}
		}
	}
}
983
/* Template POLL_WRITE_LIST opcode: for each quad entry write dr then ar
 * registers, then (if a delay is given) poll the ar register against the
 * shared mask/value, logging a timeout on failure. */
static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha,
				      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr,
					  p_entry->dr_value);
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			}
		}
	}
}
1015
1016static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha,
1017					struct qla4_83xx_reset_entry_hdr *p_hdr)
1018{
1019	struct qla4_83xx_entry *p_entry;
1020	struct qla4_83xx_rmw *p_rmw_hdr;
1021	uint32_t i;
1022
1023	p_rmw_hdr = (struct qla4_83xx_rmw *)
1024		    ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
1025	p_entry = (struct qla4_83xx_entry *)
1026		  ((char *)p_rmw_hdr + sizeof(struct qla4_83xx_rmw));
1027
1028	for (i = 0; i < p_hdr->count; i++, p_entry++) {
1029		qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2,
1030				      p_rmw_hdr);
1031		if (p_hdr->delay)
1032			udelay((uint32_t)(p_hdr->delay));
1033	}
1034}
1035
/* Template PAUSE opcode: busy-wait for p_hdr->delay milliseconds. */
static void qla4_83xx_pause(struct scsi_qla_host *ha,
			    struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	if (p_hdr->delay)
		mdelay((uint32_t)((long)p_hdr->delay));
}
1042
/* Template POLL_READ_LIST opcode: write the ar register, poll it against
 * the shared mask/value, and on success capture the dr register's value
 * into the template scratch array for later RMW entries. */
static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha,
				     struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	long delay;
	int index;
	struct qla4_83xx_quad_entry *p_entry;
	struct qla4_83xx_poll *p_poll;
	uint32_t i;
	uint32_t value;

	p_poll = (struct qla4_83xx_poll *)
		 ((char *)p_hdr + sizeof(struct qla4_83xx_reset_entry_hdr));
	p_entry = (struct qla4_83xx_quad_entry *)
		  ((char *)p_poll + sizeof(struct qla4_83xx_poll));
	delay = (long)p_hdr->delay;

	for (i = 0; i < p_hdr->count; i++, p_entry++) {
		qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr,
					  p_entry->ar_value);
		if (delay) {
			if (qla4_83xx_poll_reg(ha, p_entry->ar_addr, delay,
					       p_poll->test_mask,
					       p_poll->test_value)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n",
						  __func__, i,
						  ha->reset_tmplt.seq_index));
			} else {
				index = ha->reset_tmplt.array_index;
				qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr,
							  &value);
				ha->reset_tmplt.array[index++] = value;

				/* Wrap to slot 1 (slot 0 means "read the
				 * register" in the RMW path). */
				if (index == QLA83XX_MAX_RESET_SEQ_ENTRIES)
					ha->reset_tmplt.array_index = 1;
			}
		}
	}
}
1082
/* OPCODE_SEQ_END handler: flag the current sub-sequence as finished so
 * qla4_83xx_process_reset_template() stops iterating. */
static void qla4_83xx_seq_end(struct scsi_qla_host *ha,
			      struct qla4_83xx_reset_entry_hdr *p_hdr)
{
	ha->reset_tmplt.seq_end = 1;
}
1088
1089static void qla4_83xx_template_end(struct scsi_qla_host *ha,
1090				   struct qla4_83xx_reset_entry_hdr *p_hdr)
1091{
1092	ha->reset_tmplt.template_end = 1;
1093
1094	if (ha->reset_tmplt.seq_error == 0) {
1095		DEBUG2(ql4_printk(KERN_INFO, ha,
1096				  "%s: Reset sequence completed SUCCESSFULLY.\n",
1097				  __func__));
1098	} else {
1099		ql4_printk(KERN_ERR, ha, "%s: Reset sequence completed with some timeout errors.\n",
1100			   __func__);
1101	}
1102}
1103
1104/**
1105 * qla4_83xx_process_reset_template - Process reset template.
1106 *
1107 * Process all entries in reset template till entry with SEQ_END opcode,
1108 * which indicates end of the reset template processing. Each entry has a
1109 * Reset Entry header, entry opcode/command, with size of the entry, number
1110 * of entries in sub-sequence and delay in microsecs or timeout in millisecs.
1111 *
1112 * @ha : Pointer to adapter structure
1113 * @p_buff : Common reset entry header.
1114 **/
1115static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha,
1116					     char *p_buff)
1117{
1118	int index, entries;
1119	struct qla4_83xx_reset_entry_hdr *p_hdr;
1120	char *p_entry = p_buff;
1121
1122	ha->reset_tmplt.seq_end = 0;
1123	ha->reset_tmplt.template_end = 0;
1124	entries = ha->reset_tmplt.hdr->entries;
1125	index = ha->reset_tmplt.seq_index;
1126
1127	for (; (!ha->reset_tmplt.seq_end) && (index  < entries); index++) {
1128
1129		p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry;
1130		switch (p_hdr->cmd) {
1131		case OPCODE_NOP:
1132			break;
1133		case OPCODE_WRITE_LIST:
1134			qla4_83xx_write_list(ha, p_hdr);
1135			break;
1136		case OPCODE_READ_WRITE_LIST:
1137			qla4_83xx_read_write_list(ha, p_hdr);
1138			break;
1139		case OPCODE_POLL_LIST:
1140			qla4_83xx_poll_list(ha, p_hdr);
1141			break;
1142		case OPCODE_POLL_WRITE_LIST:
1143			qla4_83xx_poll_write_list(ha, p_hdr);
1144			break;
1145		case OPCODE_READ_MODIFY_WRITE:
1146			qla4_83xx_read_modify_write(ha, p_hdr);
1147			break;
1148		case OPCODE_SEQ_PAUSE:
1149			qla4_83xx_pause(ha, p_hdr);
1150			break;
1151		case OPCODE_SEQ_END:
1152			qla4_83xx_seq_end(ha, p_hdr);
1153			break;
1154		case OPCODE_TMPL_END:
1155			qla4_83xx_template_end(ha, p_hdr);
1156			break;
1157		case OPCODE_POLL_READ_LIST:
1158			qla4_83xx_poll_read_list(ha, p_hdr);
1159			break;
1160		default:
1161			ql4_printk(KERN_ERR, ha, "%s: Unknown command ==> 0x%04x on entry = %d\n",
1162				   __func__, p_hdr->cmd, index);
1163			break;
1164		}
1165
1166		/* Set pointer to next entry in the sequence. */
1167		p_entry += p_hdr->size;
1168	}
1169
1170	ha->reset_tmplt.seq_index = index;
1171}
1172
/* Run the STOP sub-sequence of the reset template from the beginning. */
static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha)
{
	ha->reset_tmplt.seq_index = 0;
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.stop_offset);

	/* A well-formed STOP sub-sequence ends with OPCODE_SEQ_END. */
	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt STOP Sub-Sequence end.\n",
			   __func__);
}
1182
/* Run the START sub-sequence; it is expected to reach OPCODE_TMPL_END. */
static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.start_offset);

	if (ha->reset_tmplt.template_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt START Sub-Sequence end.\n",
			   __func__);
}
1191
/* Run the INIT sub-sequence; it is expected to reach OPCODE_SEQ_END. */
static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha)
{
	qla4_83xx_process_reset_template(ha, ha->reset_tmplt.init_offset);

	if (ha->reset_tmplt.seq_end != 1)
		ql4_printk(KERN_ERR, ha, "%s: Abrupt INIT Sub-Sequence end.\n",
			   __func__);
}
1200
/*
 * qla4_83xx_restart - Reset the chip via the reset template: run the STOP
 * sequence, optionally collect a minidump, run INIT, reload the firmware
 * bootloader and finally run the START sequence.
 *
 * Returns QLA_SUCCESS, or QLA_ERROR if the bootloader copy fails.
 */
static int qla4_83xx_restart(struct scsi_qla_host *ha)
{
	int ret_val = QLA_SUCCESS;
	uint32_t idc_ctrl;

	qla4_83xx_process_stop_seq(ha);

	/*
	 * Collect minidump.
	 * If IDC_CTRL BIT1 is set, clear it on going to INIT state and
	 * don't collect minidump
	 */
	idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
	if (idc_ctrl & GRACEFUL_RESET_BIT1) {
		qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
				 (idc_ctrl & ~GRACEFUL_RESET_BIT1));
		ql4_printk(KERN_INFO, ha, "%s: Graceful RESET: Not collecting minidump\n",
			   __func__);
	} else {
		qla4_8xxx_get_minidump(ha);
	}

	qla4_83xx_process_init_seq(ha);

	/* Without a valid bootloader the START sequence must not run. */
	if (qla4_83xx_copy_bootloader(ha)) {
		ql4_printk(KERN_ERR, ha, "%s: Copy bootloader, firmware restart failed!\n",
			   __func__);
		ret_val = QLA_ERROR;
		goto exit_restart;
	}

	/* Tell the firmware to boot from flash, then kick off START. */
	qla4_83xx_wr_reg(ha, QLA83XX_FW_IMAGE_VALID, QLA83XX_BOOT_FROM_FLASH);
	qla4_83xx_process_start_seq(ha);

exit_restart:
	return ret_val;
}
1238
1239int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
1240{
1241	int ret_val = QLA_SUCCESS;
1242
1243	ret_val = qla4_83xx_restart(ha);
1244	if (ret_val == QLA_ERROR) {
1245		ql4_printk(KERN_ERR, ha, "%s: Restart error\n", __func__);
1246		goto exit_start_fw;
1247	} else {
1248		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Restart done\n",
1249				  __func__));
1250	}
1251
1252	ret_val = qla4_83xx_check_cmd_peg_status(ha);
1253	if (ret_val == QLA_ERROR)
1254		ql4_printk(KERN_ERR, ha, "%s: Peg not initialized\n",
1255			   __func__);
1256
1257exit_start_fw:
1258	return ret_val;
1259}
1260
1261/*----------------------Interrupt Related functions ---------------------*/
1262
/* Disable IOCB interrupts; the test_and_clear_bit makes this idempotent
 * so repeated calls only touch the hardware once. */
static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
		qla4_8xxx_intr_disable(ha);
}
1268
1269static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
1270{
1271	uint32_t mb_int, ret;
1272
1273	if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1274		ret = readl(&ha->qla4_83xx_reg->mbox_int);
1275		mb_int = ret & ~INT_ENABLE_FW_MB;
1276		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1277		writel(1, &ha->qla4_83xx_reg->leg_int_mask);
1278	}
1279}
1280
/* Disable both mailbox and IOCB interrupt sources. */
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_disable_mbox_intrs(ha);
	qla4_83xx_disable_iocb_intrs(ha);
}
1286
/* Enable IOCB interrupts if not already on; the flag keeps this
 * idempotent. */
static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
{
	if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
		qla4_8xxx_intr_enable(ha);
		set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
	}
}
1294
1295void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
1296{
1297	uint32_t mb_int;
1298
1299	if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
1300		mb_int = INT_ENABLE_FW_MB;
1301		writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
1302		writel(0, &ha->qla4_83xx_reg->leg_int_mask);
1303		set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
1304	}
1305}
1306
1307
/* Enable both mailbox and IOCB interrupt sources. */
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_83xx_enable_mbox_intrs(ha);
	qla4_83xx_enable_iocb_intrs(ha);
}
1313
1314
/*
 * qla4_83xx_queue_mbox_cmd - Load a mailbox command into the hardware
 * mailbox-in registers and signal the firmware that it is pending.
 * @ha: Pointer to adapter structure.
 * @mbx_cmd: Values for mailbox registers 0..incount-1.
 * @incount: Number of mailbox registers to load.
 *
 * Mailboxes 1..incount-1 are written before mailbox 0 — keep this order.
 */
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int incount)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < incount; i++)
		writel(mbx_cmd[i], &ha->qla4_83xx_reg->mailbox_in[i]);

	writel(mbx_cmd[0], &ha->qla4_83xx_reg->mailbox_in[0]);

	/* Set Host Interrupt register to 1, to tell the firmware that
	 * a mailbox command is pending. Firmware after reading the
	 * mailbox command, clears the host interrupt register */
	writel(HINT_MBX_INT_PENDING, &ha->qla4_83xx_reg->host_intr);
}
1331
1332void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha, int outcount)
1333{
1334	int intr_status;
1335
1336	intr_status = readl(&ha->qla4_83xx_reg->risc_intr);
1337	if (intr_status) {
1338		ha->mbox_status_count = outcount;
1339		ha->isp_ops->interrupt_service_routine(ha, intr_status);
1340	}
1341}
1342
1343/**
1344 * qla4_83xx_isp_reset - Resets ISP and aborts all outstanding commands.
1345 * @ha: pointer to host adapter structure.
1346 **/
1347int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
1348{
1349	int rval;
1350	uint32_t dev_state;
1351
1352	ha->isp_ops->idc_lock(ha);
1353	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
1354
1355	if (ql4xdontresethba)
1356		qla4_83xx_set_idc_dontreset(ha);
1357
1358	if (dev_state == QLA8XXX_DEV_READY) {
1359		/* If IDC_CTRL DONTRESETHBA_BIT0 is set dont do reset
1360		 * recovery */
1361		if (qla4_83xx_idc_dontreset(ha) == DONTRESET_BIT0) {
1362			ql4_printk(KERN_ERR, ha, "%s: Reset recovery disabled\n",
1363				   __func__);
1364			rval = QLA_ERROR;
1365			goto exit_isp_reset;
1366		}
1367
1368		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET\n",
1369				  __func__));
1370		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
1371				    QLA8XXX_DEV_NEED_RESET);
1372
1373	} else {
1374		/* If device_state is NEED_RESET, go ahead with
1375		 * Reset,irrespective of ql4xdontresethba. This is to allow a
1376		 * non-reset-owner to force a reset. Non-reset-owner sets
1377		 * the IDC_CTRL BIT0 to prevent Reset-owner from doing a Reset
1378		 * and then forces a Reset by setting device_state to
1379		 * NEED_RESET. */
1380		DEBUG2(ql4_printk(KERN_INFO, ha,
1381				  "%s: HW state already set to NEED_RESET\n",
1382				  __func__));
1383	}
1384
1385	/* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
1386	 * priority and which drivers are present. Unlike ISP8022, the function
1387	 * setting NEED_RESET, may not be the Reset owner. */
1388	if (qla4_83xx_can_perform_reset(ha))
1389		set_bit(AF_8XXX_RST_OWNER, &ha->flags);
1390
1391	ha->isp_ops->idc_unlock(ha);
1392	rval = qla4_8xxx_device_state_handler(ha);
1393
1394	ha->isp_ops->idc_lock(ha);
1395	qla4_8xxx_clear_rst_ready(ha);
1396exit_isp_reset:
1397	ha->isp_ops->idc_unlock(ha);
1398
1399	if (rval == QLA_SUCCESS)
1400		clear_bit(AF_FW_RECOVERY, &ha->flags);
1401
1402	return rval;
1403}
1404
/*
 * qla4_83xx_dump_pause_control_regs - Log (at DEBUG2 level) the SRE-shim
 * control register, per-port Rx buffer pause thresholds, traffic-class
 * max-cell registers, Rx traffic-class statistics, and IFB pause
 * thresholds, for diagnosing pause-frame related issues.
 */
static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha)
{
	u32 val = 0, val1 = 0;
	int i;

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL, &val);
	DEBUG2(ql4_printk(KERN_INFO, ha, "SRE-Shim Ctrl:0x%x\n", val));

	/* Port 0 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 Rx Buffer Pause Threshold Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"));
	for (i = 0; i < 8; i++) {
		qla4_83xx_rd_reg_indirect(ha,
				QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 0 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
			       QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Traffic Class Max Cell Registers. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
		"Port 1 RxB Traffic Class Max Cell Registers[3..0]:"));
	for (i = 0; i < 4; i++) {
		qla4_83xx_rd_reg_indirect(ha,
			       QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4), &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 0 RxB Rx Traffic Class Stats: select TC i in bits 31:29,
	 * then read back the stats value for that class. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT0_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	/* Port 1 RxB Rx Traffic Class Stats. */
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"));
	for (i = 7; i >= 0; i--) {
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		val &= ~(0x7 << 29);    /* Reset bits 29 to 31 */
		qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS,
					  (val | (i << 29)));
		qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT1_RXB_TC_STATS, &val);
		DEBUG2(pr_info("0x%x  ", val));
	}

	DEBUG2(pr_info("\n"));

	qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS, &val);
	qla4_83xx_rd_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS, &val1);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
			  val, val1));
}
1492
/*
 * __qla4_83xx_disable_pause - Program pause thresholds / max-cell values
 * to disable pause frames on all ports. Caller must hold the IDC lock
 * (see qla4_83xx_disable_pause). Register write order is preserved
 * deliberately — do not reorder the loops.
 */
static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	int i;

	/* set SRE-Shim Control Register */
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_SRE_SHIM_CONTROL,
				  QLA83XX_SET_PAUSE_VAL);

	for (i = 0; i < 8; i++) {
		/* Port 0 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				      QLA83XX_PORT0_RXB_PAUSE_THRS + (i * 0x4),
				      QLA83XX_SET_PAUSE_VAL);
		/* Port 1 Rx Buffer Pause Threshold Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				      QLA83XX_PORT1_RXB_PAUSE_THRS + (i * 0x4),
				      QLA83XX_SET_PAUSE_VAL);
	}

	for (i = 0; i < 4; i++) {
		/* Port 0 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				     QLA83XX_PORT0_RXB_TC_MAX_CELL + (i * 0x4),
				     QLA83XX_SET_TC_MAX_CELL_VAL);
		/* Port 1 RxB Traffic Class Max Cell Registers. */
		qla4_83xx_wr_reg_indirect(ha,
				     QLA83XX_PORT1_RXB_TC_MAX_CELL + (i * 0x4),
				     QLA83XX_SET_TC_MAX_CELL_VAL);
	}

	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT2_IFB_PAUSE_THRS,
				  QLA83XX_SET_PAUSE_VAL);
	qla4_83xx_wr_reg_indirect(ha, QLA83XX_PORT3_IFB_PAUSE_THRS,
				  QLA83XX_SET_PAUSE_VAL);

	ql4_printk(KERN_INFO, ha, "Disabled pause frames successfully.\n");
}
1530
1531/**
1532 * qla4_83xx_eport_init - Initialize EPort.
1533 * @ha: Pointer to host adapter structure.
1534 *
1535 * If EPort hardware is in reset state before disabling pause, there would be
1536 * serious hardware wedging issues. To prevent this perform eport init everytime
1537 * before disabling pause frames.
1538 **/
1539static void qla4_83xx_eport_init(struct scsi_qla_host *ha)
1540{
1541	/* Clear the 8 registers */
1542	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_REG, 0x0);
1543	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT0, 0x0);
1544	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT1, 0x0);
1545	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT2, 0x0);
1546	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_PORT3, 0x0);
1547	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_SRE_SHIM, 0x0);
1548	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_EPG_SHIM, 0x0);
1549	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_ETHER_PCS, 0x0);
1550
1551	/* Write any value to Reset Control register */
1552	qla4_83xx_wr_reg_indirect(ha, QLA83XX_RESET_CONTROL, 0xFF);
1553
1554	ql4_printk(KERN_INFO, ha, "EPORT is out of reset.\n");
1555}
1556
/* Disable pause frames under the IDC lock: bring EPort out of reset
 * first (see qla4_83xx_eport_init), dump the current pause registers for
 * diagnostics, then program the disable values. */
void qla4_83xx_disable_pause(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);
	/* Before disabling pause frames, ensure that eport is not in reset */
	qla4_83xx_eport_init(ha);
	qla4_83xx_dump_pause_control_regs(ha);
	__qla4_83xx_disable_pause(ha);
	ha->isp_ops->idc_unlock(ha);
}
1566
1567/**
1568 * qla4_83xx_is_detached - Check if we are marked invisible.
1569 * @ha: Pointer to host adapter structure.
1570 **/
1571int qla4_83xx_is_detached(struct scsi_qla_host *ha)
1572{
1573	uint32_t drv_active;
1574
1575	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
1576
1577	if (test_bit(AF_INIT_DONE, &ha->flags) &&
1578	    !(drv_active & (1 << ha->func_num))) {
1579		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: drv_active = 0x%X\n",
1580				  __func__, drv_active));
1581		return QLA_SUCCESS;
1582	}
1583
1584	return QLA_ERROR;
1585}
1586