// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver,
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrb.h"

static struct raid_template *myrb_raid_template;

static void myrb_monitor(struct work_struct *work);
static inline void myrb_translate_devstate(void *DeviceState);

static inline int myrb_logical_channel(struct Scsi_Host *shost)
{
	return shost->max_channel - 1;
}
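
/*
 * Logical drives are presented on the highest usable channel of the
 * SCSI host; all lower channels address physical devices in
 * pass-through mode (see myrb_queuecommand() below for the routing).
 */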

static struct myrb_devstate_name_entry {
	enum myrb_devstate state;
	const char *name;
} myrb_devstate_name_list[] = {
	{ MYRB_DEVICE_DEAD, "Dead" },
	{ MYRB_DEVICE_WO, "WriteOnly" },
	{ MYRB_DEVICE_ONLINE, "Online" },
	{ MYRB_DEVICE_CRITICAL, "Critical" },
	{ MYRB_DEVICE_STANDBY, "Standby" },
	{ MYRB_DEVICE_OFFLINE, "Offline" },
};

static const char *myrb_devstate_name(enum myrb_devstate state)
{
	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return "Unknown";
}

static struct myrb_raidlevel_name_entry {
	enum myrb_raidlevel level;
	const char *name;
} myrb_raidlevel_name_list[] = {
	{ MYRB_RAID_LEVEL0, "RAID0" },
	{ MYRB_RAID_LEVEL1, "RAID1" },
	{ MYRB_RAID_LEVEL3, "RAID3" },
	{ MYRB_RAID_LEVEL5, "RAID5" },
	{ MYRB_RAID_LEVEL6, "RAID6" },
	{ MYRB_RAID_JBOD, "JBOD" },
};

static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
{
	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/**
 * myrb_create_mempools - allocates auxiliary data structures
 * @pdev: PCI device
 * @cb: pointer to the hba structure
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
{
	size_t elem_size, elem_align;

	elem_align = sizeof(struct myrb_sge);
	elem_size = cb->host->sg_tablesize * elem_align;
	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
				      elem_size, elem_align, 0);
	if (cb->sg_pool == NULL) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate SG pool\n");
		return false;
	}

	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
				       sizeof(struct myrb_dcdb),
				       sizeof(unsigned int), 0);
	if (!cb->dcdb_pool) {
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate DCDB pool\n");
		return false;
	}

	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
		 "myrb_wq_%d", cb->host->host_no);
	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
	if (!cb->work_q) {
		dma_pool_destroy(cb->dcdb_pool);
		cb->dcdb_pool = NULL;
		dma_pool_destroy(cb->sg_pool);
		cb->sg_pool = NULL;
		shost_printk(KERN_ERR, cb->host,
			     "Failed to create workqueue\n");
		return false;
	}

	/*
	 * Initialize the Monitoring Timer.
	 */
	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);

	return true;
}

/**
 * myrb_destroy_mempools - tears down the memory pools for the controller
 * @cb: pointer to the hba structure
 */
static void myrb_destroy_mempools(struct myrb_hba *cb)
{
	cancel_delayed_work_sync(&cb->monitor_work);
	destroy_workqueue(cb->work_q);

	dma_pool_destroy(cb->sg_pool);
	dma_pool_destroy(cb->dcdb_pool);
}

/**
 * myrb_reset_cmd - reset command block
 * @cmd_blk: command block to reset
 */
static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
	cmd_blk->status = 0;
}

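/*
 * Command submission uses a ring of memory mailboxes that the controller
 * scans on its own. Per the original DAC960 driver, the hardware doorbell
 * (cb->get_cmd_mbox) only needs ringing when the controller may have
 * stopped scanning, which is inferred from one of the two most recently
 * used mailbox slots having already been consumed (words[0] == 0);
 * otherwise the new command is picked up automatically.
 */
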
/**
 * myrb_qcmd - queues command block for execution
 * @cb: pointer to the hba structure
 * @cmd_blk: command block to queue
 *
 * Caller must hold cb->queue_lock.
 */
static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;

	cb->write_cmd_mbox(next_mbox, mbox);
	if (cb->prev_cmd_mbox1->words[0] == 0 ||
	    cb->prev_cmd_mbox2->words[0] == 0)
		cb->get_cmd_mbox(base);
	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
	cb->prev_cmd_mbox1 = next_mbox;
	if (++next_mbox > cb->last_cmd_mbox)
		next_mbox = cb->first_cmd_mbox;
	cb->next_cmd_mbox = next_mbox;
}

/**
 * myrb_exec_cmd - executes command block and waits for completion.
 * @cb: pointer to the hba structure
 * @cmd_blk: command block to execute
 *
 * Return: command status
 */
static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
		struct myrb_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(cmpl);
	unsigned long flags;

	cmd_blk->completion = &cmpl;

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	WARN_ON(in_interrupt());
	wait_for_completion(&cmpl);
	return cmd_blk->status;
}

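/*
 * Driver-initiated (direct) commands are funnelled through a single
 * preallocated command block, cb->dcmd_blk, serialized by cb->dcmd_mutex.
 * The type 3 and type 3D helpers below rely on this: they never allocate
 * a command block, they just fill in the shared mailbox and wait for
 * completion.
 */
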
/**
 * myrb_exec_type3 - executes a type 3 command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @addr: DMA address of the command payload
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, dma_addr_t addr)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_DCMD_TAG;
	mbox->type3.opcode = op;
	mbox->type3.addr = addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	return status;
}

/**
 * myrb_exec_type3D - executes a type 3D command and waits for completion.
 * @cb: pointer to the hba structure
 * @op: command opcode
 * @sdev: SCSI device to address
 * @pdev_info: physical device state buffer
 *
 * Return: command status
 */
static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
		enum myrb_cmd_opcode op, struct scsi_device *sdev,
		struct myrb_pdev_state *pdev_info)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	dma_addr_t pdev_info_addr;

	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
					sizeof(struct myrb_pdev_state),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
		return MYRB_STATUS_SUBSYS_FAILED;

	mutex_lock(&cb->dcmd_mutex);
	myrb_reset_cmd(cmd_blk);
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.opcode = op;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.addr = pdev_info_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);
	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
	if (status == MYRB_STATUS_SUCCESS &&
	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
		myrb_translate_devstate(pdev_info);

	return status;
}

static char *myrb_event_msg[] = {
	"killed because write recovery failed",
	"killed because of SCSI bus reset failure",
	"killed because of double check condition",
	"killed because it was removed",
	"killed because of gross error on SCSI chip",
	"killed because of bad tag returned from drive",
	"killed because of timeout on SCSI command",
	"killed because of reset SCSI command issued from system",
	"killed because busy or parity error count exceeded limit",
	"killed because of 'kill drive' command from system",
	"killed because of selection timeout",
	"killed due to SCSI phase sequence error",
	"killed due to unknown status",
};

/**
 * myrb_get_event - get event log from HBA
 * @cb: pointer to the hba structure
 * @event: number of the event
 *
 * Executes a type 3E command and logs the event message.
 */
static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_log_entry *ev_buf;
	dma_addr_t ev_addr;
	unsigned short status;

	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
				    sizeof(struct myrb_log_entry),
				    &ev_addr, GFP_KERNEL);
	if (!ev_buf)
		return;

	myrb_reset_cmd(cmd_blk);
	mbox->type3E.id = MYRB_MCMD_TAG;
	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
	mbox->type3E.opqual = 1;
	mbox->type3E.ev_seq = event;
	mbox->type3E.addr = ev_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status != MYRB_STATUS_SUCCESS)
		shost_printk(KERN_INFO, cb->host,
			     "Failed to get event log %d, status %04x\n",
			     event, status);
	else if (ev_buf->seq_num == event) {
		struct scsi_sense_hdr sshdr;

		memset(&sshdr, 0, sizeof(sshdr));
		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);

		if (sshdr.sense_key == VENDOR_SPECIFIC &&
		    sshdr.asc == 0x80 &&
		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: %s\n",
				     ev_buf->channel, ev_buf->target,
				     myrb_event_msg[sshdr.ascq]);
		else
			shost_printk(KERN_CRIT, cb->host,
				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
				     ev_buf->channel, ev_buf->target,
				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
	}

	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
			  ev_buf, ev_addr);
}

/**
 * myrb_get_errtable - retrieves the error table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and logs the error table from the controller.
 */
static void myrb_get_errtable(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;
	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];

	memcpy(&old_table, cb->err_table, sizeof(old_table));

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
	mbox->type3.addr = cb->err_table_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		struct myrb_error_entry *table = cb->err_table;
		struct myrb_error_entry *new, *old;
		size_t err_table_offset;
		struct scsi_device *sdev;

		shost_for_each_device(sdev, cb->host) {
			if (sdev->channel >= myrb_logical_channel(cb->host))
				continue;
			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
				+ sdev->id;
			new = table + err_table_offset;
			old = &old_table[err_table_offset];
			if (new->parity_err == old->parity_err &&
			    new->soft_err == old->soft_err &&
			    new->hard_err == old->hard_err &&
			    new->misc_err == old->misc_err)
				continue;
			sdev_printk(KERN_CRIT, sdev,
				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
				    new->parity_err, new->soft_err,
				    new->hard_err, new->misc_err);
		}
	}
}

/**
 * myrb_get_ldev_info - retrieves the logical device table from the controller
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and updates the logical device table.
 *
 * Return: command status
 */
static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
{
	unsigned short status;
	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
	struct Scsi_Host *shost = cb->host;

	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
				 cb->ldev_info_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
		struct myrb_ldev_info *old = NULL;
		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
					  ldev_num, 0);
		if (!sdev) {
			if (new->state == MYRB_DEVICE_OFFLINE)
				continue;
			shost_printk(KERN_INFO, shost,
				     "Adding Logical Drive %d in state %s\n",
				     ldev_num, myrb_devstate_name(new->state));
			scsi_add_device(shost, myrb_logical_channel(shost),
					ldev_num, 0);
			continue;
		}
		old = sdev->hostdata;
		if (new->state != old->state)
			shost_printk(KERN_INFO, shost,
				     "Logical Drive %d is now %s\n",
				     ldev_num, myrb_devstate_name(new->state));
		if (new->wb_enabled != old->wb_enabled)
			sdev_printk(KERN_INFO, sdev,
				    "Logical Drive is now WRITE %s\n",
				    (new->wb_enabled ? "BACK" : "THRU"));
		memcpy(old, new, sizeof(*new));
		scsi_device_put(sdev);
	}
	return status;
}

/**
 * myrb_get_rbld_progress - get rebuild progress information
 * @cb: pointer to the hba structure
 * @rbld: buffer for the rebuild progress information
 *
 * Executes a type 3 command and returns the rebuild progress
 * information.
 *
 * Return: command status
 */
static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
		struct myrb_rbld_progress *rbld)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf)
		return MYRB_STATUS_RBLD_NOT_CHECKED;

	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (rbld)
		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
	return status;
}

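/*
 * Progress percentages below are computed as
 * (100 * (blocks_done >> 7)) / (size >> 7) rather than as
 * 100 * blocks_done / size: pre-shifting both operands by 7 keeps the
 * multiplication by 100 from overflowing 32 bits on large logical
 * drives, at the cost of a little precision.
 */
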
/**
 * myrb_update_rbld_progress - updates the rebuild status
 * @cb: pointer to the hba structure
 *
 * Updates the rebuild status for the attached logical devices.
 */
static void myrb_update_rbld_progress(struct myrb_hba *cb)
{
	struct myrb_rbld_progress rbld_buf;
	unsigned short status;

	status = myrb_get_rbld_progress(cb, &rbld_buf);
	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
		status = MYRB_STATUS_RBLD_SUCCESS;
	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
		unsigned int blocks_done =
			rbld_buf.ldev_size - rbld_buf.blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  rbld_buf.ldev_num, 0);
		if (!sdev)
			return;

		switch (status) {
		case MYRB_STATUS_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild in Progress, %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (rbld_buf.ldev_size >> 7));
			break;
		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Logical Drive Failure\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
			break;
		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Completed Successfully\n");
			break;
		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
			sdev_printk(KERN_INFO, sdev,
				    "Rebuild Successfully Terminated\n");
			break;
		default:
			break;
		}
		scsi_device_put(sdev);
	}
	cb->last_rbld_status = status;
}

/**
 * myrb_get_cc_progress - retrieve the consistency check progress
 * @cb: pointer to the hba structure
 *
 * Executes a type 3 command and fetches the rebuild / consistency check
 * status.
 */
static void myrb_get_cc_progress(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_rbld_progress *rbld_buf;
	dma_addr_t rbld_addr;
	unsigned short status;

	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
				      sizeof(struct myrb_rbld_progress),
				      &rbld_addr, GFP_KERNEL);
	if (!rbld_buf) {
		cb->need_cc_status = true;
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3.id = MYRB_MCMD_TAG;
	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
	mbox->type3.addr = rbld_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	if (status == MYRB_STATUS_SUCCESS) {
		unsigned int ldev_num = rbld_buf->ldev_num;
		unsigned int ldev_size = rbld_buf->ldev_size;
		unsigned int blocks_done =
			ldev_size - rbld_buf->blocks_left;
		struct scsi_device *sdev;

		sdev = scsi_device_lookup(cb->host,
					  myrb_logical_channel(cb->host),
					  ldev_num, 0);
		if (sdev) {
			sdev_printk(KERN_INFO, sdev,
				    "Consistency Check in Progress: %d%% completed\n",
				    (100 * (blocks_done >> 7))
				    / (ldev_size >> 7));
			scsi_device_put(sdev);
		}
	}
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
			  rbld_buf, rbld_addr);
}

/**
 * myrb_bgi_control - updates background initialisation status
 * @cb: pointer to the hba structure
 *
 * Executes a type 3B command and updates the background initialisation status
 */
static void myrb_bgi_control(struct myrb_hba *cb)
{
	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_bgi_status *bgi, *last_bgi;
	dma_addr_t bgi_addr;
	struct scsi_device *sdev = NULL;
	unsigned short status;

	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
				 &bgi_addr, GFP_KERNEL);
	if (!bgi) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate bgi memory\n");
		return;
	}
	myrb_reset_cmd(cmd_blk);
	mbox->type3B.id = MYRB_DCMD_TAG;
	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
	mbox->type3B.optype = 0x20;
	mbox->type3B.addr = bgi_addr;
	status = myrb_exec_cmd(cb, cmd_blk);
	last_bgi = &cb->bgi_status;
	sdev = scsi_device_lookup(cb->host,
				  myrb_logical_channel(cb->host),
				  bgi->ldev_num, 0);
	switch (status) {
	case MYRB_STATUS_SUCCESS:
		switch (bgi->status) {
		case MYRB_BGI_INVALID:
			break;
		case MYRB_BGI_STARTED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Started\n");
			break;
		case MYRB_BGI_INPROGRESS:
			if (!sdev)
				break;
			if (bgi->blocks_done == last_bgi->blocks_done &&
			    bgi->ldev_num == last_bgi->ldev_num)
				break;
			sdev_printk(KERN_INFO, sdev,
				 "Background Initialization in Progress: %d%% completed\n",
				 (100 * (bgi->blocks_done >> 7))
				 / (bgi->ldev_size >> 7));
			break;
		case MYRB_BGI_SUSPENDED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Suspended\n");
			break;
		case MYRB_BGI_CANCELLED:
			if (!sdev)
				break;
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Cancelled\n");
			break;
		}
		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
		break;
	case MYRB_STATUS_BGI_SUCCESS:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Completed Successfully\n");
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	case MYRB_STATUS_BGI_ABORTED:
		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
			sdev_printk(KERN_INFO, sdev,
				    "Background Initialization Aborted\n");
		fallthrough;
	case MYRB_STATUS_NO_BGI_INPROGRESS:
		cb->bgi_status.status = MYRB_BGI_INVALID;
		break;
	}
	if (sdev)
		scsi_device_put(sdev);
	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
			  bgi, bgi_addr);
}

/**
 * myrb_hba_enquiry - updates the controller status
 * @cb: pointer to the hba structure
 *
 * Executes a DAC960 V1 Enquiry command and updates the controller status.
 *
 * Return: command status
 */
static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
{
	struct myrb_enquiry old, *new;
	unsigned short status;

	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
	if (status != MYRB_STATUS_SUCCESS)
		return status;

	new = cb->enquiry;
	if (new->ldev_count > old.ldev_count) {
		int ldev_num = old.ldev_count - 1;

		while (++ldev_num < new->ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d Now Exists\n",
				     ldev_num);
	}
	if (new->ldev_count < old.ldev_count) {
		int ldev_num = new->ldev_count - 1;

		while (++ldev_num < old.ldev_count)
			shost_printk(KERN_CRIT, cb->host,
				     "Logical Drive %d No Longer Exists\n",
				     ldev_num);
	}
	if (new->status.deferred != old.status.deferred)
		shost_printk(KERN_CRIT, cb->host,
			     "Deferred Write Error Flag is now %s\n",
			     (new->status.deferred ? "TRUE" : "FALSE"));
	if (new->ev_seq != old.ev_seq) {
		cb->new_ev_seq = new->ev_seq;
		cb->need_err_info = true;
		shost_printk(KERN_INFO, cb->host,
			     "Event log %d/%d (%d/%d) available\n",
			     cb->old_ev_seq, cb->new_ev_seq,
			     old.ev_seq, new->ev_seq);
	}
	if ((new->ldev_critical > 0 &&
	     new->ldev_critical != old.ldev_critical) ||
	    (new->ldev_offline > 0 &&
	     new->ldev_offline != old.ldev_offline) ||
	    (new->ldev_count != old.ldev_count)) {
		shost_printk(KERN_INFO, cb->host,
			     "Logical drive count changed (%d/%d/%d)\n",
			     new->ldev_critical,
			     new->ldev_offline,
			     new->ldev_count);
		cb->need_ldev_info = true;
	}
	if (new->pdev_dead > 0 ||
	    new->pdev_dead != old.pdev_dead ||
	    time_after_eq(jiffies, cb->secondary_monitor_time
			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
		cb->need_bgi_status = cb->bgi_status_supported;
		cb->secondary_monitor_time = jiffies;
	}
	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
		cb->need_rbld = true;
		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
	}
	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
		switch (new->rbld) {
		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed Successfully\n");
			break;
		case MYRB_STDBY_RBLD_IN_PROGRESS:
		case MYRB_BG_RBLD_IN_PROGRESS:
			break;
		case MYRB_BG_CHECK_IN_PROGRESS:
			cb->need_cc_status = true;
			break;
		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Completed with Error\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Physical Device Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Logical Drive Failed\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Failed - Other Causes\n");
			break;
		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
			shost_printk(KERN_INFO, cb->host,
				     "Consistency Check Successfully Terminated\n");
			break;
		}
	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
		cb->need_cc_status = true;

	return MYRB_STATUS_SUCCESS;
}

/**
 * myrb_set_pdev_state - sets the device state for a physical device
 * @cb: pointer to the hba structure
 * @sdev: SCSI device to modify
 * @state: new device state
 *
 * Return: command status
 */
static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
		struct scsi_device *sdev, enum myrb_devstate state)
{
	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned short status;

	mutex_lock(&cb->dcmd_mutex);
	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
	mbox->type3D.id = MYRB_DCMD_TAG;
	mbox->type3D.channel = sdev->channel;
	mbox->type3D.target = sdev->id;
	mbox->type3D.state = state & 0x1F;
	status = myrb_exec_cmd(cb, cmd_blk);
	mutex_unlock(&cb->dcmd_mutex);

	return status;
}

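/*
 * Memory mailbox initialization uses the vendor-specific type X command
 * 0x2B. Following the original DAC960 driver, opcode2 0x14 requests the
 * dual mode memory mailbox interface, and opcode2 0x10 is the single
 * mode fallback used when the dual mode setup is rejected by the
 * firmware.
 */
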
/**
 * myrb_enable_mmio - enables the Memory Mailbox Interface
 * @cb: pointer to the hba structure
 * @mmio_init_fn: controller-specific mailbox initialization function
 *
 * PD and P controller types have no memory mailbox, but still need the
 * other dma mapped memory.
 *
 * Return: true on success, false otherwise.
 */
static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
{
	void __iomem *base = cb->io_base;
	struct pci_dev *pdev = cb->pdev;
	size_t err_table_size;
	size_t ldev_info_size;
	union myrb_cmd_mbox *cmd_mbox_mem;
	struct myrb_stat_mbox *stat_mbox_mem;
	union myrb_cmd_mbox mbox;
	unsigned short status;

	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
		dev_err(&pdev->dev, "DMA mask out of range\n");
		return false;
	}

	cb->enquiry = dma_alloc_coherent(&pdev->dev,
					 sizeof(struct myrb_enquiry),
					 &cb->enquiry_addr, GFP_KERNEL);
	if (!cb->enquiry)
		return false;

	err_table_size = sizeof(struct myrb_error_entry) *
		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
					   &cb->err_table_addr, GFP_KERNEL);
	if (!cb->err_table)
		return false;

	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
					       &cb->ldev_info_addr, GFP_KERNEL);
	if (!cb->ldev_info_buf)
		return false;

	/*
	 * Skip mailbox initialisation for PD and P Controllers
	 */
	if (!mmio_init_fn)
		return true;

	/* These are the base addresses for the command memory mailbox array */
	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
						cb->cmd_mbox_size,
						&cb->cmd_mbox_addr,
						GFP_KERNEL);
	if (!cb->first_cmd_mbox)
		return false;

	cmd_mbox_mem = cb->first_cmd_mbox;
	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
	cb->last_cmd_mbox = cmd_mbox_mem;
	cb->next_cmd_mbox = cb->first_cmd_mbox;
	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;

	/* These are the base addresses for the status memory mailbox array */
	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
	    sizeof(struct myrb_stat_mbox);
	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
						 cb->stat_mbox_size,
						 &cb->stat_mbox_addr,
						 GFP_KERNEL);
	if (!cb->first_stat_mbox)
		return false;

	stat_mbox_mem = cb->first_stat_mbox;
	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
	cb->last_stat_mbox = stat_mbox_mem;
	cb->next_stat_mbox = cb->first_stat_mbox;

	/* Enable the Memory Mailbox Interface. */
	cb->dual_mode_interface = true;
	mbox.typeX.opcode = 0x2B;
	mbox.typeX.id = 0;
	mbox.typeX.opcode2 = 0x14;
	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;

	status = mmio_init_fn(pdev, base, &mbox);
	if (status != MYRB_STATUS_SUCCESS) {
		cb->dual_mode_interface = false;
		mbox.typeX.opcode2 = 0x10;
		status = mmio_init_fn(pdev, base, &mbox);
		if (status != MYRB_STATUS_SUCCESS) {
			dev_err(&pdev->dev,
				"Failed to enable mailbox, status %02X\n",
				status);
			return false;
		}
	}
	return true;
}

/**
 * myrb_get_hba_config - reads the configuration information
 * @cb: pointer to the hba structure
 *
 * Reads the configuration information from the controller and
 * initializes the controller structure.
 *
 * Return: 0 on success, errno otherwise
 */
static int myrb_get_hba_config(struct myrb_hba *cb)
{
	struct myrb_enquiry2 *enquiry2;
	dma_addr_t enquiry2_addr;
	struct myrb_config2 *config2;
	dma_addr_t config2_addr;
	struct Scsi_Host *shost = cb->host;
	struct pci_dev *pdev = cb->pdev;
	int pchan_max = 0, pchan_cur = 0;
	unsigned short status;
	int ret = -ENODEV, memsize = 0;

	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				      &enquiry2_addr, GFP_KERNEL);
	if (!enquiry2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 enquiry2 memory\n");
		return -ENOMEM;
	}
	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
				     &config2_addr, GFP_KERNEL);
	if (!config2) {
		shost_printk(KERN_ERR, cb->host,
			     "Failed to allocate V1 config2 memory\n");
		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
				  enquiry2, enquiry2_addr);
		return -ENOMEM;
	}
	mutex_lock(&cb->dma_mutex);
	status = myrb_hba_enquiry(cb);
	mutex_unlock(&cb->dma_mutex);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue V1 Enquiry2\n");
		goto out_free;
	}

	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to issue ReadConfig2\n");
		goto out_free;
	}

	status = myrb_get_ldev_info(cb);
	if (status != MYRB_STATUS_SUCCESS) {
		shost_printk(KERN_WARNING, cb->host,
			     "Failed to get logical drive information\n");
		goto out_free;
	}

	/*
	 * Initialize the Controller Model Name and Full Model Name fields.
	 */
	switch (enquiry2->hw.sub_model) {
	case DAC960_V1_P_PD_PU:
		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
			strcpy(cb->model_name, "DAC960PU");
		else
			strcpy(cb->model_name, "DAC960PD");
		break;
	case DAC960_V1_PL:
		strcpy(cb->model_name, "DAC960PL");
		break;
	case DAC960_V1_PG:
		strcpy(cb->model_name, "DAC960PG");
		break;
	case DAC960_V1_PJ:
		strcpy(cb->model_name, "DAC960PJ");
		break;
	case DAC960_V1_PR:
		strcpy(cb->model_name, "DAC960PR");
		break;
	case DAC960_V1_PT:
		strcpy(cb->model_name, "DAC960PT");
		break;
	case DAC960_V1_PTL0:
		strcpy(cb->model_name, "DAC960PTL0");
		break;
	case DAC960_V1_PRL:
		strcpy(cb->model_name, "DAC960PRL");
		break;
	case DAC960_V1_PTL1:
		strcpy(cb->model_name, "DAC960PTL1");
		break;
	case DAC960_V1_1164P:
		strcpy(cb->model_name, "eXtremeRAID 1100");
		break;
	default:
		shost_printk(KERN_WARNING, cb->host,
			     "Unknown Model %X\n",
			     enquiry2->hw.sub_model);
		goto out;
	}
	/*
	 * Initialize the Controller Firmware Version field and verify that it
	 * is a supported firmware version.
	 * The supported firmware versions are:
	 *
	 * DAC1164P		    5.06 and above
	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
	 * DAC960PU/PD/PL	    3.51 and above
	 * DAC960PU/PD/PL/P	    2.73 and above
	 */
#if defined(CONFIG_ALPHA)
	/*
	 * DEC Alpha machines were often equipped with DAC960 cards that were
	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
	 * the last custom FW revision to be released by DEC for these older
	 * controllers, appears to work quite well with this driver.
	 *
	 * Cards tested successfully were several versions each of the PD and
	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
	 * back of the board, of:
	 *
	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
	 *         or D040349 (3-channel)
	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
	 *         or D040397 (3-channel)
	 */
# define FIRMWARE_27X	"2.70"
#else
# define FIRMWARE_27X	"2.73"
#endif

	if (enquiry2->fw.major_version == 0) {
		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
		enquiry2->fw.firmware_type = '0';
		enquiry2->fw.turn_id = 0;
	}
	snprintf(cb->fw_version, sizeof(cb->fw_version),
		"%u.%02u-%c-%02u",
		enquiry2->fw.major_version,
		enquiry2->fw.minor_version,
		enquiry2->fw.firmware_type,
		enquiry2->fw.turn_id);
	if (!((enquiry2->fw.major_version == 5 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 4 &&
	       enquiry2->fw.minor_version >= 6) ||
	      (enquiry2->fw.major_version == 3 &&
	       enquiry2->fw.minor_version >= 51) ||
	      (enquiry2->fw.major_version == 2 &&
	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
		shost_printk(KERN_WARNING, cb->host,
			"Firmware Version '%s' unsupported\n",
			cb->fw_version);
		goto out;
	}
	/*
	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
	 * Enclosure Management Enabled fields.
	 */
	switch (enquiry2->hw.model) {
	case MYRB_5_CHANNEL_BOARD:
		pchan_max = 5;
		break;
	case MYRB_3_CHANNEL_BOARD:
	case MYRB_3_CHANNEL_ASIC_DAC:
		pchan_max = 3;
		break;
	case MYRB_2_CHANNEL_BOARD:
		pchan_max = 2;
		break;
	default:
		pchan_max = enquiry2->cfg_chan;
		break;
	}
	pchan_cur = enquiry2->cur_chan;
	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
		cb->bus_width = 32;
	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
		cb->bus_width = 16;
	else
		cb->bus_width = 8;
	cb->ldev_block_size = enquiry2->ldev_block_size;
	shost->max_channel = pchan_cur;
	shost->max_id = enquiry2->max_targets;
	memsize = enquiry2->mem_size >> 20;
	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most one less than the
	 * Controller Queue Depth to allow for an automatic drive
	 * rebuild operation.
	 */
	shost->can_queue = cb->enquiry->max_tcq;
	if (shost->can_queue < 3)
		shost->can_queue = enquiry2->max_cmds;
	if (shost->can_queue < 3)
		/* Play safe and disable TCQ */
		shost->can_queue = 1;

	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
	shost->max_sectors = enquiry2->max_sectors;
	shost->sg_tablesize = enquiry2->max_sge;
	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
	/*
	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
	 */
	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
		>> (10 - MYRB_BLKSIZE_BITS);
	/* Assume 255/63 translation */
	cb->ldev_geom_heads = 255;
	cb->ldev_geom_sectors = 63;
	if (config2->drive_geometry) {
		cb->ldev_geom_heads = 128;
		cb->ldev_geom_sectors = 32;
	}

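	/*
	 * Worked example for the stripe/segment size computation above,
	 * assuming MYRB_BLKSIZE_BITS is 9 (512-byte blocks): the shift is
	 * then 10 - 9 = 1, so a configuration of 128 blocks per stripe with
	 * a block factor of 1 yields (128 * 1) >> 1 = 64, i.e. a 64KB
	 * stripe (128 blocks * 512 bytes / 1024 bytes per KB).
	 */
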
	/*
	 * Initialize the Background Initialization Status.
	 */
	if ((cb->fw_version[0] == '4' &&
	     strcmp(cb->fw_version, "4.08") >= 0) ||
	    (cb->fw_version[0] == '5' &&
	     strcmp(cb->fw_version, "5.08") >= 0)) {
		cb->bgi_status_supported = true;
		myrb_bgi_control(cb);
	}
	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
	ret = 0;

out:
	shost_printk(KERN_INFO, cb->host,
		"Configuring %s PCI RAID Controller\n", cb->model_name);
	shost_printk(KERN_INFO, cb->host,
		"  Firmware Version: %s, Memory Size: %dMB\n",
		cb->fw_version, memsize);
	if (cb->io_addr == 0)
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->pci_addr, cb->irq);
	else
		shost_printk(KERN_INFO, cb->host,
			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
			cb->irq);
	shost_printk(KERN_INFO, cb->host,
		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		cb->host->can_queue, cb->host->max_sectors);
	shost_printk(KERN_INFO, cb->host,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     cb->host->can_queue, cb->host->sg_tablesize,
		     MYRB_SCATTER_GATHER_LIMIT);
	shost_printk(KERN_INFO, cb->host,
		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
		     cb->stripe_size, cb->segment_size,
		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
		     cb->safte_enabled ?
		     "  SAF-TE Enclosure Management Enabled" : "");
	shost_printk(KERN_INFO, cb->host,
		     "  Physical: %d/%d channels %d/%d/%d devices\n",
		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
		     cb->host->max_id);

	shost_printk(KERN_INFO, cb->host,
		     "  Logical: 1/1 channels, %d/%d disks\n",
		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
			  enquiry2, enquiry2_addr);
	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
			  config2, config2_addr);

	return ret;
}

/**
 * myrb_unmap - unmaps controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_unmap(struct myrb_hba *cb)
{
	if (cb->ldev_info_buf) {
		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
			MYRB_MAX_LDEVS;
		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
				  cb->ldev_info_buf, cb->ldev_info_addr);
		cb->ldev_info_buf = NULL;
	}
	if (cb->err_table) {
		size_t err_table_size = sizeof(struct myrb_error_entry) *
			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
		dma_free_coherent(&cb->pdev->dev, err_table_size,
				  cb->err_table, cb->err_table_addr);
		cb->err_table = NULL;
	}
	if (cb->enquiry) {
		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
				  cb->enquiry, cb->enquiry_addr);
		cb->enquiry = NULL;
	}
	if (cb->first_stat_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
				  cb->first_stat_mbox, cb->stat_mbox_addr);
		cb->first_stat_mbox = NULL;
	}
	if (cb->first_cmd_mbox) {
		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
		cb->first_cmd_mbox = NULL;
	}
}

/**
 * myrb_cleanup - cleanup controller structures
 * @cb: pointer to the hba structure
 */
static void myrb_cleanup(struct myrb_hba *cb)
{
	struct pci_dev *pdev = cb->pdev;

	/* Free the memory mailbox, status, and related structures */
	myrb_unmap(cb);

	if (cb->mmio_base) {
		if (cb->disable_intr)
			cb->disable_intr(cb->io_base);
		iounmap(cb->mmio_base);
	}
	if (cb->irq)
		free_irq(cb->irq, cb);
	if (cb->io_addr)
		release_region(cb->io_addr, 0x80);
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	scsi_host_put(cb->host);
}

static int myrb_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrb_hba *cb = shost_priv(shost);

	cb->reset(cb->io_base);
	return SUCCESS;
}

static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_dcdb *dcdb;
	dma_addr_t dcdb_addr;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	int nsge;

	myrb_reset_cmd(cmd_blk);
	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
	if (!dcdb)
		return SCSI_MLQUEUE_HOST_BUSY;
	nsge = scsi_dma_map(scmd);
	if (nsge > 1) {
		/* DCDB pass-through supports a single data segment only */
		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
		scmd->result = (DID_ERROR << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

	mbox->type3.opcode = MYRB_CMD_DCDB;
	mbox->type3.id = scmd->request->tag + 3;
	mbox->type3.addr = dcdb_addr;
	dcdb->channel = sdev->channel;
	dcdb->target = sdev->id;
	switch (scmd->sc_data_direction) {
	case DMA_NONE:
		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
		break;
	case DMA_TO_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
		break;
	case DMA_FROM_DEVICE:
		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
		break;
	default:
		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
		break;
	}
	dcdb->early_status = false;
	if (scmd->request->timeout <= 10)
		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
	else if (scmd->request->timeout <= 60)
		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
	else if (scmd->request->timeout <= 600)
		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
	else
		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
	dcdb->no_autosense = false;
	dcdb->allow_disconnect = true;
	sgl = scsi_sglist(scmd);
	dcdb->dma_addr = sg_dma_address(sgl);
	if (sg_dma_len(sgl) > USHRT_MAX) {
		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
	} else {
		dcdb->xfer_len_lo = sg_dma_len(sgl);
		dcdb->xfer_len_hi4 = 0;
	}
	dcdb->cdb_len = scmd->cmd_len;
	dcdb->sense_len = sizeof(dcdb->sense);
	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);

	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return 0;
}

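/*
 * Logical drives do not answer INQUIRY themselves, so the driver
 * emulates it: the template below is a standard SCSI-2 INQUIRY response
 * carrying the vendor identification "MYLEX" (bytes 8-15), into which
 * the model name (bytes 16-31) and a condensed firmware version
 * (bytes 32-35) are patched. Byte 7 advertises wide bus support
 * (WBus32/WBus16) based on the detected bus width.
 */
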
static void myrb_inquiry(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	unsigned char inq[36] = {
		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
		0x20, 0x20, 0x20, 0x20,
	};

	if (cb->bus_width > 16)
		inq[7] |= 1 << 6;
	if (cb->bus_width > 8)
		inq[7] |= 1 << 5;
	memcpy(&inq[16], cb->model_name, 16);
	memcpy(&inq[32], cb->fw_version, 1);
	memcpy(&inq[33], &cb->fw_version[2], 2);
	memcpy(&inq[35], &cb->fw_version[7], 1);

	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
}

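/*
 * MODE SENSE is likewise emulated, returning only the caching mode page
 * (page code 0x08): the WCE bit (0x04 in byte 2 of the page) mirrors
 * the logical drive's write-back setting, and when a cache segment size
 * is configured, the SIZE bit (0x08) is set and the cache segment size
 * field (bytes 14-15 of the page) is filled in.
 */
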
static void
myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->size, &block_desc[0]);
		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->wb_enabled)
		mode_pg[2] |= 0x04;
	if (cb->segment_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static void myrb_request_sense(struct myrb_hba *cb,
		struct scsi_cmnd *scmd)
{
	scsi_build_sense_buffer(0, scmd->sense_buffer,
				NO_SENSE, 0, 0);
	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
				 SCSI_SENSE_BUFFERSIZE);
}

static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
		struct myrb_ldev_info *ldev_info)
{
	unsigned char data[8];

	dev_dbg(&scmd->device->sdev_gendev,
		"Capacity %u, blocksize %u\n",
		ldev_info->size, cb->ldev_block_size);
	put_unaligned_be32(ldev_info->size - 1, &data[0]);
	put_unaligned_be32(cb->ldev_block_size, &data[4]);
	scsi_sg_copy_from_buffer(scmd, data, 8);
}

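/*
 * myrb_ldev_queuecommand() below translates SCSI CDBs addressed to a
 * logical drive into DAC960 mailbox commands: non-I/O commands
 * (INQUIRY, MODE SENSE, READ CAPACITY, ...) are emulated in the driver,
 * while the READ/WRITE variants become type 5 read/write commands,
 * using a single inline data segment when possible and falling back to
 * a hardware scatter/gather list from cb->sg_pool otherwise.
 */
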
static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrb_hba *cb = shost_priv(shost);
	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	struct myrb_ldev_info *ldev_info;
	struct scsi_device *sdev = scmd->device;
	struct scatterlist *sgl;
	unsigned long flags;
	u64 lba;
	u32 block_cnt;
	int nsge;

	ldev_info = sdev->hostdata;
	if (!ldev_info ||
	    (ldev_info->state != MYRB_DEVICE_ONLINE &&
	     ldev_info->state != MYRB_DEVICE_WO)) {
		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
			sdev->id, ldev_info ? ldev_info->state : 0xff);
		scmd->result = (DID_BAD_TARGET << 16);
		scmd->scsi_done(scmd);
		return 0;
	}
	switch (scmd->cmnd[0]) {
	case TEST_UNIT_READY:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case INQUIRY:
		if (scmd->cmnd[1] & 1) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_inquiry(cb, scmd);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case SYNCHRONIZE_CACHE:
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case MODE_SENSE:
		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
		    (scmd->cmnd[2] & 0x3F) != 0x08) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			myrb_mode_sense(cb, scmd, ldev_info);
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_CAPACITY:
		if ((scmd->cmnd[1] & 1) ||
		    (scmd->cmnd[8] & 1)) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		if (lba) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		myrb_read_capacity(cb, scmd, ldev_info);
		scmd->scsi_done(scmd);
		return 0;
	case REQUEST_SENSE:
		myrb_request_sense(cb, scmd);
		scmd->result = (DID_OK << 16);
		scmd->scsi_done(scmd);
		return 0;
	case SEND_DIAGNOSTIC:
		if (scmd->cmnd[1] != 0x04) {
			/* Illegal request, invalid field in CDB */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						ILLEGAL_REQUEST, 0x24, 0);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
		} else {
			/* Assume good status */
			scmd->result = (DID_OK << 16);
		}
		scmd->scsi_done(scmd);
		return 0;
	case READ_6:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_6:
		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
		       (scmd->cmnd[2] << 8) |
		       scmd->cmnd[3]);
		block_cnt = scmd->cmnd[4];
		break;
	case READ_10:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_10:
	case VERIFY:		/* 0x2F */
	case WRITE_VERIFY:	/* 0x2E */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
		break;
	case READ_12:
		if (ldev_info->state == MYRB_DEVICE_WO) {
			/* Data protect, attempt to read invalid data */
			scsi_build_sense_buffer(0, scmd->sense_buffer,
						DATA_PROTECT, 0x21, 0x06);
			scmd->result = (DRIVER_SENSE << 24) |
				SAM_STAT_CHECK_CONDITION;
			scmd->scsi_done(scmd);
			return 0;
		}
		fallthrough;
	case WRITE_12:
	case VERIFY_12: /* 0xAF */
	case WRITE_VERIFY_12:	/* 0xAE */
		lba = get_unaligned_be32(&scmd->cmnd[2]);
		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
		break;
	default:
		/* Illegal request, invalid opcode */
		scsi_build_sense_buffer(0, scmd->sense_buffer,
					ILLEGAL_REQUEST, 0x20, 0);
		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
		scmd->scsi_done(scmd);
		return 0;
	}

	myrb_reset_cmd(cmd_blk);
	mbox->type5.id = scmd->request->tag + 3;
	if (scmd->sc_data_direction == DMA_NONE)
		goto submit;
	nsge = scsi_dma_map(scmd);
	if (nsge == 1) {
		sgl = scsi_sglist(scmd);
		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = (u32)sg_dma_address(sgl);
	} else {
		struct myrb_sge *hw_sgl;
		dma_addr_t hw_sgl_addr;
		int i;

		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
		if (!hw_sgl)
			return SCSI_MLQUEUE_HOST_BUSY;

		cmd_blk->sgl = hw_sgl;
		cmd_blk->sgl_addr = hw_sgl_addr;

		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
			mbox->type5.opcode = MYRB_CMD_READ_SG;
		else
			mbox->type5.opcode = MYRB_CMD_WRITE_SG;

		mbox->type5.ld.xfer_len = block_cnt;
		mbox->type5.ld.ldev_num = sdev->id;
		mbox->type5.lba = lba;
		mbox->type5.addr = hw_sgl_addr;
		mbox->type5.sg_count = nsge;

		scsi_for_each_sg(scmd, sgl, nsge, i) {
			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
			hw_sgl++;
		}
	}
submit:
	spin_lock_irqsave(&cb->queue_lock, flags);
	cb->qcmd(cb, cmd_blk);
	spin_unlock_irqrestore(&cb->queue_lock, flags);

	return 0;
}

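/*
 * Top-level queuecommand entry point: commands for the logical drive
 * channel are handled by myrb_ldev_queuecommand(), anything on a valid
 * lower channel is passed through to the physical device via DCDB.
 */
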
myrb_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmd)1635 static int myrb_queuecommand(struct Scsi_Host *shost,
1636 		struct scsi_cmnd *scmd)
1637 {
1638 	struct scsi_device *sdev = scmd->device;
1639 
1640 	if (sdev->channel > myrb_logical_channel(shost)) {
1641 		scmd->result = (DID_BAD_TARGET << 16);
1642 		scmd->scsi_done(scmd);
1643 		return 0;
1644 	}
1645 	if (sdev->channel == myrb_logical_channel(shost))
1646 		return myrb_ldev_queuecommand(shost, scmd);
1647 
1648 	return myrb_pthru_queuecommand(shost, scmd);
1649 }
1650 
myrb_ldev_slave_alloc(struct scsi_device *sdev)1651 static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1652 {
1653 	struct myrb_hba *cb = shost_priv(sdev->host);
1654 	struct myrb_ldev_info *ldev_info;
1655 	unsigned short ldev_num = sdev->id;
1656 	enum raid_level level;
1657 
1658 	if (!cb->ldev_info_buf)
1659 		return -ENXIO;
1660 	ldev_info = cb->ldev_info_buf + ldev_num;
1661 
1662 	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1663 	if (!sdev->hostdata)
1664 		return -ENOMEM;
1665 	dev_dbg(&sdev->sdev_gendev,
1666 		"slave alloc ldev %d state %x\n",
1667 		ldev_num, ldev_info->state);
1668 	memcpy(sdev->hostdata, ldev_info,
1669 	       sizeof(*ldev_info));
1670 	switch (ldev_info->raid_level) {
1671 	case MYRB_RAID_LEVEL0:
1672 		level = RAID_LEVEL_LINEAR;
1673 		break;
1674 	case MYRB_RAID_LEVEL1:
1675 		level = RAID_LEVEL_1;
1676 		break;
1677 	case MYRB_RAID_LEVEL3:
1678 		level = RAID_LEVEL_3;
1679 		break;
1680 	case MYRB_RAID_LEVEL5:
1681 		level = RAID_LEVEL_5;
1682 		break;
1683 	case MYRB_RAID_LEVEL6:
1684 		level = RAID_LEVEL_6;
1685 		break;
1686 	case MYRB_RAID_JBOD:
1687 		level = RAID_LEVEL_JBOD;
1688 		break;
1689 	default:
1690 		level = RAID_LEVEL_UNKNOWN;
1691 		break;
1692 	}
1693 	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1694 	return 0;
1695 }
1696 
1697 static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1698 {
1699 	struct myrb_hba *cb = shost_priv(sdev->host);
1700 	struct myrb_pdev_state *pdev_info;
1701 	unsigned short status;
1702 
1703 	if (sdev->id > MYRB_MAX_TARGETS)
1704 		return -ENXIO;
1705 
1706 	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1707 	if (!pdev_info)
1708 		return -ENOMEM;
1709 
1710 	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1711 				  sdev, pdev_info);
1712 	if (status != MYRB_STATUS_SUCCESS) {
1713 		dev_dbg(&sdev->sdev_gendev,
1714 			"Failed to get device state, status %x\n",
1715 			status);
1716 		kfree(pdev_info);
1717 		return -ENXIO;
1718 	}
1719 	if (!pdev_info->present) {
1720 		dev_dbg(&sdev->sdev_gendev,
1721 			"device not present, skip\n");
1722 		kfree(pdev_info);
1723 		return -ENXIO;
1724 	}
1725 	dev_dbg(&sdev->sdev_gendev,
1726 		"slave alloc pdev %d:%d state %x\n",
1727 		sdev->channel, sdev->id, pdev_info->state);
1728 	sdev->hostdata = pdev_info;
1729 
1730 	return 0;
1731 }
1732 
1733 static int myrb_slave_alloc(struct scsi_device *sdev)
1734 {
1735 	if (sdev->channel > myrb_logical_channel(sdev->host))
1736 		return -ENXIO;
1737 
1738 	if (sdev->lun > 0)
1739 		return -ENXIO;
1740 
1741 	if (sdev->channel == myrb_logical_channel(sdev->host))
1742 		return myrb_ldev_slave_alloc(sdev);
1743 
1744 	return myrb_pdev_slave_alloc(sdev);
1745 }
1746 
1747 static int myrb_slave_configure(struct scsi_device *sdev)
1748 {
1749 	struct myrb_ldev_info *ldev_info;
1750 
1751 	if (sdev->channel > myrb_logical_channel(sdev->host))
1752 		return -ENXIO;
1753 
1754 	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1755 		sdev->no_uld_attach = 1;
1756 		return 0;
1757 	}
1758 	if (sdev->lun != 0)
1759 		return -ENXIO;
1760 
1761 	ldev_info = sdev->hostdata;
1762 	if (!ldev_info)
1763 		return -ENXIO;
1764 	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1765 		sdev_printk(KERN_INFO, sdev,
1766 			    "Logical drive is %s\n",
1767 			    myrb_devstate_name(ldev_info->state));
1768 
1769 	sdev->tagged_supported = 1;
1770 	return 0;
1771 }
1772 
1773 static void myrb_slave_destroy(struct scsi_device *sdev)
1774 {
1775 	kfree(sdev->hostdata);
1776 }
1777 
1778 static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1779 		sector_t capacity, int geom[])
1780 {
1781 	struct myrb_hba *cb = shost_priv(sdev->host);
1782 
1783 	geom[0] = cb->ldev_geom_heads;
1784 	geom[1] = cb->ldev_geom_sectors;
1785 	geom[2] = sector_div(capacity, geom[0] * geom[1]);
1786 
1787 	return 0;
1788 }
1789 
1790 static ssize_t raid_state_show(struct device *dev,
1791 		struct device_attribute *attr, char *buf)
1792 {
1793 	struct scsi_device *sdev = to_scsi_device(dev);
1794 	struct myrb_hba *cb = shost_priv(sdev->host);
1795 	int ret;
1796 
1797 	if (!sdev->hostdata)
1798 		return snprintf(buf, 16, "Unknown\n");
1799 
1800 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1801 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1802 		const char *name;
1803 
1804 		name = myrb_devstate_name(ldev_info->state);
1805 		if (name)
1806 			ret = snprintf(buf, 32, "%s\n", name);
1807 		else
1808 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1809 				       ldev_info->state);
1810 	} else {
1811 		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1812 		unsigned short status;
1813 		const char *name;
1814 
1815 		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1816 					  sdev, pdev_info);
1817 		if (status != MYRB_STATUS_SUCCESS)
1818 			sdev_printk(KERN_INFO, sdev,
1819 				    "Failed to get device state, status %x\n",
1820 				    status);
1821 
1822 		if (!pdev_info->present)
1823 			name = "Removed";
1824 		else
1825 			name = myrb_devstate_name(pdev_info->state);
1826 		if (name)
1827 			ret = snprintf(buf, 32, "%s\n", name);
1828 		else
1829 			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1830 				       pdev_info->state);
1831 	}
1832 	return ret;
1833 }
1834 
1835 static ssize_t raid_state_store(struct device *dev,
1836 		struct device_attribute *attr, const char *buf, size_t count)
1837 {
1838 	struct scsi_device *sdev = to_scsi_device(dev);
1839 	struct myrb_hba *cb = shost_priv(sdev->host);
1840 	struct myrb_pdev_state *pdev_info;
1841 	enum myrb_devstate new_state;
1842 	unsigned short status;
1843 
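	/*
	 * Sysfs writes may carry a trailing newline, so match only the
	 * keyword prefix: "kill"/"offline", "online" or "standby".
	 */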
1844 	if (!strncmp(buf, "kill", 4) ||
1845 	    !strncmp(buf, "offline", 7))
1846 		new_state = MYRB_DEVICE_DEAD;
1847 	else if (!strncmp(buf, "online", 6))
1848 		new_state = MYRB_DEVICE_ONLINE;
1849 	else if (!strncmp(buf, "standby", 7))
1850 		new_state = MYRB_DEVICE_STANDBY;
1851 	else
1852 		return -EINVAL;
1853 
1854 	pdev_info = sdev->hostdata;
1855 	if (!pdev_info) {
1856 		sdev_printk(KERN_INFO, sdev,
1857 			    "Failed - no physical device information\n");
1858 		return -ENXIO;
1859 	}
1860 	if (!pdev_info->present) {
1861 		sdev_printk(KERN_INFO, sdev,
1862 			    "Failed - device not present\n");
1863 		return -ENXIO;
1864 	}
1865 
1866 	if (pdev_info->state == new_state)
1867 		return count;
1868 
1869 	status = myrb_set_pdev_state(cb, sdev, new_state);
1870 	switch (status) {
1871 	case MYRB_STATUS_SUCCESS:
1872 		break;
1873 	case MYRB_STATUS_START_DEVICE_FAILED:
1874 		sdev_printk(KERN_INFO, sdev,
1875 			     "Failed - Unable to Start Device\n");
1876 		count = -EAGAIN;
1877 		break;
1878 	case MYRB_STATUS_NO_DEVICE:
1879 		sdev_printk(KERN_INFO, sdev,
1880 			    "Failed - No Device at Address\n");
1881 		count = -ENODEV;
1882 		break;
1883 	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1884 		sdev_printk(KERN_INFO, sdev,
1885 			 "Failed - Invalid Channel or Target or Modifier\n");
1886 		count = -EINVAL;
1887 		break;
1888 	case MYRB_STATUS_CHANNEL_BUSY:
1889 		sdev_printk(KERN_INFO, sdev,
1890 			 "Failed - Channel Busy\n");
1891 		count = -EBUSY;
1892 		break;
1893 	default:
1894 		sdev_printk(KERN_INFO, sdev,
1895 			 "Failed - Unexpected Status %04X\n", status);
1896 		count = -EIO;
1897 		break;
1898 	}
1899 	return count;
1900 }
1901 static DEVICE_ATTR_RW(raid_state);
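/*
 * Usage sketch (hypothetical disk sdb behind this controller):
 *   cat /sys/block/sdb/device/raid_state
 *   echo online > /sys/block/sdb/device/raid_state
 */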
1902 
1903 static ssize_t raid_level_show(struct device *dev,
1904 		struct device_attribute *attr, char *buf)
1905 {
1906 	struct scsi_device *sdev = to_scsi_device(dev);
1907 
1908 	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1909 		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1910 		const char *name;
1911 
1912 		if (!ldev_info)
1913 			return -ENXIO;
1914 
1915 		name = myrb_raidlevel_name(ldev_info->raid_level);
1916 		if (!name)
1917 			return snprintf(buf, 32, "Invalid (%02X)\n",
1918 					ldev_info->raid_level);
1919 		return snprintf(buf, 32, "%s\n", name);
1920 	}
1921 	return snprintf(buf, 32, "Physical Drive\n");
1922 }
1923 static DEVICE_ATTR_RO(raid_level);
1924 
1925 static ssize_t rebuild_show(struct device *dev,
1926 		struct device_attribute *attr, char *buf)
1927 {
1928 	struct scsi_device *sdev = to_scsi_device(dev);
1929 	struct myrb_hba *cb = shost_priv(sdev->host);
1930 	struct myrb_rbld_progress rbld_buf;
1931 	unsigned short status;
1932 
1933 	if (sdev->channel < myrb_logical_channel(sdev->host))
1934 		return snprintf(buf, 32, "physical device - not rebuilding\n");
1935 
1936 	status = myrb_get_rbld_progress(cb, &rbld_buf);
1937 
1938 	if (status != MYRB_STATUS_SUCCESS ||
1939 	    rbld_buf.ldev_num != sdev->id)
1940 		return snprintf(buf, 32, "not rebuilding\n");
1941 
1942 	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1943 			rbld_buf.ldev_size - rbld_buf.blocks_left,
1944 			rbld_buf.ldev_size);
1945 }
1946 
1947 static ssize_t rebuild_store(struct device *dev,
1948 		struct device_attribute *attr, const char *buf, size_t count)
1949 {
1950 	struct scsi_device *sdev = to_scsi_device(dev);
1951 	struct myrb_hba *cb = shost_priv(sdev->host);
1952 	struct myrb_cmdblk *cmd_blk;
1953 	union myrb_cmd_mbox *mbox;
1954 	unsigned short status;
1955 	int rc, start;
1956 	const char *msg;
1957 
1958 	rc = kstrtoint(buf, 0, &start);
1959 	if (rc)
1960 		return rc;
1961 
1962 	if (sdev->channel >= myrb_logical_channel(sdev->host))
1963 		return -ENXIO;
1964 
1965 	status = myrb_get_rbld_progress(cb, NULL);
1966 	if (start) {
1967 		if (status == MYRB_STATUS_SUCCESS) {
1968 			sdev_printk(KERN_INFO, sdev,
1969 				    "Rebuild Not Initiated; already in progress\n");
1970 			return -EALREADY;
1971 		}
1972 		mutex_lock(&cb->dcmd_mutex);
1973 		cmd_blk = &cb->dcmd_blk;
1974 		myrb_reset_cmd(cmd_blk);
1975 		mbox = &cmd_blk->mbox;
1976 		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1977 		mbox->type3D.id = MYRB_DCMD_TAG;
1978 		mbox->type3D.channel = sdev->channel;
1979 		mbox->type3D.target = sdev->id;
1980 		status = myrb_exec_cmd(cb, cmd_blk);
1981 		mutex_unlock(&cb->dcmd_mutex);
1982 	} else {
1983 		struct pci_dev *pdev = cb->pdev;
1984 		unsigned char *rate;
1985 		dma_addr_t rate_addr;
1986 
1987 		if (status != MYRB_STATUS_SUCCESS) {
1988 			sdev_printk(KERN_INFO, sdev,
1989 				    "Rebuild Not Cancelled; not in progress\n");
1990 			return 0;
1991 		}
1992 
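		/*
		 * REBUILD_CONTROL takes a DMA buffer for the rate byte;
		 * a rate of 0xFF is used here as the cancel request for
		 * the rebuild in progress.
		 */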
1993 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1994 					  &rate_addr, GFP_KERNEL);
1995 		if (rate == NULL) {
1996 			sdev_printk(KERN_INFO, sdev,
1997 				    "Cancellation of Rebuild Failed - Out of Memory\n");
1998 			return -ENOMEM;
1999 		}
2000 		mutex_lock(&cb->dcmd_mutex);
2001 		cmd_blk = &cb->dcmd_blk;
2002 		myrb_reset_cmd(cmd_blk);
2003 		mbox = &cmd_blk->mbox;
2004 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2005 		mbox->type3R.id = MYRB_DCMD_TAG;
2006 		mbox->type3R.rbld_rate = 0xFF;
2007 		mbox->type3R.addr = rate_addr;
2008 		status = myrb_exec_cmd(cb, cmd_blk);
2009 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2010 		mutex_unlock(&cb->dcmd_mutex);
2011 	}
2012 	if (status == MYRB_STATUS_SUCCESS) {
2013 		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2014 			    start ? "Initiated" : "Cancelled");
2015 		return count;
2016 	}
2017 	if (!start) {
2018 		sdev_printk(KERN_INFO, sdev,
2019 			    "Rebuild Not Cancelled, status 0x%x\n",
2020 			    status);
2021 		return -EIO;
2022 	}
2023 
2024 	switch (status) {
2025 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2026 		msg = "Attempt to Rebuild Online or Unresponsive Drive";
2027 		break;
2028 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2029 		msg = "New Disk Failed During Rebuild";
2030 		break;
2031 	case MYRB_STATUS_INVALID_ADDRESS:
2032 		msg = "Invalid Device Address";
2033 		break;
2034 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2035 		msg = "Already in Progress";
2036 		break;
2037 	default:
2038 		msg = NULL;
2039 		break;
2040 	}
2041 	if (msg)
2042 		sdev_printk(KERN_INFO, sdev,
2043 			    "Rebuild Failed - %s\n", msg);
2044 	else
2045 		sdev_printk(KERN_INFO, sdev,
2046 			    "Rebuild Failed, status 0x%x\n", status);
2047 
2048 	return -EIO;
2049 }
2050 static DEVICE_ATTR_RW(rebuild);
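/*
 * Usage sketch (hypothetical disk sdb):
 *   echo 1 > /sys/block/sdb/device/rebuild   (start a rebuild)
 *   echo 0 > /sys/block/sdb/device/rebuild   (cancel it)
 */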
2051 
2052 static ssize_t consistency_check_store(struct device *dev,
2053 		struct device_attribute *attr, const char *buf, size_t count)
2054 {
2055 	struct scsi_device *sdev = to_scsi_device(dev);
2056 	struct myrb_hba *cb = shost_priv(sdev->host);
2057 	struct myrb_rbld_progress rbld_buf;
2058 	struct myrb_cmdblk *cmd_blk;
2059 	union myrb_cmd_mbox *mbox;
2060 	unsigned short ldev_num = 0xFFFF;
2061 	unsigned short status;
2062 	int rc, start;
2063 	const char *msg;
2064 
2065 	rc = kstrtoint(buf, 0, &start);
2066 	if (rc)
2067 		return rc;
2068 
2069 	if (sdev->channel < myrb_logical_channel(sdev->host))
2070 		return -ENXIO;
2071 
2072 	status = myrb_get_rbld_progress(cb, &rbld_buf);
	/* remember which logical drive the running check belongs to */
	if (status == MYRB_STATUS_SUCCESS)
		ldev_num = rbld_buf.ldev_num;
2073 	if (start) {
2074 		if (status == MYRB_STATUS_SUCCESS) {
2075 			sdev_printk(KERN_INFO, sdev,
2076 				    "Check Consistency Not Initiated; already in progress\n");
2077 			return -EALREADY;
2078 		}
2079 		mutex_lock(&cb->dcmd_mutex);
2080 		cmd_blk = &cb->dcmd_blk;
2081 		myrb_reset_cmd(cmd_blk);
2082 		mbox = &cmd_blk->mbox;
2083 		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2084 		mbox->type3C.id = MYRB_DCMD_TAG;
2085 		mbox->type3C.ldev_num = sdev->id;
2086 		mbox->type3C.auto_restore = true;
2087 
2088 		status = myrb_exec_cmd(cb, cmd_blk);
2089 		mutex_unlock(&cb->dcmd_mutex);
2090 	} else {
2091 		struct pci_dev *pdev = cb->pdev;
2092 		unsigned char *rate;
2093 		dma_addr_t rate_addr;
2094 
2095 		if (ldev_num != sdev->id) {
2096 			sdev_printk(KERN_INFO, sdev,
2097 				    "Check Consistency Not Cancelled; not in progress\n");
2098 			return 0;
2099 		}
2100 		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2101 					  &rate_addr, GFP_KERNEL);
2102 		if (rate == NULL) {
2103 			sdev_printk(KERN_INFO, sdev,
2104 				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2105 			return -ENOMEM;
2106 		}
2107 		mutex_lock(&cb->dcmd_mutex);
2108 		cmd_blk = &cb->dcmd_blk;
2109 		myrb_reset_cmd(cmd_blk);
2110 		mbox = &cmd_blk->mbox;
2111 		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2112 		mbox->type3R.id = MYRB_DCMD_TAG;
2113 		mbox->type3R.rbld_rate = 0xFF;
2114 		mbox->type3R.addr = rate_addr;
2115 		status = myrb_exec_cmd(cb, cmd_blk);
2116 		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2117 		mutex_unlock(&cb->dcmd_mutex);
2118 	}
2119 	if (status == MYRB_STATUS_SUCCESS) {
2120 		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2121 			    start ? "Initiated" : "Cancelled");
2122 		return count;
2123 	}
2124 	if (!start) {
2125 		sdev_printk(KERN_INFO, sdev,
2126 			    "Check Consistency Not Cancelled, status 0x%x\n",
2127 			    status);
2128 		return -EIO;
2129 	}
2130 
2131 	switch (status) {
2132 	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2133 		msg = "Dependent Physical Device is DEAD";
2134 		break;
2135 	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2136 		msg = "New Disk Failed During Rebuild";
2137 		break;
2138 	case MYRB_STATUS_INVALID_ADDRESS:
2139 		msg = "Invalid or Nonredundant Logical Drive";
2140 		break;
2141 	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2142 		msg = "Already in Progress";
2143 		break;
2144 	default:
2145 		msg = NULL;
2146 		break;
2147 	}
2148 	if (msg)
2149 		sdev_printk(KERN_INFO, sdev,
2150 			    "Check Consistency Failed - %s\n", msg);
2151 	else
2152 		sdev_printk(KERN_INFO, sdev,
2153 			    "Check Consistency Failed, status 0x%x\n", status);
2154 
2155 	return -EIO;
2156 }
2157 
2158 static ssize_t consistency_check_show(struct device *dev,
2159 		struct device_attribute *attr, char *buf)
2160 {
2161 	return rebuild_show(dev, attr, buf);
2162 }
2163 static DEVICE_ATTR_RW(consistency_check);
2164 
2165 static ssize_t ctlr_num_show(struct device *dev,
2166 		struct device_attribute *attr, char *buf)
2167 {
2168 	struct Scsi_Host *shost = class_to_shost(dev);
2169 	struct myrb_hba *cb = shost_priv(shost);
2170 
2171 	return snprintf(buf, 20, "%u\n", cb->ctlr_num);
2172 }
2173 static DEVICE_ATTR_RO(ctlr_num);
2174 
2175 static ssize_t firmware_show(struct device *dev,
2176 		struct device_attribute *attr, char *buf)
2177 {
2178 	struct Scsi_Host *shost = class_to_shost(dev);
2179 	struct myrb_hba *cb = shost_priv(shost);
2180 
2181 	return snprintf(buf, 16, "%s\n", cb->fw_version);
2182 }
2183 static DEVICE_ATTR_RO(firmware);
2184 
2185 static ssize_t model_show(struct device *dev,
2186 		struct device_attribute *attr, char *buf)
2187 {
2188 	struct Scsi_Host *shost = class_to_shost(dev);
2189 	struct myrb_hba *cb = shost_priv(shost);
2190 
2191 	return snprintf(buf, 16, "%s\n", cb->model_name);
2192 }
2193 static DEVICE_ATTR_RO(model);
2194 
2195 static ssize_t flush_cache_store(struct device *dev,
2196 		struct device_attribute *attr, const char *buf, size_t count)
2197 {
2198 	struct Scsi_Host *shost = class_to_shost(dev);
2199 	struct myrb_hba *cb = shost_priv(shost);
2200 	unsigned short status;
2201 
2202 	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2203 	if (status == MYRB_STATUS_SUCCESS) {
2204 		shost_printk(KERN_INFO, shost,
2205 			     "Cache Flush Completed\n");
2206 		return count;
2207 	}
2208 	shost_printk(KERN_INFO, shost,
2209 		     "Cache Flush Failed, status %x\n", status);
2210 	return -EIO;
2211 }
2212 static DEVICE_ATTR_WO(flush_cache);
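/*
 * Usage sketch (hypothetical host2): any write triggers a flush:
 *   echo 1 > /sys/class/scsi_host/host2/flush_cache
 */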
2213 
2214 static struct device_attribute *myrb_sdev_attrs[] = {
2215 	&dev_attr_rebuild,
2216 	&dev_attr_consistency_check,
2217 	&dev_attr_raid_state,
2218 	&dev_attr_raid_level,
2219 	NULL,
2220 };
2221 
2222 static struct device_attribute *myrb_shost_attrs[] = {
2223 	&dev_attr_ctlr_num,
2224 	&dev_attr_model,
2225 	&dev_attr_firmware,
2226 	&dev_attr_flush_cache,
2227 	NULL,
2228 };
2229 
2230 static struct scsi_host_template myrb_template = {
2231 	.module			= THIS_MODULE,
2232 	.name			= "DAC960",
2233 	.proc_name		= "myrb",
2234 	.queuecommand		= myrb_queuecommand,
2235 	.eh_host_reset_handler	= myrb_host_reset,
2236 	.slave_alloc		= myrb_slave_alloc,
2237 	.slave_configure	= myrb_slave_configure,
2238 	.slave_destroy		= myrb_slave_destroy,
2239 	.bios_param		= myrb_biosparam,
2240 	.cmd_size		= sizeof(struct myrb_cmdblk),
2241 	.shost_attrs		= myrb_shost_attrs,
2242 	.sdev_attrs		= myrb_sdev_attrs,
2243 	.this_id		= -1,
2244 };
2245 
2246 /**
2247  * myrb_is_raid - return boolean indicating device is raid volume
2248  * @dev: the device struct object
2249  */
2250 static int myrb_is_raid(struct device *dev)
2251 {
2252 	struct scsi_device *sdev = to_scsi_device(dev);
2253 
2254 	return sdev->channel == myrb_logical_channel(sdev->host);
2255 }
2256 
2257 /**
2258  * myrb_get_resync - get raid volume resync percent complete
2259  * @dev: the device struct object
2260  */
2261 static void myrb_get_resync(struct device *dev)
2262 {
2263 	struct scsi_device *sdev = to_scsi_device(dev);
2264 	struct myrb_hba *cb = shost_priv(sdev->host);
2265 	struct myrb_rbld_progress rbld_buf;
2266 	unsigned int percent_complete = 0;
2267 	unsigned short status;
2268 	unsigned int ldev_size = 0, remaining = 0;
2269 
2270 	if (sdev->channel < myrb_logical_channel(sdev->host))
2271 		return;
2272 	status = myrb_get_rbld_progress(cb, &rbld_buf);
2273 	if (status == MYRB_STATUS_SUCCESS) {
2274 		if (rbld_buf.ldev_num == sdev->id) {
2275 			ldev_size = rbld_buf.ldev_size;
2276 			remaining = rbld_buf.blocks_left;
2277 		}
2278 	}
2279 	if (remaining && ldev_size)
2280 		percent_complete = (ldev_size - remaining) * 100 / ldev_size;
2281 	raid_set_resync(myrb_raid_template, dev, percent_complete);
2282 }
2283 
2284 /**
2285  * myrb_get_state - get raid volume status
2286  * @dev: the device struct object
2287  */
2288 static void myrb_get_state(struct device *dev)
2289 {
2290 	struct scsi_device *sdev = to_scsi_device(dev);
2291 	struct myrb_hba *cb = shost_priv(sdev->host);
2292 	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2293 	enum raid_state state = RAID_STATE_UNKNOWN;
2294 	unsigned short status;
2295 
2296 	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2297 		state = RAID_STATE_UNKNOWN;
2298 	else {
2299 		status = myrb_get_rbld_progress(cb, NULL);
2300 		if (status == MYRB_STATUS_SUCCESS)
2301 			state = RAID_STATE_RESYNCING;
2302 		else {
2303 			switch (ldev_info->state) {
2304 			case MYRB_DEVICE_ONLINE:
2305 				state = RAID_STATE_ACTIVE;
2306 				break;
2307 			case MYRB_DEVICE_WO:
2308 			case MYRB_DEVICE_CRITICAL:
2309 				state = RAID_STATE_DEGRADED;
2310 				break;
2311 			default:
2312 				state = RAID_STATE_OFFLINE;
2313 			}
2314 		}
2315 	}
2316 	raid_set_state(myrb_raid_template, dev, state);
2317 }
2318 
2319 static struct raid_function_template myrb_raid_functions = {
2320 	.cookie		= &myrb_template,
2321 	.is_raid	= myrb_is_raid,
2322 	.get_resync	= myrb_get_resync,
2323 	.get_state	= myrb_get_state,
2324 };
2325 
2326 static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2327 		struct scsi_cmnd *scmd)
2328 {
2329 	unsigned short status;
2330 
2331 	if (!cmd_blk)
2332 		return;
2333 
2334 	scsi_dma_unmap(scmd);
2335 
2336 	if (cmd_blk->dcdb) {
2337 		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2338 		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2339 			      cmd_blk->dcdb_addr);
2340 		cmd_blk->dcdb = NULL;
2341 	}
2342 	if (cmd_blk->sgl) {
2343 		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2344 		cmd_blk->sgl = NULL;
2345 		cmd_blk->sgl_addr = 0;
2346 	}
2347 	status = cmd_blk->status;
2348 	switch (status) {
2349 	case MYRB_STATUS_SUCCESS:
2350 	case MYRB_STATUS_DEVICE_BUSY:
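		/*
		 * The controller's busy status appears to line up with
		 * SAM_STAT_BUSY, so the raw status can be folded into
		 * the SCSI result word directly.
		 */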
2351 		scmd->result = (DID_OK << 16) | status;
2352 		break;
2353 	case MYRB_STATUS_BAD_DATA:
2354 		dev_dbg(&scmd->device->sdev_gendev,
2355 			"Bad Data Encountered\n");
2356 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2357 			/* Unrecovered read error */
2358 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2359 						MEDIUM_ERROR, 0x11, 0);
2360 		else
2361 			/* Write error */
2362 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2363 						MEDIUM_ERROR, 0x0C, 0);
2364 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2365 		break;
2366 	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2367 		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2368 		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2369 			/* Unrecovered read error, auto-reallocation failed */
2370 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2371 						MEDIUM_ERROR, 0x11, 0x04);
2372 		else
2373 			/* Write error, auto-reallocation failed */
2374 			scsi_build_sense_buffer(0, scmd->sense_buffer,
2375 						MEDIUM_ERROR, 0x0C, 0x02);
2376 		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2377 		break;
2378 	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2379 		dev_dbg(&scmd->device->sdev_gendev,
2380 			    "Logical Drive Nonexistent or Offline\n");
2381 		scmd->result = (DID_BAD_TARGET << 16);
2382 		break;
2383 	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2384 		dev_dbg(&scmd->device->sdev_gendev,
2385 			    "Attempt to Access Beyond End of Logical Drive\n");
2386 		/* Logical block address out of range */
2387 		scsi_build_sense_buffer(0, scmd->sense_buffer,
2388 					NOT_READY, 0x21, 0);
		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
2389 		break;
2390 	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2391 		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2392 		scmd->result = (DID_BAD_TARGET << 16);
2393 		break;
2394 	default:
2395 		scmd_printk(KERN_ERR, scmd,
2396 			    "Unexpected Error Status %04X\n", status);
2397 		scmd->result = (DID_ERROR << 16);
2398 		break;
2399 	}
2400 	scmd->scsi_done(scmd);
2401 }
2402 
2403 static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2404 {
2405 	if (!cmd_blk)
2406 		return;
2407 
2408 	if (cmd_blk->completion) {
2409 		complete(cmd_blk->completion);
2410 		cmd_blk->completion = NULL;
2411 	}
2412 }
2413 
2414 static void myrb_monitor(struct work_struct *work)
2415 {
2416 	struct myrb_hba *cb = container_of(work,
2417 			struct myrb_hba, monitor_work.work);
2418 	struct Scsi_Host *shost = cb->host;
2419 	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2420 
2421 	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2422 
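	/*
	 * Delays are in jiffies: drop to a short 10-jiffy poll while
	 * events or status updates are outstanding, otherwise stay at
	 * the slow primary monitoring interval.
	 */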
2423 	if (cb->new_ev_seq > cb->old_ev_seq) {
2424 		int event = cb->old_ev_seq;
2425 
2426 		dev_dbg(&shost->shost_gendev,
2427 			"get event log no %d/%d\n",
2428 			cb->new_ev_seq, event);
2429 		myrb_get_event(cb, event);
2430 		cb->old_ev_seq = event + 1;
2431 		interval = 10;
2432 	} else if (cb->need_err_info) {
2433 		cb->need_err_info = false;
2434 		dev_dbg(&shost->shost_gendev, "get error table\n");
2435 		myrb_get_errtable(cb);
2436 		interval = 10;
2437 	} else if (cb->need_rbld && cb->rbld_first) {
2438 		cb->need_rbld = false;
2439 		dev_dbg(&shost->shost_gendev,
2440 			"get rebuild progress\n");
2441 		myrb_update_rbld_progress(cb);
2442 		interval = 10;
2443 	} else if (cb->need_ldev_info) {
2444 		cb->need_ldev_info = false;
2445 		dev_dbg(&shost->shost_gendev,
2446 			"get logical drive info\n");
2447 		myrb_get_ldev_info(cb);
2448 		interval = 10;
2449 	} else if (cb->need_rbld) {
2450 		cb->need_rbld = false;
2451 		dev_dbg(&shost->shost_gendev,
2452 			"get rebuild progress\n");
2453 		myrb_update_rbld_progress(cb);
2454 		interval = 10;
2455 	} else if (cb->need_cc_status) {
2456 		cb->need_cc_status = false;
2457 		dev_dbg(&shost->shost_gendev,
2458 			"get consistency check progress\n");
2459 		myrb_get_cc_progress(cb);
2460 		interval = 10;
2461 	} else if (cb->need_bgi_status) {
2462 		cb->need_bgi_status = false;
2463 		dev_dbg(&shost->shost_gendev, "get background init status\n");
2464 		myrb_bgi_control(cb);
2465 		interval = 10;
2466 	} else {
2467 		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2468 		mutex_lock(&cb->dma_mutex);
2469 		myrb_hba_enquiry(cb);
2470 		mutex_unlock(&cb->dma_mutex);
2471 		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2472 		    cb->need_err_info || cb->need_rbld ||
2473 		    cb->need_ldev_info || cb->need_cc_status ||
2474 		    cb->need_bgi_status) {
2475 			dev_dbg(&shost->shost_gendev,
2476 				"reschedule monitor\n");
2477 			interval = 0;
2478 		}
2479 	}
2480 	if (interval > 1)
2481 		cb->primary_monitor_time = jiffies;
2482 	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2483 }
2484 
2485 /**
2486  * myrb_err_status - reports controller BIOS messages
2487  * @cb: myrb_hba instance the message was reported against
 * @error: error status byte read during the BIOS handshake
 * @parm0: first error parameter byte
 * @parm1: second error parameter byte
 *
2488  * Controller BIOS messages are passed through the Error Status Register
2489  * when the driver performs the BIOS handshaking.
2490  *
2491  * Return: true for fatal errors and false otherwise.
2492  */
2493 static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2494 		unsigned char parm0, unsigned char parm1)
2495 {
2496 	struct pci_dev *pdev = cb->pdev;
2497 
2498 	switch (error) {
2499 	case 0x00:
2500 		dev_info(&pdev->dev,
2501 			 "Physical Device %d:%d Not Responding\n",
2502 			 parm1, parm0);
2503 		break;
2504 	case 0x08:
2505 		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2506 		break;
2507 	case 0x30:
2508 		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2509 		break;
2510 	case 0x60:
2511 		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2512 		break;
2513 	case 0x70:
2514 		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2515 		break;
2516 	case 0x90:
2517 		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2518 			   parm1, parm0);
2519 		break;
2520 	case 0xA0:
2521 		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2522 		break;
2523 	case 0xB0:
2524 		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2525 		break;
2526 	case 0xD0:
2527 		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2528 		break;
2529 	case 0xF0:
2530 		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2531 		return true;
2532 	default:
2533 		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2534 			error);
2535 		return true;
2536 	}
2537 	return false;
2538 }
2539 
2540 /*
2541  * Hardware-specific functions
2542  */
2543 
2544 /*
2545  * DAC960 LA Series Controllers
2546  */
2547 
2548 static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2549 {
2550 	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2551 }
2552 
2553 static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2554 {
2555 	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2556 }
2557 
2558 static inline void DAC960_LA_gen_intr(void __iomem *base)
2559 {
2560 	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2561 }
2562 
2563 static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2564 {
2565 	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2566 }
2567 
2568 static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2569 {
2570 	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2571 }
2572 
2573 static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2574 {
2575 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2576 
2577 	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2578 }
2579 
2580 static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2581 {
2582 	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2583 
2584 	return !(idb & DAC960_LA_IDB_INIT_DONE);
2585 }
2586 
2587 static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2588 {
2589 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2590 }
2591 
2592 static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2593 {
2594 	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2595 }
2596 
2597 static inline void DAC960_LA_ack_intr(void __iomem *base)
2598 {
2599 	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2600 	       base + DAC960_LA_ODB_OFFSET);
2601 }
2602 
2603 static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2604 {
2605 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2606 
2607 	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2608 }
2609 
2610 static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2611 {
2612 	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2613 
2614 	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2615 }
2616 
2617 static inline void DAC960_LA_enable_intr(void __iomem *base)
2618 {
2619 	unsigned char imask = 0xFF;
2620 
2621 	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2622 	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2623 }
2624 
2625 static inline void DAC960_LA_disable_intr(void __iomem *base)
2626 {
2627 	unsigned char imask = 0xFF;
2628 
2629 	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2630 	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2631 }
2632 
2633 static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2634 {
2635 	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2636 
2637 	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2638 }
2639 
2640 static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2641 		union myrb_cmd_mbox *mbox)
2642 {
2643 	mem_mbox->words[1] = mbox->words[1];
2644 	mem_mbox->words[2] = mbox->words[2];
2645 	mem_mbox->words[3] = mbox->words[3];
2646 	/* Memory barrier to prevent reordering */
2647 	wmb();
2648 	mem_mbox->words[0] = mbox->words[0];
2649 	/* Memory barrier to force PCI access */
2650 	mb();
2651 }
2652 
2653 static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2654 		union myrb_cmd_mbox *mbox)
2655 {
2656 	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2657 	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2658 	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2659 	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2660 }
2661 
2662 static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2663 {
2664 	return readb(base + DAC960_LA_STSID_OFFSET);
2665 }
2666 
2667 static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2668 {
2669 	return readw(base + DAC960_LA_STS_OFFSET);
2670 }
2671 
2672 static inline bool
2673 DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2674 		unsigned char *param0, unsigned char *param1)
2675 {
2676 	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2677 
2678 	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2679 		return false;
2680 	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2681 
2682 	*error = errsts;
2683 	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2684 	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2685 	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2686 	return true;
2687 }
2688 
2689 static inline unsigned short
2690 DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2691 		union myrb_cmd_mbox *mbox)
2692 {
2693 	unsigned short status;
2694 	int timeout = 0;
2695 
2696 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2697 		if (!DAC960_LA_hw_mbox_is_full(base))
2698 			break;
2699 		udelay(10);
2700 		timeout++;
2701 	}
2702 	if (DAC960_LA_hw_mbox_is_full(base)) {
2703 		dev_err(&pdev->dev,
2704 			"Timeout waiting for empty mailbox\n");
2705 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2706 	}
2707 	DAC960_LA_write_hw_mbox(base, mbox);
2708 	DAC960_LA_hw_mbox_new_cmd(base);
2709 	timeout = 0;
2710 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2711 		if (DAC960_LA_hw_mbox_status_available(base))
2712 			break;
2713 		udelay(10);
2714 		timeout++;
2715 	}
2716 	if (!DAC960_LA_hw_mbox_status_available(base)) {
2717 		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2718 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2719 	}
2720 	status = DAC960_LA_read_status(base);
2721 	DAC960_LA_ack_hw_mbox_intr(base);
2722 	DAC960_LA_ack_hw_mbox_status(base);
2723 
2724 	return status;
2725 }
2726 
2727 static int DAC960_LA_hw_init(struct pci_dev *pdev,
2728 		struct myrb_hba *cb, void __iomem *base)
2729 {
2730 	int timeout = 0;
2731 	unsigned char error, parm0, parm1;
2732 
2733 	DAC960_LA_disable_intr(base);
2734 	DAC960_LA_ack_hw_mbox_status(base);
2735 	udelay(1000);
2736 	while (DAC960_LA_init_in_progress(base) &&
2737 	       timeout < MYRB_MAILBOX_TIMEOUT) {
2738 		if (DAC960_LA_read_error_status(base, &error,
2739 					      &parm0, &parm1) &&
2740 		    myrb_err_status(cb, error, parm0, parm1))
2741 			return -ENODEV;
2742 		udelay(10);
2743 		timeout++;
2744 	}
2745 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2746 		dev_err(&pdev->dev,
2747 			"Timeout waiting for Controller Initialisation\n");
2748 		return -ETIMEDOUT;
2749 	}
2750 	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2751 		dev_err(&pdev->dev,
2752 			"Unable to Enable Memory Mailbox Interface\n");
2753 		DAC960_LA_reset_ctrl(base);
2754 		return -ENODEV;
2755 	}
2756 	DAC960_LA_enable_intr(base);
2757 	cb->qcmd = myrb_qcmd;
2758 	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2759 	if (cb->dual_mode_interface)
2760 		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2761 	else
2762 		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2763 	cb->disable_intr = DAC960_LA_disable_intr;
2764 	cb->reset = DAC960_LA_reset_ctrl;
2765 
2766 	return 0;
2767 }
2768 
2769 static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2770 {
2771 	struct myrb_hba *cb = arg;
2772 	void __iomem *base = cb->io_base;
2773 	struct myrb_stat_mbox *next_stat_mbox;
2774 	unsigned long flags;
2775 
2776 	spin_lock_irqsave(&cb->queue_lock, flags);
2777 	DAC960_LA_ack_intr(base);
2778 	next_stat_mbox = cb->next_stat_mbox;
2779 	while (next_stat_mbox->valid) {
2780 		unsigned char id = next_stat_mbox->id;
2781 		struct scsi_cmnd *scmd = NULL;
2782 		struct myrb_cmdblk *cmd_blk = NULL;
2783 
2784 		if (id == MYRB_DCMD_TAG)
2785 			cmd_blk = &cb->dcmd_blk;
2786 		else if (id == MYRB_MCMD_TAG)
2787 			cmd_blk = &cb->mcmd_blk;
2788 		else {
2789 			scmd = scsi_host_find_tag(cb->host, id - 3);
2790 			if (scmd)
2791 				cmd_blk = scsi_cmd_priv(scmd);
2792 		}
2793 		if (cmd_blk)
2794 			cmd_blk->status = next_stat_mbox->status;
2795 		else
2796 			dev_err(&cb->pdev->dev,
2797 				"Unhandled command completion %d\n", id);
2798 
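		/*
		 * Clear the consumed entry and advance; the status
		 * mailboxes form a ring, so wrap past the last entry
		 * back to the first.
		 */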
2799 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2800 		if (++next_stat_mbox > cb->last_stat_mbox)
2801 			next_stat_mbox = cb->first_stat_mbox;
2802 
2803 		if (cmd_blk) {
2804 			if (id < 3)
2805 				myrb_handle_cmdblk(cb, cmd_blk);
2806 			else
2807 				myrb_handle_scsi(cb, cmd_blk, scmd);
2808 		}
2809 	}
2810 	cb->next_stat_mbox = next_stat_mbox;
2811 	spin_unlock_irqrestore(&cb->queue_lock, flags);
2812 	return IRQ_HANDLED;
2813 }
2814 
2815 struct myrb_privdata DAC960_LA_privdata = {
2816 	.hw_init =	DAC960_LA_hw_init,
2817 	.irq_handler =	DAC960_LA_intr_handler,
2818 	.mmio_size =	DAC960_LA_mmio_size,
2819 };
2820 
2821 /*
2822  * DAC960 PG Series Controllers
2823  */
2824 static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2825 {
2826 	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2827 }
2828 
2829 static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2830 {
2831 	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2832 }
2833 
2834 static inline void DAC960_PG_gen_intr(void __iomem *base)
2835 {
2836 	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2837 }
2838 
2839 static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2840 {
2841 	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2842 }
2843 
2844 static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2845 {
2846 	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2847 }
2848 
2849 static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2850 {
2851 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2852 
2853 	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2854 }
2855 
2856 static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2857 {
2858 	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2859 
2860 	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2861 }
2862 
2863 static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2864 {
2865 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2866 }
2867 
2868 static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2869 {
2870 	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2871 }
2872 
2873 static inline void DAC960_PG_ack_intr(void __iomem *base)
2874 {
2875 	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2876 	       base + DAC960_PG_ODB_OFFSET);
2877 }
2878 
2879 static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2880 {
2881 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2882 
2883 	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2884 }
2885 
2886 static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2887 {
2888 	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2889 
2890 	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2891 }
2892 
2893 static inline void DAC960_PG_enable_intr(void __iomem *base)
2894 {
2895 	unsigned int imask = (unsigned int)-1;
2896 
2897 	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2898 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2899 }
2900 
2901 static inline void DAC960_PG_disable_intr(void __iomem *base)
2902 {
2903 	unsigned int imask = (unsigned int)-1;
2904 
2905 	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2906 }
2907 
2908 static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2909 {
2910 	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2911 
2912 	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2913 }
2914 
2915 static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2916 		union myrb_cmd_mbox *mbox)
2917 {
2918 	mem_mbox->words[1] = mbox->words[1];
2919 	mem_mbox->words[2] = mbox->words[2];
2920 	mem_mbox->words[3] = mbox->words[3];
2921 	/* Memory barrier to prevent reordering */
2922 	wmb();
2923 	mem_mbox->words[0] = mbox->words[0];
2924 	/* Memory barrier to force PCI access */
2925 	mb();
2926 }
2927 
2928 static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2929 		union myrb_cmd_mbox *mbox)
2930 {
2931 	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2932 	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2933 	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2934 	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2935 }
2936 
2937 static inline unsigned char
2938 DAC960_PG_read_status_cmd_ident(void __iomem *base)
2939 {
2940 	return readb(base + DAC960_PG_STSID_OFFSET);
2941 }
2942 
2943 static inline unsigned short
2944 DAC960_PG_read_status(void __iomem *base)
2945 {
2946 	return readw(base + DAC960_PG_STS_OFFSET);
2947 }
2948 
2949 static inline bool
2950 DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2951 		unsigned char *param0, unsigned char *param1)
2952 {
2953 	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2954 
2955 	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2956 		return false;
2957 	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2958 	*error = errsts;
2959 	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2960 	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2961 	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2962 	return true;
2963 }
2964 
2965 static inline unsigned short
2966 DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2967 		union myrb_cmd_mbox *mbox)
2968 {
2969 	unsigned short status;
2970 	int timeout = 0;
2971 
2972 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2973 		if (!DAC960_PG_hw_mbox_is_full(base))
2974 			break;
2975 		udelay(10);
2976 		timeout++;
2977 	}
2978 	if (DAC960_PG_hw_mbox_is_full(base)) {
2979 		dev_err(&pdev->dev,
2980 			"Timeout waiting for empty mailbox\n");
2981 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2982 	}
2983 	DAC960_PG_write_hw_mbox(base, mbox);
2984 	DAC960_PG_hw_mbox_new_cmd(base);
2985 
2986 	timeout = 0;
2987 	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2988 		if (DAC960_PG_hw_mbox_status_available(base))
2989 			break;
2990 		udelay(10);
2991 		timeout++;
2992 	}
2993 	if (!DAC960_PG_hw_mbox_status_available(base)) {
2994 		dev_err(&pdev->dev,
2995 			"Timeout waiting for mailbox status\n");
2996 		return MYRB_STATUS_SUBSYS_TIMEOUT;
2997 	}
2998 	status = DAC960_PG_read_status(base);
2999 	DAC960_PG_ack_hw_mbox_intr(base);
3000 	DAC960_PG_ack_hw_mbox_status(base);
3001 
3002 	return status;
3003 }
3004 
3005 static int DAC960_PG_hw_init(struct pci_dev *pdev,
3006 		struct myrb_hba *cb, void __iomem *base)
3007 {
3008 	int timeout = 0;
3009 	unsigned char error, parm0, parm1;
3010 
3011 	DAC960_PG_disable_intr(base);
3012 	DAC960_PG_ack_hw_mbox_status(base);
3013 	udelay(1000);
3014 	while (DAC960_PG_init_in_progress(base) &&
3015 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3016 		if (DAC960_PG_read_error_status(base, &error,
3017 						&parm0, &parm1) &&
3018 		    myrb_err_status(cb, error, parm0, parm1))
3019 			return -EIO;
3020 		udelay(10);
3021 		timeout++;
3022 	}
3023 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3024 		dev_err(&pdev->dev,
3025 			"Timeout waiting for Controller Initialisation\n");
3026 		return -ETIMEDOUT;
3027 	}
3028 	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3029 		dev_err(&pdev->dev,
3030 			"Unable to Enable Memory Mailbox Interface\n");
3031 		DAC960_PG_reset_ctrl(base);
3032 		return -ENODEV;
3033 	}
3034 	DAC960_PG_enable_intr(base);
3035 	cb->qcmd = myrb_qcmd;
3036 	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3037 	if (cb->dual_mode_interface)
3038 		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3039 	else
3040 		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3041 	cb->disable_intr = DAC960_PG_disable_intr;
3042 	cb->reset = DAC960_PG_reset_ctrl;
3043 
3044 	return 0;
3045 }
3046 
3047 static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
3048 {
3049 	struct myrb_hba *cb = arg;
3050 	void __iomem *base = cb->io_base;
3051 	struct myrb_stat_mbox *next_stat_mbox;
3052 	unsigned long flags;
3053 
3054 	spin_lock_irqsave(&cb->queue_lock, flags);
3055 	DAC960_PG_ack_intr(base);
3056 	next_stat_mbox = cb->next_stat_mbox;
3057 	while (next_stat_mbox->valid) {
3058 		unsigned char id = next_stat_mbox->id;
3059 		struct scsi_cmnd *scmd = NULL;
3060 		struct myrb_cmdblk *cmd_blk = NULL;
3061 
3062 		if (id == MYRB_DCMD_TAG)
3063 			cmd_blk = &cb->dcmd_blk;
3064 		else if (id == MYRB_MCMD_TAG)
3065 			cmd_blk = &cb->mcmd_blk;
3066 		else {
3067 			scmd = scsi_host_find_tag(cb->host, id - 3);
3068 			if (scmd)
3069 				cmd_blk = scsi_cmd_priv(scmd);
3070 		}
3071 		if (cmd_blk)
3072 			cmd_blk->status = next_stat_mbox->status;
3073 		else
3074 			dev_err(&cb->pdev->dev,
3075 				"Unhandled command completion %d\n", id);
3076 
3077 		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3078 		if (++next_stat_mbox > cb->last_stat_mbox)
3079 			next_stat_mbox = cb->first_stat_mbox;
3080 
3081 		if (id < 3)
3082 			myrb_handle_cmdblk(cb, cmd_blk);
3083 		else
3084 			myrb_handle_scsi(cb, cmd_blk, scmd);
3085 	}
3086 	cb->next_stat_mbox = next_stat_mbox;
3087 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3088 	return IRQ_HANDLED;
3089 }
3090 
3091 struct myrb_privdata DAC960_PG_privdata = {
3092 	.hw_init =	DAC960_PG_hw_init,
3093 	.irq_handler =	DAC960_PG_intr_handler,
3094 	.mmio_size =	DAC960_PG_mmio_size,
3095 };
3096 
3097 
3098 /*
3099  * DAC960 PD Series Controllers
3100  */
3101 
3102 static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3103 {
3104 	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3105 }
3106 
3107 static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3108 {
3109 	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3110 }
3111 
3112 static inline void DAC960_PD_gen_intr(void __iomem *base)
3113 {
3114 	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3115 }
3116 
3117 static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3118 {
3119 	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3120 }
3121 
3122 static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3123 {
3124 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3125 
3126 	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3127 }
3128 
3129 static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3130 {
3131 	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3132 
3133 	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3134 }
3135 
3136 static inline void DAC960_PD_ack_intr(void __iomem *base)
3137 {
3138 	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3139 }
3140 
3141 static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3142 {
3143 	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3144 
3145 	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3146 }
3147 
3148 static inline void DAC960_PD_enable_intr(void __iomem *base)
3149 {
3150 	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3151 }
3152 
3153 static inline void DAC960_PD_disable_intr(void __iomem *base)
3154 {
3155 	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3156 }
3157 
3158 static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3159 {
3160 	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3161 
3162 	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3163 }
3164 
3165 static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3166 		union myrb_cmd_mbox *mbox)
3167 {
3168 	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3169 	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3170 	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3171 	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3172 }
3173 
3174 static inline unsigned char
3175 DAC960_PD_read_status_cmd_ident(void __iomem *base)
3176 {
3177 	return readb(base + DAC960_PD_STSID_OFFSET);
3178 }
3179 
3180 static inline unsigned short
3181 DAC960_PD_read_status(void __iomem *base)
3182 {
3183 	return readw(base + DAC960_PD_STS_OFFSET);
3184 }
3185 
3186 static inline bool
3187 DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3188 		unsigned char *param0, unsigned char *param1)
3189 {
3190 	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3191 
3192 	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3193 		return false;
3194 	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3195 	*error = errsts;
3196 	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3197 	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3198 	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3199 	return true;
3200 }
3201 
3202 static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3203 {
3204 	void __iomem *base = cb->io_base;
3205 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3206 
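	/*
	 * PD controllers have no memory mailbox; busy-wait until the
	 * hardware mailbox drains, then post the command directly.
	 */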
3207 	while (DAC960_PD_hw_mbox_is_full(base))
3208 		udelay(1);
3209 	DAC960_PD_write_cmd_mbox(base, mbox);
3210 	DAC960_PD_hw_mbox_new_cmd(base);
3211 }
3212 
3213 static int DAC960_PD_hw_init(struct pci_dev *pdev,
3214 		struct myrb_hba *cb, void __iomem *base)
3215 {
3216 	int timeout = 0;
3217 	unsigned char error, parm0, parm1;
3218 
3219 	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3220 		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3221 			(unsigned long)cb->io_addr);
3222 		return -EBUSY;
3223 	}
3224 	DAC960_PD_disable_intr(base);
3225 	DAC960_PD_ack_hw_mbox_status(base);
3226 	udelay(1000);
3227 	while (DAC960_PD_init_in_progress(base) &&
3228 	       timeout < MYRB_MAILBOX_TIMEOUT) {
3229 		if (DAC960_PD_read_error_status(base, &error,
3230 					      &parm0, &parm1) &&
3231 		    myrb_err_status(cb, error, parm0, parm1))
3232 			return -EIO;
3233 		udelay(10);
3234 		timeout++;
3235 	}
3236 	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3237 		dev_err(&pdev->dev,
3238 			"Timeout waiting for Controller Initialisation\n");
3239 		return -ETIMEDOUT;
3240 	}
3241 	if (!myrb_enable_mmio(cb, NULL)) {
3242 		dev_err(&pdev->dev,
3243 			"Unable to Enable Memory Mailbox Interface\n");
3244 		DAC960_PD_reset_ctrl(base);
3245 		return -ENODEV;
3246 	}
3247 	DAC960_PD_enable_intr(base);
3248 	cb->qcmd = DAC960_PD_qcmd;
3249 	cb->disable_intr = DAC960_PD_disable_intr;
3250 	cb->reset = DAC960_PD_reset_ctrl;
3251 
3252 	return 0;
3253 }
3254 
3255 static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3256 {
3257 	struct myrb_hba *cb = arg;
3258 	void __iomem *base = cb->io_base;
3259 	unsigned long flags;
3260 
3261 	spin_lock_irqsave(&cb->queue_lock, flags);
3262 	while (DAC960_PD_hw_mbox_status_available(base)) {
3263 		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3264 		struct scsi_cmnd *scmd = NULL;
3265 		struct myrb_cmdblk *cmd_blk = NULL;
3266 
3267 		if (id == MYRB_DCMD_TAG)
3268 			cmd_blk = &cb->dcmd_blk;
3269 		else if (id == MYRB_MCMD_TAG)
3270 			cmd_blk = &cb->mcmd_blk;
3271 		else {
3272 			scmd = scsi_host_find_tag(cb->host, id - 3);
3273 			if (scmd)
3274 				cmd_blk = scsi_cmd_priv(scmd);
3275 		}
3276 		if (cmd_blk)
3277 			cmd_blk->status = DAC960_PD_read_status(base);
3278 		else
3279 			dev_err(&cb->pdev->dev,
3280 				"Unhandled command completion %d\n", id);
3281 
3282 		DAC960_PD_ack_intr(base);
3283 		DAC960_PD_ack_hw_mbox_status(base);
3284 
3285 		if (id < 3)
3286 			myrb_handle_cmdblk(cb, cmd_blk);
3287 		else
3288 			myrb_handle_scsi(cb, cmd_blk, scmd);
3289 	}
3290 	spin_unlock_irqrestore(&cb->queue_lock, flags);
3291 	return IRQ_HANDLED;
3292 }
3293 
3294 struct myrb_privdata DAC960_PD_privdata = {
3295 	.hw_init =	DAC960_PD_hw_init,
3296 	.irq_handler =	DAC960_PD_intr_handler,
3297 	.mmio_size =	DAC960_PD_mmio_size,
3298 };
3299 
3300 
3301 /*
3302  * DAC960 P Series Controllers
3303  *
3304  * Similar to the DAC960 PD Series Controllers, but some commands have
3305  * to be translated.
3306  */
3307 
3308 static inline void myrb_translate_enquiry(void *enq)
3309 {
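	/*
	 * Move the 64-byte tail of the old-style ENQUIRY reply from
	 * offset 36 to its new-style offset 132 and clear the gap.
	 */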
3310 	memcpy(enq + 132, enq + 36, 64);
3311 	memset(enq + 36, 0, 96);
3312 }
3313 
3314 static inline void myrb_translate_devstate(void *state)
3315 {
3316 	memcpy(state + 2, state + 3, 1);
3317 	memmove(state + 4, state + 5, 2);
3318 	memmove(state + 6, state + 8, 4);
3319 }
3320 
3321 static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3322 {
3323 	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3324 	int ldev_num = mbox->type5.ld.ldev_num;
3325 
3326 	mbox->bytes[3] &= 0x7;
3327 	mbox->bytes[3] |= mbox->bytes[7] << 6;
3328 	mbox->bytes[7] = ldev_num;
3329 }
3330 
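/*
 * Undo myrb_translate_to_rw_command() on completion, moving the
 * logical drive number and the two-bit field back to their new-style
 * positions.
 */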
static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
{
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
	int ldev_num = mbox->bytes[7];

	mbox->bytes[7] = mbox->bytes[3] >> 6;
	mbox->bytes[3] &= 0x7;
	mbox->bytes[3] |= ldev_num << 3;
}

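/*
 * DAC960_P_qcmd - queue a command to a DAC960 P series controller.
 *
 * P series firmware only understands the old-style opcodes, so
 * ENQUIRY, GET DEVICE STATE and the read/write commands are rewritten
 * to their _OLD equivalents (and the read/write mailboxes repacked)
 * before submission; the completion handler reverses the translation.
 */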
static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
{
	void __iomem *base = cb->io_base;
	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;

	switch (mbox->common.opcode) {
	case MYRB_CMD_ENQUIRY:
		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
		break;
	case MYRB_CMD_GET_DEVICE_STATE:
		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
		break;
	case MYRB_CMD_READ:
		mbox->common.opcode = MYRB_CMD_READ_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE:
		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_READ_SG:
		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	case MYRB_CMD_WRITE_SG:
		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
		myrb_translate_to_rw_command(cmd_blk);
		break;
	default:
		break;
	}
	while (DAC960_PD_hw_mbox_is_full(base))
		udelay(1);
	DAC960_PD_write_cmd_mbox(base, mbox);
	DAC960_PD_hw_mbox_new_cmd(base);
}

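/*
 * DAC960_P_hw_init - initialize a DAC960 P series controller.
 *
 * Identical to the PD series bring-up except that the translating
 * DAC960_P_qcmd() is installed as the queueing method.
 */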
static int DAC960_P_hw_init(struct pci_dev *pdev,
		struct myrb_hba *cb, void __iomem *base)
{
	int timeout = 0;
	unsigned char error, parm0, parm1;

	if (!request_region(cb->io_addr, 0x80, "myrb")) {
		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
			(unsigned long)cb->io_addr);
		return -EBUSY;
	}
	DAC960_PD_disable_intr(base);
	DAC960_PD_ack_hw_mbox_status(base);
	udelay(1000);
	while (DAC960_PD_init_in_progress(base) &&
	       timeout < MYRB_MAILBOX_TIMEOUT) {
		if (DAC960_PD_read_error_status(base, &error,
						&parm0, &parm1) &&
		    myrb_err_status(cb, error, parm0, parm1))
			return -EIO;
		udelay(10);
		timeout++;
	}
	if (timeout == MYRB_MAILBOX_TIMEOUT) {
		dev_err(&pdev->dev,
			"Timeout waiting for Controller Initialisation\n");
		return -ETIMEDOUT;
	}
	if (!myrb_enable_mmio(cb, NULL)) {
		dev_err(&pdev->dev,
			"Unable to Enable Memory Mailbox Interface\n");
		DAC960_PD_reset_ctrl(base);
		return -ENODEV;
	}
	DAC960_PD_enable_intr(base);
	cb->qcmd = DAC960_P_qcmd;
	cb->disable_intr = DAC960_PD_disable_intr;
	cb->reset = DAC960_PD_reset_ctrl;

	return 0;
}

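/*
 * DAC960_P_intr_handler - interrupt handler for DAC960 P controllers.
 *
 * Works like the PD series handler, but before completing a command it
 * restores the new-style opcode and mailbox layout (and translates
 * ENQUIRY replies in place) so that the rest of the driver never sees
 * the old-style commands.
 */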
static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
{
	struct myrb_hba *cb = arg;
	void __iomem *base = cb->io_base;
	unsigned long flags;

	spin_lock_irqsave(&cb->queue_lock, flags);
	while (DAC960_PD_hw_mbox_status_available(base)) {
		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
		struct scsi_cmnd *scmd = NULL;
		struct myrb_cmdblk *cmd_blk = NULL;
		union myrb_cmd_mbox *mbox;
		enum myrb_cmd_opcode op;

		if (id == MYRB_DCMD_TAG)
			cmd_blk = &cb->dcmd_blk;
		else if (id == MYRB_MCMD_TAG)
			cmd_blk = &cb->mcmd_blk;
		else {
			scmd = scsi_host_find_tag(cb->host, id - 3);
			if (scmd)
				cmd_blk = scsi_cmd_priv(scmd);
		}
		if (cmd_blk)
			cmd_blk->status = DAC960_PD_read_status(base);
		else
			dev_err(&cb->pdev->dev,
				"Unhandled command completion %d\n", id);

		DAC960_PD_ack_intr(base);
		DAC960_PD_ack_hw_mbox_status(base);

		if (!cmd_blk)
			continue;

		mbox = &cmd_blk->mbox;
		op = mbox->common.opcode;
		switch (op) {
		case MYRB_CMD_ENQUIRY_OLD:
			mbox->common.opcode = MYRB_CMD_ENQUIRY;
			myrb_translate_enquiry(cb->enquiry);
			break;
		case MYRB_CMD_READ_OLD:
			mbox->common.opcode = MYRB_CMD_READ;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_READ_SG_OLD:
			mbox->common.opcode = MYRB_CMD_READ_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		case MYRB_CMD_WRITE_SG_OLD:
			mbox->common.opcode = MYRB_CMD_WRITE_SG;
			myrb_translate_from_rw_command(cmd_blk);
			break;
		default:
			break;
		}
		if (id < 3)
			myrb_handle_cmdblk(cb, cmd_blk);
		else
			myrb_handle_scsi(cb, cmd_blk, scmd);
	}
	spin_unlock_irqrestore(&cb->queue_lock, flags);
	return IRQ_HANDLED;
}

struct myrb_privdata DAC960_P_privdata = {
	.hw_init =	DAC960_P_hw_init,
	.irq_handler =	DAC960_P_intr_handler,
	.mmio_size =	DAC960_PD_mmio_size,
};

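/**
 * myrb_detect - allocate and initialize a controller instance
 *
 * Allocates the Scsi_Host, enables the PCI device, maps the register
 * window (PD and P series controllers are additionally programmed
 * through an I/O port range), runs the board-specific hw_init method
 * and requests the interrupt.
 *
 * Return: pointer to the myrb_hba on success, NULL otherwise.
 */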
static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
		const struct pci_device_id *entry)
{
	struct myrb_privdata *privdata =
		(struct myrb_privdata *)entry->driver_data;
	irq_handler_t irq_handler = privdata->irq_handler;
	unsigned int mmio_size = privdata->mmio_size;
	struct Scsi_Host *shost;
	struct myrb_hba *cb = NULL;

	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
	if (!shost) {
		dev_err(&pdev->dev, "Unable to allocate Controller\n");
		return NULL;
	}
	shost->max_cmd_len = 12;
	shost->max_lun = 256;
	cb = shost_priv(shost);
	mutex_init(&cb->dcmd_mutex);
	mutex_init(&cb->dma_mutex);
	cb->pdev = pdev;
	cb->host = shost;

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev, "Failed to enable PCI device\n");
		scsi_host_put(shost);
		return NULL;
	}

	if (privdata->hw_init == DAC960_PD_hw_init ||
	    privdata->hw_init == DAC960_P_hw_init) {
		cb->io_addr = pci_resource_start(pdev, 0);
		cb->pci_addr = pci_resource_start(pdev, 1);
	} else
		cb->pci_addr = pci_resource_start(pdev, 0);

	pci_set_drvdata(pdev, cb);
	spin_lock_init(&cb->queue_lock);
	if (mmio_size < PAGE_SIZE)
		mmio_size = PAGE_SIZE;
	cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size);
	if (cb->mmio_base == NULL) {
		dev_err(&pdev->dev,
			"Unable to map Controller Register Window\n");
		goto failure;
	}

	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
	if (privdata->hw_init(pdev, cb, cb->io_base))
		goto failure;

	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
		dev_err(&pdev->dev,
			"Unable to acquire IRQ Channel %d\n", pdev->irq);
		goto failure;
	}
	cb->irq = pdev->irq;
	return cb;

failure:
	dev_err(&pdev->dev,
		"Failed to initialize Controller\n");
	myrb_cleanup(cb);
	return NULL;
}

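/*
 * myrb_probe - PCI probe entry point.
 *
 * Detects and initialises the controller, reads its configuration,
 * sets up the DMA pools and registers the SCSI host.
 */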
static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
{
	struct myrb_hba *cb;
	int ret;

	cb = myrb_detect(dev, entry);
	if (!cb)
		return -ENODEV;

	ret = myrb_get_hba_config(cb);
	if (ret < 0) {
		myrb_cleanup(cb);
		return ret;
	}

	if (!myrb_create_mempools(dev, cb)) {
		ret = -ENOMEM;
		goto failed;
	}

	ret = scsi_add_host(cb->host, &dev->dev);
	if (ret) {
		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
		myrb_destroy_mempools(cb);
		goto failed;
	}
	scsi_scan_host(cb->host);
	return 0;
failed:
	myrb_cleanup(cb);
	return ret;
}

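/*
 * myrb_remove - PCI remove entry point.
 *
 * Flushes the controller cache before tearing down the host and
 * releasing the DMA pools.
 */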
static void myrb_remove(struct pci_dev *pdev)
{
	struct myrb_hba *cb = pci_get_drvdata(pdev);

	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
	myrb_cleanup(cb);
	myrb_destroy_mempools(cb);
}

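/*
 * PCI IDs handled by this driver. DAC960 LA boards identify themselves
 * with the IDs of their DEC 21285 bridge chip and are therefore matched
 * via the Mylex subsystem IDs; the other boards use native Mylex device
 * IDs.
 */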
static const struct pci_device_id myrb_id_table[] = {
	{
		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
			       PCI_DEVICE_ID_DEC_21285,
			       PCI_VENDOR_ID_MYLEX,
			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
		.driver_data	= (unsigned long) &DAC960_LA_privdata,
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
	},
	{
		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
	},
	{0, },
};

MODULE_DEVICE_TABLE(pci, myrb_id_table);

static struct pci_driver myrb_pci_driver = {
	.name		= "myrb",
	.id_table	= myrb_id_table,
	.probe		= myrb_probe,
	.remove		= myrb_remove,
};

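/*
 * Attach the RAID class before registering the PCI driver so the raid
 * template is available as soon as the first controller probes; release
 * it again if driver registration fails.
 */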
static int __init myrb_init_module(void)
{
	int ret;

	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
	if (!myrb_raid_template)
		return -ENODEV;

	ret = pci_register_driver(&myrb_pci_driver);
	if (ret)
		raid_class_release(myrb_raid_template);

	return ret;
}

static void __exit myrb_cleanup_module(void)
{
	pci_unregister_driver(&myrb_pci_driver);
	raid_class_release(myrb_raid_template);
}

module_init(myrb_init_module);
module_exit(myrb_cleanup_module);

MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
MODULE_LICENSE("GPL");