1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * CXL Flash Device Driver
4 *
5 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
6 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
7 *
8 * Copyright (C) 2015 IBM Corporation
9 */
10
11#include <linux/delay.h>
12#include <linux/list.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15
16#include <asm/unaligned.h>
17
18#include <scsi/scsi_cmnd.h>
19#include <scsi/scsi_host.h>
20#include <uapi/scsi/cxlflash_ioctl.h>
21
22#include "main.h"
23#include "sislite.h"
24#include "common.h"
25
26MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
27MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
28MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
29MODULE_LICENSE("GPL");
30
31static struct class *cxlflash_class;
32static u32 cxlflash_major;
33static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
34
35/**
36 * process_cmd_err() - command error handler
37 * @cmd:	AFU command that experienced the error.
38 * @scp:	SCSI command associated with the AFU command in error.
39 *
40 * Translates error bits from AFU command to SCSI command results.
41 */
42static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
43{
44	struct afu *afu = cmd->parent;
45	struct cxlflash_cfg *cfg = afu->parent;
46	struct device *dev = &cfg->dev->dev;
47	struct sisl_ioasa *ioasa;
48	u32 resid;
49
50	ioasa = &(cmd->sa);
51
52	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
53		resid = ioasa->resid;
54		scsi_set_resid(scp, resid);
55		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
56			__func__, cmd, scp, resid);
57	}
58
59	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
60		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p\n",
61			__func__, cmd, scp);
62		scp->result = (DID_ERROR << 16);
63	}
64
65	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
66		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
67		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
68		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);
69
70	if (ioasa->rc.scsi_rc) {
71		/* We have a SCSI status */
72		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
73			memcpy(scp->sense_buffer, ioasa->sense_data,
74			       SISL_SENSE_DATA_LEN);
75			scp->result = ioasa->rc.scsi_rc;
76		} else
77			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
78	}
79
80	/*
81	 * We encountered an error. Set scp->result based on nature
82	 * of error.
83	 */
84	if (ioasa->rc.fc_rc) {
85		/* We have an FC status */
86		switch (ioasa->rc.fc_rc) {
87		case SISL_FC_RC_LINKDOWN:
88			scp->result = (DID_REQUEUE << 16);
89			break;
90		case SISL_FC_RC_RESID:
91			/* This indicates an FCP resid underrun */
92			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
93				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
94				 * then we will handle this error else where.
95				 * If not then we must handle it here.
96				 * This is probably an AFU bug.
97				 */
98				scp->result = (DID_ERROR << 16);
99			}
100			break;
101		case SISL_FC_RC_RESIDERR:
102			/* Resid mismatch between adapter and device */
103		case SISL_FC_RC_TGTABORT:
104		case SISL_FC_RC_ABORTOK:
105		case SISL_FC_RC_ABORTFAIL:
106		case SISL_FC_RC_NOLOGI:
107		case SISL_FC_RC_ABORTPEND:
108		case SISL_FC_RC_WRABORTPEND:
109		case SISL_FC_RC_NOEXP:
110		case SISL_FC_RC_INUSE:
111			scp->result = (DID_ERROR << 16);
112			break;
113		}
114	}
115
116	if (ioasa->rc.afu_rc) {
117		/* We have an AFU error */
118		switch (ioasa->rc.afu_rc) {
119		case SISL_AFU_RC_NO_CHANNELS:
120			scp->result = (DID_NO_CONNECT << 16);
121			break;
122		case SISL_AFU_RC_DATA_DMA_ERR:
123			switch (ioasa->afu_extra) {
124			case SISL_AFU_DMA_ERR_PAGE_IN:
125				/* Retry */
126				scp->result = (DID_IMM_RETRY << 16);
127				break;
128			case SISL_AFU_DMA_ERR_INVALID_EA:
129			default:
130				scp->result = (DID_ERROR << 16);
131			}
132			break;
133		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
134			/* Retry */
135			scp->result = (DID_ERROR << 16);
136			break;
137		default:
138			scp->result = (DID_ERROR << 16);
139		}
140	}
141}
142
143/**
144 * cmd_complete() - command completion handler
145 * @cmd:	AFU command that has completed.
146 *
147 * For SCSI commands this routine prepares and submits commands that have
148 * either completed or timed out to the SCSI stack. For internal commands
149 * (TMF or AFU), this routine simply notifies the originator that the
150 * command has completed.
151 */
152static void cmd_complete(struct afu_cmd *cmd)
153{
154	struct scsi_cmnd *scp;
155	ulong lock_flags;
156	struct afu *afu = cmd->parent;
157	struct cxlflash_cfg *cfg = afu->parent;
158	struct device *dev = &cfg->dev->dev;
159	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
160
161	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
162	list_del(&cmd->list);
163	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
164
165	if (cmd->scp) {
166		scp = cmd->scp;
167		if (unlikely(cmd->sa.ioasc))
168			process_cmd_err(cmd, scp);
169		else
170			scp->result = (DID_OK << 16);
171
172		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
173				    __func__, scp, scp->result, cmd->sa.ioasc);
174		scsi_done(scp);
175	} else if (cmd->cmd_tmf) {
176		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
177		cfg->tmf_active = false;
178		wake_up_all_locked(&cfg->tmf_waitq);
179		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
180	} else
181		complete(&cmd->cevent);
182}
183
184/**
185 * flush_pending_cmds() - flush all pending commands on this hardware queue
186 * @hwq:	Hardware queue to flush.
187 *
188 * The hardware send queue lock associated with this hardware queue must be
189 * held when calling this routine.
190 */
191static void flush_pending_cmds(struct hwq *hwq)
192{
193	struct cxlflash_cfg *cfg = hwq->afu->parent;
194	struct afu_cmd *cmd, *tmp;
195	struct scsi_cmnd *scp;
196	ulong lock_flags;
197
198	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
199		/* Bypass command when on a doneq, cmd_complete() will handle */
200		if (!list_empty(&cmd->queue))
201			continue;
202
203		list_del(&cmd->list);
204
205		if (cmd->scp) {
206			scp = cmd->scp;
207			scp->result = (DID_IMM_RETRY << 16);
208			scsi_done(scp);
209		} else {
210			cmd->cmd_aborted = true;
211
212			if (cmd->cmd_tmf) {
213				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
214				cfg->tmf_active = false;
215				wake_up_all_locked(&cfg->tmf_waitq);
216				spin_unlock_irqrestore(&cfg->tmf_slock,
217						       lock_flags);
218			} else
219				complete(&cmd->cevent);
220		}
221	}
222}
223
224/**
225 * context_reset() - reset context via specified register
226 * @hwq:	Hardware queue owning the context to be reset.
227 * @reset_reg:	MMIO register to perform reset.
228 *
229 * When the reset is successful, the SISLite specification guarantees that
230 * the AFU has aborted all currently pending I/O. Accordingly, these commands
231 * must be flushed.
232 *
233 * Return: 0 on success, -errno on failure
234 */
235static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
236{
237	struct cxlflash_cfg *cfg = hwq->afu->parent;
238	struct device *dev = &cfg->dev->dev;
239	int rc = -ETIMEDOUT;
240	int nretry = 0;
241	u64 val = 0x1;
242	ulong lock_flags;
243
244	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);
245
246	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
247
248	writeq_be(val, reset_reg);
249	do {
250		val = readq_be(reset_reg);
251		if ((val & 0x1) == 0x0) {
252			rc = 0;
253			break;
254		}
255
256		/* Double delay each time */
257		udelay(1 << nretry);
258	} while (nretry++ < MC_ROOM_RETRY_CNT);
259
260	if (!rc)
261		flush_pending_cmds(hwq);
262
263	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
264
265	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
266		__func__, rc, val, nretry);
267	return rc;
268}
269
270/**
271 * context_reset_ioarrin() - reset context via IOARRIN register
272 * @hwq:	Hardware queue owning the context to be reset.
273 *
274 * Return: 0 on success, -errno on failure
275 */
276static int context_reset_ioarrin(struct hwq *hwq)
277{
278	return context_reset(hwq, &hwq->host_map->ioarrin);
279}
280
281/**
282 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
283 * @hwq:	Hardware queue owning the context to be reset.
284 *
285 * Return: 0 on success, -errno on failure
286 */
287static int context_reset_sq(struct hwq *hwq)
288{
289	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
290}
291
292/**
293 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
294 * @afu:	AFU associated with the host.
295 * @cmd:	AFU command to send.
296 *
297 * Return:
298 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
299 */
300static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
301{
302	struct cxlflash_cfg *cfg = afu->parent;
303	struct device *dev = &cfg->dev->dev;
304	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
305	int rc = 0;
306	s64 room;
307	ulong lock_flags;
308
309	/*
310	 * To avoid the performance penalty of MMIO, spread the update of
311	 * 'room' over multiple commands.
312	 */
313	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
314	if (--hwq->room < 0) {
315		room = readq_be(&hwq->host_map->cmd_room);
316		if (room <= 0) {
317			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
318					    "0x%02X, room=0x%016llX\n",
319					    __func__, cmd->rcb.cdb[0], room);
320			hwq->room = 0;
321			rc = SCSI_MLQUEUE_HOST_BUSY;
322			goto out;
323		}
324		hwq->room = room - 1;
325	}
326
327	list_add(&cmd->list, &hwq->pending_cmds);
328	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
329out:
330	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
331	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
332		__func__, cmd, cmd->rcb.data_len, cmd->rcb.data_ea, rc);
333	return rc;
334}
335
336/**
337 * send_cmd_sq() - sends an AFU command via SQ ring
338 * @afu:	AFU associated with the host.
339 * @cmd:	AFU command to send.
340 *
341 * Return:
342 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
343 */
344static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
345{
346	struct cxlflash_cfg *cfg = afu->parent;
347	struct device *dev = &cfg->dev->dev;
348	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
349	int rc = 0;
350	int newval;
351	ulong lock_flags;
352
353	newval = atomic_dec_if_positive(&hwq->hsq_credits);
354	if (newval <= 0) {
355		rc = SCSI_MLQUEUE_HOST_BUSY;
356		goto out;
357	}
358
359	cmd->rcb.ioasa = &cmd->sa;
360
361	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
362
363	*hwq->hsq_curr = cmd->rcb;
364	if (hwq->hsq_curr < hwq->hsq_end)
365		hwq->hsq_curr++;
366	else
367		hwq->hsq_curr = hwq->hsq_start;
368
369	list_add(&cmd->list, &hwq->pending_cmds);
370	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
371
372	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
373out:
374	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
375	       "head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
376	       cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
377	       readq_be(&hwq->host_map->sq_head),
378	       readq_be(&hwq->host_map->sq_tail));
379	return rc;
380}
381
382/**
383 * wait_resp() - polls for a response or timeout to a sent AFU command
384 * @afu:	AFU associated with the host.
385 * @cmd:	AFU command that was sent.
386 *
387 * Return: 0 on success, -errno on failure
388 */
389static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
390{
391	struct cxlflash_cfg *cfg = afu->parent;
392	struct device *dev = &cfg->dev->dev;
393	int rc = 0;
394	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);
395
396	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
397	if (!timeout)
398		rc = -ETIMEDOUT;
399
400	if (cmd->cmd_aborted)
401		rc = -EAGAIN;
402
403	if (unlikely(cmd->sa.ioasc != 0)) {
404		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
405			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
406		rc = -EIO;
407	}
408
409	return rc;
410}
411
412/**
413 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
414 * @host:	SCSI host associated with device.
415 * @scp:	SCSI command to send.
416 * @afu:	SCSI command to send.
417 *
418 * Hashes a command based upon the hardware queue mode.
419 *
420 * Return: Trusted index of target hardware queue
421 */
422static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
423			     struct afu *afu)
424{
425	u32 tag;
426	u32 hwq = 0;
427
428	if (afu->num_hwqs == 1)
429		return 0;
430
431	switch (afu->hwq_mode) {
432	case HWQ_MODE_RR:
433		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
434		break;
435	case HWQ_MODE_TAG:
436		tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
437		hwq = blk_mq_unique_tag_to_hwq(tag);
438		break;
439	case HWQ_MODE_CPU:
440		hwq = smp_processor_id() % afu->num_hwqs;
441		break;
442	default:
443		WARN_ON_ONCE(1);
444	}
445
446	return hwq;
447}
448
449/**
450 * send_tmf() - sends a Task Management Function (TMF)
451 * @cfg:	Internal structure associated with the host.
452 * @sdev:	SCSI device destined for TMF.
453 * @tmfcmd:	TMF command to send.
454 *
455 * Return:
456 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
457 */
458static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
459		    u64 tmfcmd)
460{
461	struct afu *afu = cfg->afu;
462	struct afu_cmd *cmd = NULL;
463	struct device *dev = &cfg->dev->dev;
464	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
465	bool needs_deletion = false;
466	char *buf = NULL;
467	ulong lock_flags;
468	int rc = 0;
469	ulong to;
470
471	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
472	if (unlikely(!buf)) {
473		dev_err(dev, "%s: no memory for command\n", __func__);
474		rc = -ENOMEM;
475		goto out;
476	}
477
478	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
479	INIT_LIST_HEAD(&cmd->queue);
480
481	/* When Task Management Function is active do not send another */
482	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
483	if (cfg->tmf_active)
484		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
485						  !cfg->tmf_active,
486						  cfg->tmf_slock);
487	cfg->tmf_active = true;
488	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
489
490	cmd->parent = afu;
491	cmd->cmd_tmf = true;
492	cmd->hwq_index = hwq->index;
493
494	cmd->rcb.ctx_id = hwq->ctx_hndl;
495	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
496	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
497	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
498	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
499			      SISL_REQ_FLAGS_SUP_UNDERRUN |
500			      SISL_REQ_FLAGS_TMF_CMD);
501	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));
502
503	rc = afu->send_cmd(afu, cmd);
504	if (unlikely(rc)) {
505		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
506		cfg->tmf_active = false;
507		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
508		goto out;
509	}
510
511	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
512	to = msecs_to_jiffies(5000);
513	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
514						       !cfg->tmf_active,
515						       cfg->tmf_slock,
516						       to);
517	if (!to) {
518		dev_err(dev, "%s: TMF timed out\n", __func__);
519		rc = -ETIMEDOUT;
520		needs_deletion = true;
521	} else if (cmd->cmd_aborted) {
522		dev_err(dev, "%s: TMF aborted\n", __func__);
523		rc = -EAGAIN;
524	} else if (cmd->sa.ioasc) {
525		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
526			__func__, cmd->sa.ioasc);
527		rc = -EIO;
528	}
529	cfg->tmf_active = false;
530	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
531
532	if (needs_deletion) {
533		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
534		list_del(&cmd->list);
535		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
536	}
537out:
538	kfree(buf);
539	return rc;
540}
541
542/**
543 * cxlflash_driver_info() - information handler for this host driver
544 * @host:	SCSI host associated with device.
545 *
546 * Return: A string describing the device.
547 */
548static const char *cxlflash_driver_info(struct Scsi_Host *host)
549{
550	return CXLFLASH_ADAPTER_NAME;
551}
552
553/**
554 * cxlflash_queuecommand() - sends a mid-layer request
555 * @host:	SCSI host associated with device.
556 * @scp:	SCSI command to send.
557 *
558 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
559 */
560static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
561{
562	struct cxlflash_cfg *cfg = shost_priv(host);
563	struct afu *afu = cfg->afu;
564	struct device *dev = &cfg->dev->dev;
565	struct afu_cmd *cmd = sc_to_afuci(scp);
566	struct scatterlist *sg = scsi_sglist(scp);
567	int hwq_index = cmd_to_target_hwq(host, scp, afu);
568	struct hwq *hwq = get_hwq(afu, hwq_index);
569	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
570	ulong lock_flags;
571	int rc = 0;
572
573	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
574			    "cdb=(%08x-%08x-%08x-%08x)\n",
575			    __func__, scp, host->host_no, scp->device->channel,
576			    scp->device->id, scp->device->lun,
577			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
578			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
579			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
580			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
581
582	/*
583	 * If a Task Management Function is active, wait for it to complete
584	 * before continuing with regular commands.
585	 */
586	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
587	if (cfg->tmf_active) {
588		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
589		rc = SCSI_MLQUEUE_HOST_BUSY;
590		goto out;
591	}
592	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
593
594	switch (cfg->state) {
595	case STATE_PROBING:
596	case STATE_PROBED:
597	case STATE_RESET:
598		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
599		rc = SCSI_MLQUEUE_HOST_BUSY;
600		goto out;
601	case STATE_FAILTERM:
602		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
603		scp->result = (DID_NO_CONNECT << 16);
604		scsi_done(scp);
605		rc = 0;
606		goto out;
607	default:
608		atomic_inc(&afu->cmds_active);
609		break;
610	}
611
612	if (likely(sg)) {
613		cmd->rcb.data_len = sg->length;
614		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
615	}
616
617	cmd->scp = scp;
618	cmd->parent = afu;
619	cmd->hwq_index = hwq_index;
620
621	cmd->sa.ioasc = 0;
622	cmd->rcb.ctx_id = hwq->ctx_hndl;
623	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
624	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
625	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);
626
627	if (scp->sc_data_direction == DMA_TO_DEVICE)
628		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
629
630	cmd->rcb.req_flags = req_flags;
631	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));
632
633	rc = afu->send_cmd(afu, cmd);
634	atomic_dec(&afu->cmds_active);
635out:
636	return rc;
637}
638
639/**
640 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
641 * @cfg:	Internal structure associated with the host.
642 */
643static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
644{
645	struct pci_dev *pdev = cfg->dev;
646
647	if (pci_channel_offline(pdev))
648		wait_event_timeout(cfg->reset_waitq,
649				   !pci_channel_offline(pdev),
650				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
651}
652
653/**
654 * free_mem() - free memory associated with the AFU
655 * @cfg:	Internal structure associated with the host.
656 */
657static void free_mem(struct cxlflash_cfg *cfg)
658{
659	struct afu *afu = cfg->afu;
660
661	if (cfg->afu) {
662		free_pages((ulong)afu, get_order(sizeof(struct afu)));
663		cfg->afu = NULL;
664	}
665}
666
667/**
668 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
669 * @cfg:	Internal structure associated with the host.
670 */
671static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
672{
673	if (cfg->async_reset_cookie == 0)
674		return;
675
676	/* Wait until all async calls prior to this cookie have completed */
677	async_synchronize_cookie(cfg->async_reset_cookie + 1);
678	cfg->async_reset_cookie = 0;
679}
680
681/**
682 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
683 * @cfg:	Internal structure associated with the host.
684 *
685 * Safe to call with AFU in a partially allocated/initialized state.
686 *
687 * Cancels scheduled worker threads, waits for any active internal AFU
688 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
689 */
690static void stop_afu(struct cxlflash_cfg *cfg)
691{
692	struct afu *afu = cfg->afu;
693	struct hwq *hwq;
694	int i;
695
696	cancel_work_sync(&cfg->work_q);
697	if (!current_is_async())
698		cxlflash_reset_sync(cfg);
699
700	if (likely(afu)) {
701		while (atomic_read(&afu->cmds_active))
702			ssleep(1);
703
704		if (afu_is_irqpoll_enabled(afu)) {
705			for (i = 0; i < afu->num_hwqs; i++) {
706				hwq = get_hwq(afu, i);
707
708				irq_poll_disable(&hwq->irqpoll);
709			}
710		}
711
712		if (likely(afu->afu_map)) {
713			cfg->ops->psa_unmap(afu->afu_map);
714			afu->afu_map = NULL;
715		}
716	}
717}
718
719/**
720 * term_intr() - disables all AFU interrupts
721 * @cfg:	Internal structure associated with the host.
722 * @level:	Depth of allocation, where to begin waterfall tear down.
723 * @index:	Index of the hardware queue.
724 *
725 * Safe to call with AFU/MC in partially allocated/initialized state.
726 */
727static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
728		      u32 index)
729{
730	struct afu *afu = cfg->afu;
731	struct device *dev = &cfg->dev->dev;
732	struct hwq *hwq;
733
734	if (!afu) {
735		dev_err(dev, "%s: returning with NULL afu\n", __func__);
736		return;
737	}
738
739	hwq = get_hwq(afu, index);
740
741	if (!hwq->ctx_cookie) {
742		dev_err(dev, "%s: returning with NULL MC\n", __func__);
743		return;
744	}
745
746	switch (level) {
747	case UNMAP_THREE:
748		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
749		if (index == PRIMARY_HWQ)
750			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
751		fallthrough;
752	case UNMAP_TWO:
753		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
754		fallthrough;
755	case UNMAP_ONE:
756		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
757		fallthrough;
758	case FREE_IRQ:
759		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
760		fallthrough;
761	case UNDO_NOOP:
762		/* No action required */
763		break;
764	}
765}
766
767/**
768 * term_mc() - terminates the master context
769 * @cfg:	Internal structure associated with the host.
770 * @index:	Index of the hardware queue.
771 *
772 * Safe to call with AFU/MC in partially allocated/initialized state.
773 */
774static void term_mc(struct cxlflash_cfg *cfg, u32 index)
775{
776	struct afu *afu = cfg->afu;
777	struct device *dev = &cfg->dev->dev;
778	struct hwq *hwq;
779	ulong lock_flags;
780
781	if (!afu) {
782		dev_err(dev, "%s: returning with NULL afu\n", __func__);
783		return;
784	}
785
786	hwq = get_hwq(afu, index);
787
788	if (!hwq->ctx_cookie) {
789		dev_err(dev, "%s: returning with NULL MC\n", __func__);
790		return;
791	}
792
793	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
794	if (index != PRIMARY_HWQ)
795		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
796	hwq->ctx_cookie = NULL;
797
798	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
799	hwq->hrrq_online = false;
800	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);
801
802	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
803	flush_pending_cmds(hwq);
804	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
805}
806
807/**
808 * term_afu() - terminates the AFU
809 * @cfg:	Internal structure associated with the host.
810 *
811 * Safe to call with AFU/MC in partially allocated/initialized state.
812 */
813static void term_afu(struct cxlflash_cfg *cfg)
814{
815	struct device *dev = &cfg->dev->dev;
816	int k;
817
818	/*
819	 * Tear down is carefully orchestrated to ensure
820	 * no interrupts can come in when the problem state
821	 * area is unmapped.
822	 *
823	 * 1) Disable all AFU interrupts for each master
824	 * 2) Unmap the problem state area
825	 * 3) Stop each master context
826	 */
827	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
828		term_intr(cfg, UNMAP_THREE, k);
829
830	stop_afu(cfg);
831
832	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
833		term_mc(cfg, k);
834
835	dev_dbg(dev, "%s: returning\n", __func__);
836}
837
838/**
839 * notify_shutdown() - notifies device of pending shutdown
840 * @cfg:	Internal structure associated with the host.
841 * @wait:	Whether to wait for shutdown processing to complete.
842 *
843 * This function will notify the AFU that the adapter is being shutdown
844 * and will wait for shutdown processing to complete if wait is true.
845 * This notification should flush pending I/Os to the device and halt
846 * further I/Os until the next AFU reset is issued and device restarted.
847 */
848static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
849{
850	struct afu *afu = cfg->afu;
851	struct device *dev = &cfg->dev->dev;
852	struct dev_dependent_vals *ddv;
853	__be64 __iomem *fc_port_regs;
854	u64 reg, status;
855	int i, retry_cnt = 0;
856
857	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
858	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
859		return;
860
861	if (!afu || !afu->afu_map) {
862		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
863		return;
864	}
865
866	/* Notify AFU */
867	for (i = 0; i < cfg->num_fc_ports; i++) {
868		fc_port_regs = get_fc_port_regs(cfg, i);
869
870		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
871		reg |= SISL_FC_SHUTDOWN_NORMAL;
872		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
873	}
874
875	if (!wait)
876		return;
877
878	/* Wait up to 1.5 seconds for shutdown processing to complete */
879	for (i = 0; i < cfg->num_fc_ports; i++) {
880		fc_port_regs = get_fc_port_regs(cfg, i);
881		retry_cnt = 0;
882
883		while (true) {
884			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
885			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
886				break;
887			if (++retry_cnt >= MC_RETRY_CNT) {
888				dev_dbg(dev, "%s: port %d shutdown processing "
889					"not yet completed\n", __func__, i);
890				break;
891			}
892			msleep(100 * retry_cnt);
893		}
894	}
895}
896
897/**
898 * cxlflash_get_minor() - gets the first available minor number
899 *
900 * Return: Unique minor number that can be used to create the character device.
901 */
902static int cxlflash_get_minor(void)
903{
904	int minor;
905	long bit;
906
907	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
908	if (bit >= CXLFLASH_MAX_ADAPTERS)
909		return -1;
910
911	minor = bit & MINORMASK;
912	set_bit(minor, cxlflash_minor);
913	return minor;
914}
915
916/**
917 * cxlflash_put_minor() - releases the minor number
918 * @minor:	Minor number that is no longer needed.
919 */
920static void cxlflash_put_minor(int minor)
921{
922	clear_bit(minor, cxlflash_minor);
923}
924
925/**
926 * cxlflash_release_chrdev() - release the character device for the host
927 * @cfg:	Internal structure associated with the host.
928 */
929static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
930{
931	device_unregister(cfg->chardev);
932	cfg->chardev = NULL;
933	cdev_del(&cfg->cdev);
934	cxlflash_put_minor(MINOR(cfg->cdev.dev));
935}
936
937/**
938 * cxlflash_remove() - PCI entry point to tear down host
939 * @pdev:	PCI device associated with the host.
940 *
941 * Safe to use as a cleanup in partially allocated/initialized state. Note that
942 * the reset_waitq is flushed as part of the stop/termination of user contexts.
943 */
944static void cxlflash_remove(struct pci_dev *pdev)
945{
946	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
947	struct device *dev = &pdev->dev;
948	ulong lock_flags;
949
950	if (!pci_is_enabled(pdev)) {
951		dev_dbg(dev, "%s: Device is disabled\n", __func__);
952		return;
953	}
954
955	/* Yield to running recovery threads before continuing with remove */
956	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
957				     cfg->state != STATE_PROBING);
958	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
959	if (cfg->tmf_active)
960		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
961						  !cfg->tmf_active,
962						  cfg->tmf_slock);
963	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
964
965	/* Notify AFU and wait for shutdown processing to complete */
966	notify_shutdown(cfg, true);
967
968	cfg->state = STATE_FAILTERM;
969	cxlflash_stop_term_user_contexts(cfg);
970
971	switch (cfg->init_state) {
972	case INIT_STATE_CDEV:
973		cxlflash_release_chrdev(cfg);
974		fallthrough;
975	case INIT_STATE_SCSI:
976		cxlflash_term_local_luns(cfg);
977		scsi_remove_host(cfg->host);
978		fallthrough;
979	case INIT_STATE_AFU:
980		term_afu(cfg);
981		fallthrough;
982	case INIT_STATE_PCI:
983		cfg->ops->destroy_afu(cfg->afu_cookie);
984		pci_disable_device(pdev);
985		fallthrough;
986	case INIT_STATE_NONE:
987		free_mem(cfg);
988		scsi_host_put(cfg->host);
989		break;
990	}
991
992	dev_dbg(dev, "%s: returning\n", __func__);
993}
994
995/**
996 * alloc_mem() - allocates the AFU and its command pool
997 * @cfg:	Internal structure associated with the host.
998 *
999 * A partially allocated state remains on failure.
1000 *
1001 * Return:
1002 *	0 on success
1003 *	-ENOMEM on failure to allocate memory
1004 */
1005static int alloc_mem(struct cxlflash_cfg *cfg)
1006{
1007	int rc = 0;
1008	struct device *dev = &cfg->dev->dev;
1009
1010	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
1011	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1012					    get_order(sizeof(struct afu)));
1013	if (unlikely(!cfg->afu)) {
1014		dev_err(dev, "%s: cannot get %d free pages\n",
1015			__func__, get_order(sizeof(struct afu)));
1016		rc = -ENOMEM;
1017		goto out;
1018	}
1019	cfg->afu->parent = cfg;
1020	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
1021	cfg->afu->afu_map = NULL;
1022out:
1023	return rc;
1024}
1025
1026/**
1027 * init_pci() - initializes the host as a PCI device
1028 * @cfg:	Internal structure associated with the host.
1029 *
1030 * Return: 0 on success, -errno on failure
1031 */
1032static int init_pci(struct cxlflash_cfg *cfg)
1033{
1034	struct pci_dev *pdev = cfg->dev;
1035	struct device *dev = &cfg->dev->dev;
1036	int rc = 0;
1037
1038	rc = pci_enable_device(pdev);
1039	if (rc || pci_channel_offline(pdev)) {
1040		if (pci_channel_offline(pdev)) {
1041			cxlflash_wait_for_pci_err_recovery(cfg);
1042			rc = pci_enable_device(pdev);
1043		}
1044
1045		if (rc) {
1046			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
1047			cxlflash_wait_for_pci_err_recovery(cfg);
1048			goto out;
1049		}
1050	}
1051
1052out:
1053	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1054	return rc;
1055}
1056
1057/**
1058 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
1059 * @cfg:	Internal structure associated with the host.
1060 *
1061 * Return: 0 on success, -errno on failure
1062 */
1063static int init_scsi(struct cxlflash_cfg *cfg)
1064{
1065	struct pci_dev *pdev = cfg->dev;
1066	struct device *dev = &cfg->dev->dev;
1067	int rc = 0;
1068
1069	rc = scsi_add_host(cfg->host, &pdev->dev);
1070	if (rc) {
1071		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
1072		goto out;
1073	}
1074
1075	scsi_scan_host(cfg->host);
1076
1077out:
1078	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1079	return rc;
1080}
1081
1082/**
1083 * set_port_online() - transitions the specified host FC port to online state
1084 * @fc_regs:	Top of MMIO region defined for specified port.
1085 *
1086 * The provided MMIO region must be mapped prior to call. Online state means
1087 * that the FC link layer has synced, completed the handshaking process, and
1088 * is ready for login to start.
1089 */
1090static void set_port_online(__be64 __iomem *fc_regs)
1091{
1092	u64 cmdcfg;
1093
1094	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1095	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
1096	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
1097	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1098}
1099
1100/**
1101 * set_port_offline() - transitions the specified host FC port to offline state
1102 * @fc_regs:	Top of MMIO region defined for specified port.
1103 *
1104 * The provided MMIO region must be mapped prior to call.
1105 */
1106static void set_port_offline(__be64 __iomem *fc_regs)
1107{
1108	u64 cmdcfg;
1109
1110	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
1111	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
1112	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
1113	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
1114}
1115
1116/**
1117 * wait_port_online() - waits for the specified host FC port come online
1118 * @fc_regs:	Top of MMIO region defined for specified port.
1119 * @delay_us:	Number of microseconds to delay between reading port status.
1120 * @nretry:	Number of cycles to retry reading port status.
1121 *
1122 * The provided MMIO region must be mapped prior to call. This will timeout
1123 * when the cable is not plugged in.
1124 *
1125 * Return:
1126 *	TRUE (1) when the specified port is online
1127 *	FALSE (0) when the specified port fails to come online after timeout
1128 */
1129static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1130{
1131	u64 status;
1132
1133	WARN_ON(delay_us < 1000);
1134
1135	do {
1136		msleep(delay_us / 1000);
1137		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1138		if (status == U64_MAX)
1139			nretry /= 2;
1140	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
1141		 nretry--);
1142
1143	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
1144}
1145
1146/**
1147 * wait_port_offline() - waits for the specified host FC port go offline
1148 * @fc_regs:	Top of MMIO region defined for specified port.
1149 * @delay_us:	Number of microseconds to delay between reading port status.
1150 * @nretry:	Number of cycles to retry reading port status.
1151 *
1152 * The provided MMIO region must be mapped prior to call.
1153 *
1154 * Return:
1155 *	TRUE (1) when the specified port is offline
1156 *	FALSE (0) when the specified port fails to go offline after timeout
1157 */
1158static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
1159{
1160	u64 status;
1161
1162	WARN_ON(delay_us < 1000);
1163
1164	do {
1165		msleep(delay_us / 1000);
1166		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
1167		if (status == U64_MAX)
1168			nretry /= 2;
1169	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
1170		 nretry--);
1171
1172	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
1173}
1174
1175/**
1176 * afu_set_wwpn() - configures the WWPN for the specified host FC port
1177 * @afu:	AFU associated with the host that owns the specified FC port.
1178 * @port:	Port number being configured.
1179 * @fc_regs:	Top of MMIO region defined for specified port.
1180 * @wwpn:	The world-wide-port-number previously discovered for port.
1181 *
1182 * The provided MMIO region must be mapped prior to call. As part of the
1183 * sequence to configure the WWPN, the port is toggled offline and then back
1184 * online. This toggling action can cause this routine to delay up to a few
1185 * seconds. When configured to use the internal LUN feature of the AFU, a
1186 * failure to come online is overridden.
1187 */
1188static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
1189			 u64 wwpn)
1190{
1191	struct cxlflash_cfg *cfg = afu->parent;
1192	struct device *dev = &cfg->dev->dev;
1193
1194	set_port_offline(fc_regs);
1195	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1196			       FC_PORT_STATUS_RETRY_CNT)) {
1197		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
1198			__func__, port);
1199	}
1200
1201	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);
1202
1203	set_port_online(fc_regs);
1204	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1205			      FC_PORT_STATUS_RETRY_CNT)) {
1206		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
1207			__func__, port);
1208	}
1209}
1210
1211/**
1212 * afu_link_reset() - resets the specified host FC port
1213 * @afu:	AFU associated with the host that owns the specified FC port.
1214 * @port:	Port number being configured.
1215 * @fc_regs:	Top of MMIO region defined for specified port.
1216 *
1217 * The provided MMIO region must be mapped prior to call. The sequence to
1218 * reset the port involves toggling it offline and then back online. This
1219 * action can cause this routine to delay up to a few seconds. An effort
1220 * is made to maintain link with the device by switching to host to use
1221 * the alternate port exclusively while the reset takes place.
1222 * failure to come online is overridden.
1223 */
1224static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
1225{
1226	struct cxlflash_cfg *cfg = afu->parent;
1227	struct device *dev = &cfg->dev->dev;
1228	u64 port_sel;
1229
1230	/* first switch the AFU to the other links, if any */
1231	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
1232	port_sel &= ~(1ULL << port);
1233	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1234	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1235
1236	set_port_offline(fc_regs);
1237	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1238			       FC_PORT_STATUS_RETRY_CNT))
1239		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
1240			__func__, port);
1241
1242	set_port_online(fc_regs);
1243	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
1244			      FC_PORT_STATUS_RETRY_CNT))
1245		dev_err(dev, "%s: wait on port %d to go online timed out\n",
1246			__func__, port);
1247
1248	/* switch back to include this port */
1249	port_sel |= (1ULL << port);
1250	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
1251	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);
1252
1253	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
1254}
1255
1256/**
1257 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
1258 * @afu:	AFU associated with the host.
1259 */
1260static void afu_err_intr_init(struct afu *afu)
1261{
1262	struct cxlflash_cfg *cfg = afu->parent;
1263	__be64 __iomem *fc_port_regs;
1264	int i;
1265	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
1266	u64 reg;
1267
1268	/* global async interrupts: AFU clears afu_ctrl on context exit
1269	 * if async interrupts were sent to that context. This prevents
1270	 * the AFU form sending further async interrupts when
1271	 * there is
1272	 * nobody to receive them.
1273	 */
1274
1275	/* mask all */
1276	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
1277	/* set LISN# to send and point to primary master context */
1278	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);
1279
1280	if (afu->internal_lun)
1281		reg |= 1;	/* Bit 63 indicates local lun */
1282	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
1283	/* clear all */
1284	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1285	/* unmask bits that are of interest */
1286	/* note: afu can send an interrupt after this step */
1287	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
1288	/* clear again in case a bit came on after previous clear but before */
1289	/* unmask */
1290	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
1291
1292	/* Clear/Set internal lun bits */
1293	fc_port_regs = get_fc_port_regs(cfg, 0);
1294	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
1295	reg &= SISL_FC_INTERNAL_MASK;
1296	if (afu->internal_lun)
1297		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
1298	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
1299
1300	/* now clear FC errors */
1301	for (i = 0; i < cfg->num_fc_ports; i++) {
1302		fc_port_regs = get_fc_port_regs(cfg, i);
1303
1304		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
1305		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1306	}
1307
1308	/* sync interrupts for master's IOARRIN write */
1309	/* note that unlike asyncs, there can be no pending sync interrupts */
1310	/* at this time (this is a fresh context and master has not written */
1311	/* IOARRIN yet), so there is nothing to clear. */
1312
1313	/* set LISN#, it is always sent to the context that wrote IOARRIN */
1314	for (i = 0; i < afu->num_hwqs; i++) {
1315		hwq = get_hwq(afu, i);
1316
1317		reg = readq_be(&hwq->host_map->ctx_ctrl);
1318		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
1319		reg |= SISL_MSI_SYNC_ERROR;
1320		writeq_be(reg, &hwq->host_map->ctx_ctrl);
1321		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
1322	}
1323}
1324
1325/**
1326 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
1327 * @irq:	Interrupt number.
1328 * @data:	Private data provided at interrupt registration, the AFU.
1329 *
1330 * Return: Always return IRQ_HANDLED.
1331 */
1332static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
1333{
1334	struct hwq *hwq = (struct hwq *)data;
1335	struct cxlflash_cfg *cfg = hwq->afu->parent;
1336	struct device *dev = &cfg->dev->dev;
1337	u64 reg;
1338	u64 reg_unmasked;
1339
1340	reg = readq_be(&hwq->host_map->intr_status);
1341	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);
1342
1343	if (reg_unmasked == 0UL) {
1344		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
1345			__func__, reg);
1346		goto cxlflash_sync_err_irq_exit;
1347	}
1348
1349	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
1350		__func__, reg);
1351
1352	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);
1353
1354cxlflash_sync_err_irq_exit:
1355	return IRQ_HANDLED;
1356}
1357
1358/**
1359 * process_hrrq() - process the read-response queue
1360 * @hwq:	HWQ associated with the host.
1361 * @doneq:	Queue of commands harvested from the RRQ.
1362 * @budget:	Threshold of RRQ entries to process.
1363 *
1364 * This routine must be called holding the disabled RRQ spin lock.
1365 *
1366 * Return: The number of entries processed.
1367 */
1368static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
1369{
1370	struct afu *afu = hwq->afu;
1371	struct afu_cmd *cmd;
1372	struct sisl_ioasa *ioasa;
1373	struct sisl_ioarcb *ioarcb;
1374	bool toggle = hwq->toggle;
1375	int num_hrrq = 0;
1376	u64 entry,
1377	    *hrrq_start = hwq->hrrq_start,
1378	    *hrrq_end = hwq->hrrq_end,
1379	    *hrrq_curr = hwq->hrrq_curr;
1380
1381	/* Process ready RRQ entries up to the specified budget (if any) */
1382	while (true) {
1383		entry = *hrrq_curr;
1384
1385		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
1386			break;
1387
1388		entry &= ~SISL_RESP_HANDLE_T_BIT;
1389
1390		if (afu_is_sq_cmd_mode(afu)) {
1391			ioasa = (struct sisl_ioasa *)entry;
1392			cmd = container_of(ioasa, struct afu_cmd, sa);
1393		} else {
1394			ioarcb = (struct sisl_ioarcb *)entry;
1395			cmd = container_of(ioarcb, struct afu_cmd, rcb);
1396		}
1397
1398		list_add_tail(&cmd->queue, doneq);
1399
1400		/* Advance to next entry or wrap and flip the toggle bit */
1401		if (hrrq_curr < hrrq_end)
1402			hrrq_curr++;
1403		else {
1404			hrrq_curr = hrrq_start;
1405			toggle ^= SISL_RESP_HANDLE_T_BIT;
1406		}
1407
1408		atomic_inc(&hwq->hsq_credits);
1409		num_hrrq++;
1410
1411		if (budget > 0 && num_hrrq >= budget)
1412			break;
1413	}
1414
1415	hwq->hrrq_curr = hrrq_curr;
1416	hwq->toggle = toggle;
1417
1418	return num_hrrq;
1419}
1420
1421/**
1422 * process_cmd_doneq() - process a queue of harvested RRQ commands
1423 * @doneq:	Queue of completed commands.
1424 *
1425 * Note that upon return the queue can no longer be trusted.
1426 */
1427static void process_cmd_doneq(struct list_head *doneq)
1428{
1429	struct afu_cmd *cmd, *tmp;
1430
1431	WARN_ON(list_empty(doneq));
1432
1433	list_for_each_entry_safe(cmd, tmp, doneq, queue)
1434		cmd_complete(cmd);
1435}
1436
1437/**
1438 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
1439 * @irqpoll:	IRQ poll structure associated with queue to poll.
1440 * @budget:	Threshold of RRQ entries to process per poll.
1441 *
1442 * Return: The number of entries processed.
1443 */
1444static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
1445{
1446	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
1447	unsigned long hrrq_flags;
1448	LIST_HEAD(doneq);
1449	int num_entries = 0;
1450
1451	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1452
1453	num_entries = process_hrrq(hwq, &doneq, budget);
1454	if (num_entries < budget)
1455		irq_poll_complete(irqpoll);
1456
1457	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1458
1459	process_cmd_doneq(&doneq);
1460	return num_entries;
1461}
1462
1463/**
1464 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
1465 * @irq:	Interrupt number.
1466 * @data:	Private data provided at interrupt registration, the AFU.
1467 *
1468 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
1469 */
1470static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
1471{
1472	struct hwq *hwq = (struct hwq *)data;
1473	struct afu *afu = hwq->afu;
1474	unsigned long hrrq_flags;
1475	LIST_HEAD(doneq);
1476	int num_entries = 0;
1477
1478	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);
1479
1480	/* Silently drop spurious interrupts when queue is not online */
1481	if (!hwq->hrrq_online) {
1482		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1483		return IRQ_HANDLED;
1484	}
1485
1486	if (afu_is_irqpoll_enabled(afu)) {
1487		irq_poll_sched(&hwq->irqpoll);
1488		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1489		return IRQ_HANDLED;
1490	}
1491
1492	num_entries = process_hrrq(hwq, &doneq, -1);
1493	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
1494
1495	if (num_entries == 0)
1496		return IRQ_NONE;
1497
1498	process_cmd_doneq(&doneq);
1499	return IRQ_HANDLED;
1500}
1501
1502/*
1503 * Asynchronous interrupt information table
1504 *
1505 * NOTE:
1506 *	- Order matters here as this array is indexed by bit position.
1507 *
1508 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
1509 *	  as complex and complains due to a lack of parentheses/braces.
1510 */
1511#define ASTATUS_FC(_a, _b, _c, _d)					 \
1512	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }
1513
1514#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
1515	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
1516	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
1517	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
1518	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
1519	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
1520	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
1521	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
1522	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)
1523
1524static const struct asyc_intr_info ainfo[] = {
1525	BUILD_SISL_ASTATUS_FC_PORT(1),
1526	BUILD_SISL_ASTATUS_FC_PORT(0),
1527	BUILD_SISL_ASTATUS_FC_PORT(3),
1528	BUILD_SISL_ASTATUS_FC_PORT(2)
1529};
1530
1531/**
1532 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
1533 * @irq:	Interrupt number.
1534 * @data:	Private data provided at interrupt registration, the AFU.
1535 *
1536 * Return: Always return IRQ_HANDLED.
1537 */
1538static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
1539{
1540	struct hwq *hwq = (struct hwq *)data;
1541	struct afu *afu = hwq->afu;
1542	struct cxlflash_cfg *cfg = afu->parent;
1543	struct device *dev = &cfg->dev->dev;
1544	const struct asyc_intr_info *info;
1545	struct sisl_global_map __iomem *global = &afu->afu_map->global;
1546	__be64 __iomem *fc_port_regs;
1547	u64 reg_unmasked;
1548	u64 reg;
1549	u64 bit;
1550	u8 port;
1551
1552	reg = readq_be(&global->regs.aintr_status);
1553	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);
1554
1555	if (unlikely(reg_unmasked == 0)) {
1556		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
1557			__func__, reg);
1558		goto out;
1559	}
1560
1561	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
1562	writeq_be(reg_unmasked, &global->regs.aintr_clear);
1563
1564	/* Check each bit that is on */
1565	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
1566		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
1567			WARN_ON_ONCE(1);
1568			continue;
1569		}
1570
1571		info = &ainfo[bit];
1572		if (unlikely(info->status != 1ULL << bit)) {
1573			WARN_ON_ONCE(1);
1574			continue;
1575		}
1576
1577		port = info->port;
1578		fc_port_regs = get_fc_port_regs(cfg, port);
1579
1580		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
1581			__func__, port, info->desc,
1582		       readq_be(&fc_port_regs[FC_STATUS / 8]));
1583
1584		/*
1585		 * Do link reset first, some OTHER errors will set FC_ERROR
1586		 * again if cleared before or w/o a reset
1587		 */
1588		if (info->action & LINK_RESET) {
1589			dev_err(dev, "%s: FC Port %d: resetting link\n",
1590				__func__, port);
1591			cfg->lr_state = LINK_RESET_REQUIRED;
1592			cfg->lr_port = port;
1593			schedule_work(&cfg->work_q);
1594		}
1595
1596		if (info->action & CLR_FC_ERROR) {
1597			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);
1598
1599			/*
1600			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
1601			 * should be the same and tracing one is sufficient.
1602			 */
1603
1604			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
1605				__func__, port, reg);
1606
1607			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
1608			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
1609		}
1610
1611		if (info->action & SCAN_HOST) {
1612			atomic_inc(&cfg->scan_host_needed);
1613			schedule_work(&cfg->work_q);
1614		}
1615	}
1616
1617out:
1618	return IRQ_HANDLED;
1619}
1620
1621/**
1622 * read_vpd() - obtains the WWPNs from VPD
1623 * @cfg:	Internal structure associated with the host.
1624 * @wwpn:	Array of size MAX_FC_PORTS to pass back WWPNs
1625 *
1626 * Return: 0 on success, -errno on failure
1627 */
1628static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
1629{
1630	struct device *dev = &cfg->dev->dev;
1631	struct pci_dev *pdev = cfg->dev;
1632	int i, k, rc = 0;
1633	unsigned int kw_size;
1634	ssize_t vpd_size;
1635	char vpd_data[CXLFLASH_VPD_LEN];
1636	char tmp_buf[WWPN_BUF_LEN] = { 0 };
1637	const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *)
1638						cfg->dev_id->driver_data;
1639	const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED;
1640	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1641
1642	/* Get the VPD data from the device */
1643	vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data));
1644	if (unlikely(vpd_size <= 0)) {
1645		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1646			__func__, vpd_size);
1647		rc = -ENODEV;
1648		goto out;
1649	}
1650
1651	/*
1652	 * Find the offset of the WWPN tag within the read only
1653	 * VPD data and validate the found field (partials are
1654	 * no good to us). Convert the ASCII data to an integer
1655	 * value. Note that we must copy to a temporary buffer
1656	 * because the conversion service requires that the ASCII
1657	 * string be terminated.
1658	 *
1659	 * Allow for WWPN not being found for all devices, setting
1660	 * the returned WWPN to zero when not found. Notify with a
1661	 * log error for cards that should have had WWPN keywords
1662	 * in the VPD - cards requiring WWPN will not have their
1663	 * ports programmed and operate in an undefined state.
1664	 */
1665	for (k = 0; k < cfg->num_fc_ports; k++) {
1666		i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
1667						 wwpn_vpd_tags[k], &kw_size);
1668		if (i == -ENOENT) {
1669			if (wwpn_vpd_required)
1670				dev_err(dev, "%s: Port %d WWPN not found\n",
1671					__func__, k);
1672			wwpn[k] = 0ULL;
1673			continue;
1674		}
1675
1676		if (i < 0 || kw_size != WWPN_LEN) {
1677			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
1678				__func__, k);
1679			rc = -ENODEV;
1680			goto out;
1681		}
1682
1683		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
1684		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
1685		if (unlikely(rc)) {
1686			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
1687				__func__, k);
1688			rc = -ENODEV;
1689			goto out;
1690		}
1691
1692		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
1693	}
1694
1695out:
1696	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1697	return rc;
1698}
1699
1700/**
1701 * init_pcr() - initialize the provisioning and control registers
1702 * @cfg:	Internal structure associated with the host.
1703 *
1704 * Also sets up fast access to the mapped registers and initializes AFU
1705 * command fields that never change.
1706 */
1707static void init_pcr(struct cxlflash_cfg *cfg)
1708{
1709	struct afu *afu = cfg->afu;
1710	struct sisl_ctrl_map __iomem *ctrl_map;
1711	struct hwq *hwq;
1712	void *cookie;
1713	int i;
1714
1715	for (i = 0; i < MAX_CONTEXT; i++) {
1716		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
1717		/* Disrupt any clients that could be running */
1718		/* e.g. clients that survived a master restart */
1719		writeq_be(0, &ctrl_map->rht_start);
1720		writeq_be(0, &ctrl_map->rht_cnt_id);
1721		writeq_be(0, &ctrl_map->ctx_cap);
1722	}
1723
1724	/* Copy frequently used fields into hwq */
1725	for (i = 0; i < afu->num_hwqs; i++) {
1726		hwq = get_hwq(afu, i);
1727		cookie = hwq->ctx_cookie;
1728
1729		hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie);
1730		hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host;
1731		hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl;
1732
1733		/* Program the Endian Control for the master context */
1734		writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl);
1735	}
1736}
1737
1738/**
1739 * init_global() - initialize AFU global registers
1740 * @cfg:	Internal structure associated with the host.
1741 */
1742static int init_global(struct cxlflash_cfg *cfg)
1743{
1744	struct afu *afu = cfg->afu;
1745	struct device *dev = &cfg->dev->dev;
1746	struct hwq *hwq;
1747	struct sisl_host_map __iomem *hmap;
1748	__be64 __iomem *fc_port_regs;
1749	u64 wwpn[MAX_FC_PORTS];	/* wwpn of AFU ports */
1750	int i = 0, num_ports = 0;
1751	int rc = 0;
1752	int j;
1753	void *ctx;
1754	u64 reg;
1755
1756	rc = read_vpd(cfg, &wwpn[0]);
1757	if (rc) {
1758		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1759		goto out;
1760	}
1761
1762	/* Set up RRQ and SQ in HWQ for master issued cmds */
1763	for (i = 0; i < afu->num_hwqs; i++) {
1764		hwq = get_hwq(afu, i);
1765		hmap = hwq->host_map;
1766
1767		writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start);
1768		writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end);
1769		hwq->hrrq_online = true;
1770
1771		if (afu_is_sq_cmd_mode(afu)) {
1772			writeq_be((u64)hwq->hsq_start, &hmap->sq_start);
1773			writeq_be((u64)hwq->hsq_end, &hmap->sq_end);
1774		}
1775	}
1776
1777	/* AFU configuration */
1778	reg = readq_be(&afu->afu_map->global.regs.afu_config);
1779	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
1780	/* enable all auto retry options and control endianness */
1781	/* leave others at default: */
1782	/* CTX_CAP write protected, mbox_r does not clear on read and */
1783	/* checker on if dual afu */
1784	writeq_be(reg, &afu->afu_map->global.regs.afu_config);
1785
1786	/* Global port select: select either port */
1787	if (afu->internal_lun) {
1788		/* Only use port 0 */
1789		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
1790		num_ports = 0;
1791	} else {
1792		writeq_be(PORT_MASK(cfg->num_fc_ports),
1793			  &afu->afu_map->global.regs.afu_port_sel);
1794		num_ports = cfg->num_fc_ports;
1795	}
1796
1797	for (i = 0; i < num_ports; i++) {
1798		fc_port_regs = get_fc_port_regs(cfg, i);
1799
1800		/* Unmask all errors (but they are still masked at AFU) */
1801		writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]);
1802		/* Clear CRC error cnt & set a threshold */
1803		(void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]);
1804		writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]);
1805
1806		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
1807		if (wwpn[i] != 0)
1808			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
1809		/* Programming WWPN back to back causes additional
1810		 * offline/online transitions and a PLOGI
1811		 */
1812		msleep(100);
1813	}
1814
1815	if (afu_is_ocxl_lisn(afu)) {
1816		/* Set up the LISN effective address for each master */
1817		for (i = 0; i < afu->num_hwqs; i++) {
1818			hwq = get_hwq(afu, i);
1819			ctx = hwq->ctx_cookie;
1820
1821			for (j = 0; j < hwq->num_irqs; j++) {
1822				reg = cfg->ops->get_irq_objhndl(ctx, j);
1823				writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]);
1824			}
1825
1826			reg = hwq->ctx_hndl;
1827			writeq_be(SISL_LISN_PASID(reg, reg),
1828				  &hwq->ctrl_map->lisn_pasid[0]);
1829			writeq_be(SISL_LISN_PASID(0UL, reg),
1830				  &hwq->ctrl_map->lisn_pasid[1]);
1831		}
1832	}
1833
1834	/* Set up master's own CTX_CAP to allow real mode, host translation */
1835	/* tables, afu cmds and read/write GSCSI cmds. */
1836	/* First, unlock ctx_cap write by reading mbox */
1837	for (i = 0; i < afu->num_hwqs; i++) {
1838		hwq = get_hwq(afu, i);
1839
1840		(void)readq_be(&hwq->ctrl_map->mbox_r);	/* unlock ctx_cap */
1841		writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
1842			SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
1843			SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
1844			&hwq->ctrl_map->ctx_cap);
1845	}
1846
1847	/*
1848	 * Determine write-same unmap support for host by evaluating the unmap
1849	 * sector support bit of the context control register associated with
1850	 * the primary hardware queue. Note that while this status is reflected
1851	 * in a context register, the outcome can be assumed to be host-wide.
1852	 */
1853	hwq = get_hwq(afu, PRIMARY_HWQ);
1854	reg = readq_be(&hwq->host_map->ctx_ctrl);
1855	if (reg & SISL_CTX_CTRL_UNMAP_SECTOR)
1856		cfg->ws_unmap = true;
1857
1858	/* Initialize heartbeat */
1859	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);
1860out:
1861	return rc;
1862}
1863
1864/**
1865 * start_afu() - initializes and starts the AFU
1866 * @cfg:	Internal structure associated with the host.
1867 */
1868static int start_afu(struct cxlflash_cfg *cfg)
1869{
1870	struct afu *afu = cfg->afu;
1871	struct device *dev = &cfg->dev->dev;
1872	struct hwq *hwq;
1873	int rc = 0;
1874	int i;
1875
1876	init_pcr(cfg);
1877
1878	/* Initialize each HWQ */
1879	for (i = 0; i < afu->num_hwqs; i++) {
1880		hwq = get_hwq(afu, i);
1881
1882		/* After an AFU reset, RRQ entries are stale, clear them */
1883		memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry));
1884
1885		/* Initialize RRQ pointers */
1886		hwq->hrrq_start = &hwq->rrq_entry[0];
1887		hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1];
1888		hwq->hrrq_curr = hwq->hrrq_start;
1889		hwq->toggle = 1;
1890
1891		/* Initialize spin locks */
1892		spin_lock_init(&hwq->hrrq_slock);
1893		spin_lock_init(&hwq->hsq_slock);
1894
1895		/* Initialize SQ */
1896		if (afu_is_sq_cmd_mode(afu)) {
1897			memset(&hwq->sq, 0, sizeof(hwq->sq));
1898			hwq->hsq_start = &hwq->sq[0];
1899			hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
1900			hwq->hsq_curr = hwq->hsq_start;
1901
1902			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1903		}
1904
1905		/* Initialize IRQ poll */
1906		if (afu_is_irqpoll_enabled(afu))
1907			irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight,
1908				      cxlflash_irqpoll);
1909
1910	}
1911
1912	rc = init_global(cfg);
1913
1914	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1915	return rc;
1916}
1917
1918/**
1919 * init_intr() - setup interrupt handlers for the master context
1920 * @cfg:	Internal structure associated with the host.
1921 * @hwq:	Hardware queue to initialize.
1922 *
1923 * Return: 0 on success, -errno on failure
1924 */
1925static enum undo_level init_intr(struct cxlflash_cfg *cfg,
1926				 struct hwq *hwq)
1927{
1928	struct device *dev = &cfg->dev->dev;
1929	void *ctx = hwq->ctx_cookie;
1930	int rc = 0;
1931	enum undo_level level = UNDO_NOOP;
1932	bool is_primary_hwq = (hwq->index == PRIMARY_HWQ);
1933	int num_irqs = hwq->num_irqs;
1934
1935	rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs);
1936	if (unlikely(rc)) {
1937		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
1938			__func__, rc);
1939		level = UNDO_NOOP;
1940		goto out;
1941	}
1942
1943	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
1944				   "SISL_MSI_SYNC_ERROR");
1945	if (unlikely(rc <= 0)) {
1946		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
1947		level = FREE_IRQ;
1948		goto out;
1949	}
1950
1951	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
1952				   "SISL_MSI_RRQ_UPDATED");
1953	if (unlikely(rc <= 0)) {
1954		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
1955		level = UNMAP_ONE;
1956		goto out;
1957	}
1958
1959	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
1960	if (!is_primary_hwq)
1961		goto out;
1962
1963	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
1964				   "SISL_MSI_ASYNC_ERROR");
1965	if (unlikely(rc <= 0)) {
1966		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1967		level = UNMAP_TWO;
1968		goto out;
1969	}
1970out:
1971	return level;
1972}
1973
1974/**
1975 * init_mc() - create and register as the master context
1976 * @cfg:	Internal structure associated with the host.
1977 * @index:	HWQ Index of the master context.
1978 *
1979 * Return: 0 on success, -errno on failure
1980 */
1981static int init_mc(struct cxlflash_cfg *cfg, u32 index)
1982{
1983	void *ctx;
1984	struct device *dev = &cfg->dev->dev;
1985	struct hwq *hwq = get_hwq(cfg->afu, index);
1986	int rc = 0;
1987	int num_irqs;
1988	enum undo_level level;
1989
1990	hwq->afu = cfg->afu;
1991	hwq->index = index;
1992	INIT_LIST_HEAD(&hwq->pending_cmds);
1993
1994	if (index == PRIMARY_HWQ) {
1995		ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie);
1996		num_irqs = 3;
1997	} else {
1998		ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie);
1999		num_irqs = 2;
2000	}
2001	if (IS_ERR_OR_NULL(ctx)) {
2002		rc = -ENOMEM;
2003		goto err1;
2004	}
2005
2006	WARN_ON(hwq->ctx_cookie);
2007	hwq->ctx_cookie = ctx;
2008	hwq->num_irqs = num_irqs;
2009
2010	/* Set it up as a master with the CXL */
2011	cfg->ops->set_master(ctx);
2012
2013	/* Reset AFU when initializing primary context */
2014	if (index == PRIMARY_HWQ) {
2015		rc = cfg->ops->afu_reset(ctx);
2016		if (unlikely(rc)) {
2017			dev_err(dev, "%s: AFU reset failed rc=%d\n",
2018				      __func__, rc);
2019			goto err1;
2020		}
2021	}
2022
2023	level = init_intr(cfg, hwq);
2024	if (unlikely(level)) {
2025		dev_err(dev, "%s: interrupt init failed rc=%d\n", __func__, rc);
2026		goto err2;
2027	}
2028
2029	/* Finally, activate the context by starting it */
2030	rc = cfg->ops->start_context(hwq->ctx_cookie);
2031	if (unlikely(rc)) {
2032		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2033		level = UNMAP_THREE;
2034		goto err2;
2035	}
2036
2037out:
2038	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2039	return rc;
2040err2:
2041	term_intr(cfg, level, index);
2042	if (index != PRIMARY_HWQ)
2043		cfg->ops->release_context(ctx);
2044err1:
2045	hwq->ctx_cookie = NULL;
2046	goto out;
2047}
2048
2049/**
2050 * get_num_afu_ports() - determines and configures the number of AFU ports
2051 * @cfg:	Internal structure associated with the host.
2052 *
2053 * This routine determines the number of AFU ports by converting the global
2054 * port selection mask. The converted value is only valid following an AFU
2055 * reset (explicit or power-on). This routine must be invoked shortly after
2056 * mapping as other routines are dependent on the number of ports during the
2057 * initialization sequence.
2058 *
2059 * To support legacy AFUs that might not have reflected an initial global
2060 * port mask (value read is 0), default to the number of ports originally
2061 * supported by the cxlflash driver (2) before hardware with other port
2062 * offerings was introduced.
2063 */
2064static void get_num_afu_ports(struct cxlflash_cfg *cfg)
2065{
2066	struct afu *afu = cfg->afu;
2067	struct device *dev = &cfg->dev->dev;
2068	u64 port_mask;
2069	int num_fc_ports = LEGACY_FC_PORTS;
2070
2071	port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel);
2072	if (port_mask != 0ULL)
2073		num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS);
2074
2075	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2076		__func__, port_mask, num_fc_ports);
2077
2078	cfg->num_fc_ports = num_fc_ports;
2079	cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports);
2080}
2081
2082/**
2083 * init_afu() - setup as master context and start AFU
2084 * @cfg:	Internal structure associated with the host.
2085 *
2086 * This routine is a higher level of control for configuring the
2087 * AFU on probe and reset paths.
2088 *
2089 * Return: 0 on success, -errno on failure
2090 */
2091static int init_afu(struct cxlflash_cfg *cfg)
2092{
2093	u64 reg;
2094	int rc = 0;
2095	struct afu *afu = cfg->afu;
2096	struct device *dev = &cfg->dev->dev;
2097	struct hwq *hwq;
2098	int i;
2099
2100	cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true);
2101
2102	mutex_init(&afu->sync_active);
2103	afu->num_hwqs = afu->desired_hwqs;
2104	for (i = 0; i < afu->num_hwqs; i++) {
2105		rc = init_mc(cfg, i);
2106		if (rc) {
2107			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2108				__func__, rc, i);
2109			goto err1;
2110		}
2111	}
2112
2113	/* Map the entire MMIO space of the AFU using the first context */
2114	hwq = get_hwq(afu, PRIMARY_HWQ);
2115	afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie);
2116	if (!afu->afu_map) {
2117		dev_err(dev, "%s: psa_map failed\n", __func__);
2118		rc = -ENOMEM;
2119		goto err1;
2120	}
2121
2122	/* No byte reverse on reading afu_version or string will be backwards */
2123	reg = readq(&afu->afu_map->global.regs.afu_version);
2124	memcpy(afu->version, &reg, sizeof(reg));
2125	afu->interface_version =
2126	    readq_be(&afu->afu_map->global.regs.interface_version);
2127	if ((afu->interface_version + 1) == 0) {
2128		dev_err(dev, "Back level AFU, please upgrade. AFU version %s "
2129			"interface version %016llx\n", afu->version,
2130		       afu->interface_version);
2131		rc = -EINVAL;
2132		goto err1;
2133	}
2134
2135	if (afu_is_sq_cmd_mode(afu)) {
2136		afu->send_cmd = send_cmd_sq;
2137		afu->context_reset = context_reset_sq;
2138	} else {
2139		afu->send_cmd = send_cmd_ioarrin;
2140		afu->context_reset = context_reset_ioarrin;
2141	}
2142
2143	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2144		afu->version, afu->interface_version);
2145
2146	get_num_afu_ports(cfg);
2147
2148	rc = start_afu(cfg);
2149	if (rc) {
2150		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2151		goto err1;
2152	}
2153
2154	afu_err_intr_init(cfg->afu);
2155	for (i = 0; i < afu->num_hwqs; i++) {
2156		hwq = get_hwq(afu, i);
2157
2158		hwq->room = readq_be(&hwq->host_map->cmd_room);
2159	}
2160
2161	/* Restore the LUN mappings */
2162	cxlflash_restore_luntable(cfg);
2163out:
2164	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2165	return rc;
2166
2167err1:
2168	for (i = afu->num_hwqs - 1; i >= 0; i--) {
2169		term_intr(cfg, UNMAP_THREE, i);
2170		term_mc(cfg, i);
2171	}
2172	goto out;
2173}
2174
2175/**
2176 * afu_reset() - resets the AFU
2177 * @cfg:	Internal structure associated with the host.
2178 *
2179 * Return: 0 on success, -errno on failure
2180 */
2181static int afu_reset(struct cxlflash_cfg *cfg)
2182{
2183	struct device *dev = &cfg->dev->dev;
2184	int rc = 0;
2185
2186	/* Stop the context before the reset. Since the context is
2187	 * no longer available restart it after the reset is complete
2188	 */
2189	term_afu(cfg);
2190
2191	rc = init_afu(cfg);
2192
2193	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2194	return rc;
2195}
2196
2197/**
2198 * drain_ioctls() - wait until all currently executing ioctls have completed
2199 * @cfg:	Internal structure associated with the host.
2200 *
2201 * Obtain write access to read/write semaphore that wraps ioctl
2202 * handling to 'drain' ioctls currently executing.
2203 */
2204static void drain_ioctls(struct cxlflash_cfg *cfg)
2205{
2206	down_write(&cfg->ioctl_rwsem);
2207	up_write(&cfg->ioctl_rwsem);
2208}
2209
2210/**
2211 * cxlflash_async_reset_host() - asynchronous host reset handler
2212 * @data:	Private data provided while scheduling reset.
2213 * @cookie:	Cookie that can be used for checkpointing.
2214 */
2215static void cxlflash_async_reset_host(void *data, async_cookie_t cookie)
2216{
2217	struct cxlflash_cfg *cfg = data;
2218	struct device *dev = &cfg->dev->dev;
2219	int rc = 0;
2220
2221	if (cfg->state != STATE_RESET) {
2222		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2223			__func__, cfg->state);
2224		goto out;
2225	}
2226
2227	drain_ioctls(cfg);
2228	cxlflash_mark_contexts_error(cfg);
2229	rc = afu_reset(cfg);
2230	if (rc)
2231		cfg->state = STATE_FAILTERM;
2232	else
2233		cfg->state = STATE_NORMAL;
2234	wake_up_all(&cfg->reset_waitq);
2235
2236out:
2237	scsi_unblock_requests(cfg->host);
2238}
2239
2240/**
2241 * cxlflash_schedule_async_reset() - schedule an asynchronous host reset
2242 * @cfg:	Internal structure associated with the host.
2243 */
2244static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg)
2245{
2246	struct device *dev = &cfg->dev->dev;
2247
2248	if (cfg->state != STATE_NORMAL) {
2249		dev_dbg(dev, "%s: Not performing reset state=%d\n",
2250			__func__, cfg->state);
2251		return;
2252	}
2253
2254	cfg->state = STATE_RESET;
2255	scsi_block_requests(cfg->host);
2256	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
2257						 cfg);
2258}
2259
2260/**
2261 * send_afu_cmd() - builds and sends an internal AFU command
2262 * @afu:	AFU associated with the host.
2263 * @rcb:	Pre-populated IOARCB describing command to send.
2264 *
2265 * The AFU can only take one internal AFU command at a time. This limitation is
2266 * enforced by using a mutex to provide exclusive access to the AFU during the
2267 * operation. This design point requires calling threads to not be on interrupt
2268 * context due to the possibility of sleeping during concurrent AFU operations.
2269 *
2270 * The command status is optionally passed back to the caller when the caller
2271 * populates the IOASA field of the IOARCB with a pointer to an IOASA structure.
2272 *
2273 * Return:
2274 *	0 on success, -errno on failure
2275 */
2276static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb)
2277{
2278	struct cxlflash_cfg *cfg = afu->parent;
2279	struct device *dev = &cfg->dev->dev;
2280	struct afu_cmd *cmd = NULL;
2281	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
2282	ulong lock_flags;
2283	char *buf = NULL;
2284	int rc = 0;
2285	int nretry = 0;
2286
2287	if (cfg->state != STATE_NORMAL) {
2288		dev_dbg(dev, "%s: Sync not required state=%u\n",
2289			__func__, cfg->state);
2290		return 0;
2291	}
2292
2293	mutex_lock(&afu->sync_active);
2294	atomic_inc(&afu->cmds_active);
2295	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
2296	if (unlikely(!buf)) {
2297		dev_err(dev, "%s: no memory for command\n", __func__);
2298		rc = -ENOMEM;
2299		goto out;
2300	}
2301
2302	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
2303
2304retry:
2305	memset(cmd, 0, sizeof(*cmd));
2306	memcpy(&cmd->rcb, rcb, sizeof(*rcb));
2307	INIT_LIST_HEAD(&cmd->queue);
2308	init_completion(&cmd->cevent);
2309	cmd->parent = afu;
2310	cmd->hwq_index = hwq->index;
2311	cmd->rcb.ctx_id = hwq->ctx_hndl;
2312
2313	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2314		__func__, afu, cmd, cmd->rcb.cdb[0], nretry);
2315
2316	rc = afu->send_cmd(afu, cmd);
2317	if (unlikely(rc)) {
2318		rc = -ENOBUFS;
2319		goto out;
2320	}
2321
2322	rc = wait_resp(afu, cmd);
2323	switch (rc) {
2324	case -ETIMEDOUT:
2325		rc = afu->context_reset(hwq);
2326		if (rc) {
2327			/* Delete the command from pending_cmds list */
2328			spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
2329			list_del(&cmd->list);
2330			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2331
2332			cxlflash_schedule_async_reset(cfg);
2333			break;
2334		}
2335		fallthrough;	/* to retry */
2336	case -EAGAIN:
2337		if (++nretry < 2)
2338			goto retry;
2339		fallthrough;	/* to exit */
2340	default:
2341		break;
2342	}
2343
2344	if (rcb->ioasa)
2345		*rcb->ioasa = cmd->sa;
2346out:
2347	atomic_dec(&afu->cmds_active);
2348	mutex_unlock(&afu->sync_active);
2349	kfree(buf);
2350	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2351	return rc;
2352}
2353
2354/**
2355 * cxlflash_afu_sync() - builds and sends an AFU sync command
2356 * @afu:	AFU associated with the host.
2357 * @ctx:	Identifies context requesting sync.
2358 * @res:	Identifies resource requesting sync.
2359 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
2360 *
2361 * AFU sync operations are only necessary and allowed when the device is
2362 * operating normally. When not operating normally, sync requests can occur as
2363 * part of cleaning up resources associated with an adapter prior to removal.
2364 * In this scenario, these requests are simply ignored (safe due to the AFU
2365 * going away).
2366 *
2367 * Return:
2368 *	0 on success, -errno on failure
2369 */
2370int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode)
2371{
2372	struct cxlflash_cfg *cfg = afu->parent;
2373	struct device *dev = &cfg->dev->dev;
2374	struct sisl_ioarcb rcb = { 0 };
2375
2376	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2377		__func__, afu, ctx, res, mode);
2378
2379	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
2380	rcb.msi = SISL_MSI_RRQ_UPDATED;
2381	rcb.timeout = MC_AFU_SYNC_TIMEOUT;
2382
2383	rcb.cdb[0] = SISL_AFU_CMD_SYNC;
2384	rcb.cdb[1] = mode;
2385	put_unaligned_be16(ctx, &rcb.cdb[2]);
2386	put_unaligned_be32(res, &rcb.cdb[4]);
2387
2388	return send_afu_cmd(afu, &rcb);
2389}
2390
2391/**
2392 * cxlflash_eh_abort_handler() - abort a SCSI command
2393 * @scp:	SCSI command to abort.
2394 *
2395 * CXL Flash devices do not support a single command abort. Reset the context
2396 * as per SISLite specification. Flush any pending commands in the hardware
2397 * queue before the reset.
2398 *
2399 * Return: SUCCESS/FAILED as defined in scsi/scsi.h
2400 */
2401static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp)
2402{
2403	int rc = FAILED;
2404	struct Scsi_Host *host = scp->device->host;
2405	struct cxlflash_cfg *cfg = shost_priv(host);
2406	struct afu_cmd *cmd = sc_to_afuc(scp);
2407	struct device *dev = &cfg->dev->dev;
2408	struct afu *afu = cfg->afu;
2409	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
2410
2411	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
2412		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2413		scp->device->channel, scp->device->id, scp->device->lun,
2414		get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
2415		get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
2416		get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
2417		get_unaligned_be32(&((u32 *)scp->cmnd)[3]));
2418
2419	/* When the state is not normal, another reset/reload is in progress.
2420	 * Return failed and the mid-layer will invoke host reset handler.
2421	 */
2422	if (cfg->state != STATE_NORMAL) {
2423		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2424			__func__, cfg->state);
2425		goto out;
2426	}
2427
2428	rc = afu->context_reset(hwq);
2429	if (unlikely(rc))
2430		goto out;
2431
2432	rc = SUCCESS;
2433
2434out:
2435	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2436	return rc;
2437}
2438
2439/**
2440 * cxlflash_eh_device_reset_handler() - reset a single LUN
2441 * @scp:	SCSI command to send.
2442 *
2443 * Return:
2444 *	SUCCESS as defined in scsi/scsi.h
2445 *	FAILED as defined in scsi/scsi.h
2446 */
2447static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
2448{
2449	int rc = SUCCESS;
2450	struct scsi_device *sdev = scp->device;
2451	struct Scsi_Host *host = sdev->host;
2452	struct cxlflash_cfg *cfg = shost_priv(host);
2453	struct device *dev = &cfg->dev->dev;
2454	int rcr = 0;
2455
2456	dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2457		host->host_no, sdev->channel, sdev->id, sdev->lun);
2458retry:
2459	switch (cfg->state) {
2460	case STATE_NORMAL:
2461		rcr = send_tmf(cfg, sdev, TMF_LUN_RESET);
2462		if (unlikely(rcr))
2463			rc = FAILED;
2464		break;
2465	case STATE_RESET:
2466		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2467		goto retry;
2468	default:
2469		rc = FAILED;
2470		break;
2471	}
2472
2473	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2474	return rc;
2475}
2476
2477/**
2478 * cxlflash_eh_host_reset_handler() - reset the host adapter
2479 * @scp:	SCSI command from stack identifying host.
2480 *
2481 * Following a reset, the state is evaluated again in case an EEH occurred
2482 * during the reset. In such a scenario, the host reset will either yield
2483 * until the EEH recovery is complete or return success or failure based
2484 * upon the current device state.
2485 *
2486 * Return:
2487 *	SUCCESS as defined in scsi/scsi.h
2488 *	FAILED as defined in scsi/scsi.h
2489 */
2490static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
2491{
2492	int rc = SUCCESS;
2493	int rcr = 0;
2494	struct Scsi_Host *host = scp->device->host;
2495	struct cxlflash_cfg *cfg = shost_priv(host);
2496	struct device *dev = &cfg->dev->dev;
2497
2498	dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2499
2500	switch (cfg->state) {
2501	case STATE_NORMAL:
2502		cfg->state = STATE_RESET;
2503		drain_ioctls(cfg);
2504		cxlflash_mark_contexts_error(cfg);
2505		rcr = afu_reset(cfg);
2506		if (rcr) {
2507			rc = FAILED;
2508			cfg->state = STATE_FAILTERM;
2509		} else
2510			cfg->state = STATE_NORMAL;
2511		wake_up_all(&cfg->reset_waitq);
2512		ssleep(1);
2513		fallthrough;
2514	case STATE_RESET:
2515		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2516		if (cfg->state == STATE_NORMAL)
2517			break;
2518		fallthrough;
2519	default:
2520		rc = FAILED;
2521		break;
2522	}
2523
2524	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2525	return rc;
2526}
2527
2528/**
2529 * cxlflash_change_queue_depth() - change the queue depth for the device
2530 * @sdev:	SCSI device destined for queue depth change.
2531 * @qdepth:	Requested queue depth value to set.
2532 *
2533 * The requested queue depth is capped to the maximum supported value.
2534 *
2535 * Return: The actual queue depth set.
2536 */
2537static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
2538{
2539
2540	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
2541		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;
2542
2543	scsi_change_queue_depth(sdev, qdepth);
2544	return sdev->queue_depth;
2545}
2546
2547/**
2548 * cxlflash_show_port_status() - queries and presents the current port status
2549 * @port:	Desired port for status reporting.
2550 * @cfg:	Internal structure associated with the host.
2551 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2552 *
2553 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2554 */
2555static ssize_t cxlflash_show_port_status(u32 port,
2556					 struct cxlflash_cfg *cfg,
2557					 char *buf)
2558{
2559	struct device *dev = &cfg->dev->dev;
2560	char *disp_status;
2561	u64 status;
2562	__be64 __iomem *fc_port_regs;
2563
2564	WARN_ON(port >= MAX_FC_PORTS);
2565
2566	if (port >= cfg->num_fc_ports) {
2567		dev_info(dev, "%s: Port %d not supported on this card.\n",
2568			__func__, port);
2569		return -EINVAL;
2570	}
2571
2572	fc_port_regs = get_fc_port_regs(cfg, port);
2573	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
2574	status &= FC_MTIP_STATUS_MASK;
2575
2576	if (status == FC_MTIP_STATUS_ONLINE)
2577		disp_status = "online";
2578	else if (status == FC_MTIP_STATUS_OFFLINE)
2579		disp_status = "offline";
2580	else
2581		disp_status = "unknown";
2582
2583	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2584}
2585
2586/**
2587 * port0_show() - queries and presents the current status of port 0
2588 * @dev:	Generic device associated with the host owning the port.
2589 * @attr:	Device attribute representing the port.
2590 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2591 *
2592 * Return: The size of the ASCII string returned in @buf.
2593 */
2594static ssize_t port0_show(struct device *dev,
2595			  struct device_attribute *attr,
2596			  char *buf)
2597{
2598	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2599
2600	return cxlflash_show_port_status(0, cfg, buf);
2601}
2602
2603/**
2604 * port1_show() - queries and presents the current status of port 1
2605 * @dev:	Generic device associated with the host owning the port.
2606 * @attr:	Device attribute representing the port.
2607 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2608 *
2609 * Return: The size of the ASCII string returned in @buf.
2610 */
2611static ssize_t port1_show(struct device *dev,
2612			  struct device_attribute *attr,
2613			  char *buf)
2614{
2615	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2616
2617	return cxlflash_show_port_status(1, cfg, buf);
2618}
2619
2620/**
2621 * port2_show() - queries and presents the current status of port 2
2622 * @dev:	Generic device associated with the host owning the port.
2623 * @attr:	Device attribute representing the port.
2624 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2625 *
2626 * Return: The size of the ASCII string returned in @buf.
2627 */
2628static ssize_t port2_show(struct device *dev,
2629			  struct device_attribute *attr,
2630			  char *buf)
2631{
2632	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2633
2634	return cxlflash_show_port_status(2, cfg, buf);
2635}
2636
2637/**
2638 * port3_show() - queries and presents the current status of port 3
2639 * @dev:	Generic device associated with the host owning the port.
2640 * @attr:	Device attribute representing the port.
2641 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2642 *
2643 * Return: The size of the ASCII string returned in @buf.
2644 */
2645static ssize_t port3_show(struct device *dev,
2646			  struct device_attribute *attr,
2647			  char *buf)
2648{
2649	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2650
2651	return cxlflash_show_port_status(3, cfg, buf);
2652}
2653
2654/**
2655 * lun_mode_show() - presents the current LUN mode of the host
2656 * @dev:	Generic device associated with the host.
2657 * @attr:	Device attribute representing the LUN mode.
2658 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
2659 *
2660 * Return: The size of the ASCII string returned in @buf.
2661 */
2662static ssize_t lun_mode_show(struct device *dev,
2663			     struct device_attribute *attr, char *buf)
2664{
2665	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2666	struct afu *afu = cfg->afu;
2667
2668	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2669}
2670
2671/**
2672 * lun_mode_store() - sets the LUN mode of the host
2673 * @dev:	Generic device associated with the host.
2674 * @attr:	Device attribute representing the LUN mode.
2675 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
2676 * @count:	Length of data resizing in @buf.
2677 *
2678 * The CXL Flash AFU supports a dummy LUN mode where the external
2679 * links and storage are not required. Space on the FPGA is used
2680 * to create 1 or 2 small LUNs which are presented to the system
2681 * as if they were a normal storage device. This feature is useful
2682 * during development and also provides manufacturing with a way
2683 * to test the AFU without an actual device.
2684 *
2685 * 0 = external LUN[s] (default)
2686 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
2687 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
2688 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
2689 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
2690 *
2691 * Return: The size of the ASCII string returned in @buf.
2692 */
2693static ssize_t lun_mode_store(struct device *dev,
2694			      struct device_attribute *attr,
2695			      const char *buf, size_t count)
2696{
2697	struct Scsi_Host *shost = class_to_shost(dev);
2698	struct cxlflash_cfg *cfg = shost_priv(shost);
2699	struct afu *afu = cfg->afu;
2700	int rc;
2701	u32 lun_mode;
2702
2703	rc = kstrtouint(buf, 10, &lun_mode);
2704	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
2705		afu->internal_lun = lun_mode;
2706
2707		/*
2708		 * When configured for internal LUN, there is only one channel,
2709		 * channel number 0, else there will be one less than the number
2710		 * of fc ports for this card.
2711		 */
2712		if (afu->internal_lun)
2713			shost->max_channel = 0;
2714		else
2715			shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports);
2716
2717		afu_reset(cfg);
2718		scsi_scan_host(cfg->host);
2719	}
2720
2721	return count;
2722}
2723
2724/**
2725 * ioctl_version_show() - presents the current ioctl version of the host
2726 * @dev:	Generic device associated with the host.
2727 * @attr:	Device attribute representing the ioctl version.
2728 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
2729 *
2730 * Return: The size of the ASCII string returned in @buf.
2731 */
2732static ssize_t ioctl_version_show(struct device *dev,
2733				  struct device_attribute *attr, char *buf)
2734{
2735	ssize_t bytes = 0;
2736
2737	bytes = scnprintf(buf, PAGE_SIZE,
2738			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
2739	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2740			   "host: %u\n", HT_CXLFLASH_VERSION_0);
2741
2742	return bytes;
2743}
2744
2745/**
2746 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
2747 * @port:	Desired port for status reporting.
2748 * @cfg:	Internal structure associated with the host.
2749 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2750 *
2751 * Return: The size of the ASCII string returned in @buf or -EINVAL.
2752 */
2753static ssize_t cxlflash_show_port_lun_table(u32 port,
2754					    struct cxlflash_cfg *cfg,
2755					    char *buf)
2756{
2757	struct device *dev = &cfg->dev->dev;
2758	__be64 __iomem *fc_port_luns;
2759	int i;
2760	ssize_t bytes = 0;
2761
2762	WARN_ON(port >= MAX_FC_PORTS);
2763
2764	if (port >= cfg->num_fc_ports) {
2765		dev_info(dev, "%s: Port %d not supported on this card.\n",
2766			__func__, port);
2767		return -EINVAL;
2768	}
2769
2770	fc_port_luns = get_fc_port_luns(cfg, port);
2771
2772	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
2773		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
2774				   "%03d: %016llx\n",
2775				   i, readq_be(&fc_port_luns[i]));
2776	return bytes;
2777}
2778
2779/**
2780 * port0_lun_table_show() - presents the current LUN table of port 0
2781 * @dev:	Generic device associated with the host owning the port.
2782 * @attr:	Device attribute representing the port.
2783 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2784 *
2785 * Return: The size of the ASCII string returned in @buf.
2786 */
2787static ssize_t port0_lun_table_show(struct device *dev,
2788				    struct device_attribute *attr,
2789				    char *buf)
2790{
2791	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2792
2793	return cxlflash_show_port_lun_table(0, cfg, buf);
2794}
2795
2796/**
2797 * port1_lun_table_show() - presents the current LUN table of port 1
2798 * @dev:	Generic device associated with the host owning the port.
2799 * @attr:	Device attribute representing the port.
2800 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2801 *
2802 * Return: The size of the ASCII string returned in @buf.
2803 */
2804static ssize_t port1_lun_table_show(struct device *dev,
2805				    struct device_attribute *attr,
2806				    char *buf)
2807{
2808	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2809
2810	return cxlflash_show_port_lun_table(1, cfg, buf);
2811}
2812
2813/**
2814 * port2_lun_table_show() - presents the current LUN table of port 2
2815 * @dev:	Generic device associated with the host owning the port.
2816 * @attr:	Device attribute representing the port.
2817 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2818 *
2819 * Return: The size of the ASCII string returned in @buf.
2820 */
2821static ssize_t port2_lun_table_show(struct device *dev,
2822				    struct device_attribute *attr,
2823				    char *buf)
2824{
2825	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2826
2827	return cxlflash_show_port_lun_table(2, cfg, buf);
2828}
2829
2830/**
2831 * port3_lun_table_show() - presents the current LUN table of port 3
2832 * @dev:	Generic device associated with the host owning the port.
2833 * @attr:	Device attribute representing the port.
2834 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
2835 *
2836 * Return: The size of the ASCII string returned in @buf.
2837 */
2838static ssize_t port3_lun_table_show(struct device *dev,
2839				    struct device_attribute *attr,
2840				    char *buf)
2841{
2842	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2843
2844	return cxlflash_show_port_lun_table(3, cfg, buf);
2845}
2846
2847/**
2848 * irqpoll_weight_show() - presents the current IRQ poll weight for the host
2849 * @dev:	Generic device associated with the host.
2850 * @attr:	Device attribute representing the IRQ poll weight.
2851 * @buf:	Buffer of length PAGE_SIZE to report back the current IRQ poll
2852 *		weight in ASCII.
2853 *
2854 * An IRQ poll weight of 0 indicates polling is disabled.
2855 *
2856 * Return: The size of the ASCII string returned in @buf.
2857 */
2858static ssize_t irqpoll_weight_show(struct device *dev,
2859				   struct device_attribute *attr, char *buf)
2860{
2861	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2862	struct afu *afu = cfg->afu;
2863
2864	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2865}
2866
2867/**
2868 * irqpoll_weight_store() - sets the current IRQ poll weight for the host
2869 * @dev:	Generic device associated with the host.
2870 * @attr:	Device attribute representing the IRQ poll weight.
2871 * @buf:	Buffer of length PAGE_SIZE containing the desired IRQ poll
2872 *		weight in ASCII.
2873 * @count:	Length of data resizing in @buf.
2874 *
2875 * An IRQ poll weight of 0 indicates polling is disabled.
2876 *
2877 * Return: The size of the ASCII string returned in @buf.
2878 */
2879static ssize_t irqpoll_weight_store(struct device *dev,
2880				    struct device_attribute *attr,
2881				    const char *buf, size_t count)
2882{
2883	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2884	struct device *cfgdev = &cfg->dev->dev;
2885	struct afu *afu = cfg->afu;
2886	struct hwq *hwq;
2887	u32 weight;
2888	int rc, i;
2889
2890	rc = kstrtouint(buf, 10, &weight);
2891	if (rc)
2892		return -EINVAL;
2893
2894	if (weight > 256) {
2895		dev_info(cfgdev,
2896			 "Invalid IRQ poll weight. It must be 256 or less.\n");
2897		return -EINVAL;
2898	}
2899
2900	if (weight == afu->irqpoll_weight) {
2901		dev_info(cfgdev,
2902			 "Current IRQ poll weight has the same weight.\n");
2903		return -EINVAL;
2904	}
2905
2906	if (afu_is_irqpoll_enabled(afu)) {
2907		for (i = 0; i < afu->num_hwqs; i++) {
2908			hwq = get_hwq(afu, i);
2909
2910			irq_poll_disable(&hwq->irqpoll);
2911		}
2912	}
2913
2914	afu->irqpoll_weight = weight;
2915
2916	if (weight > 0) {
2917		for (i = 0; i < afu->num_hwqs; i++) {
2918			hwq = get_hwq(afu, i);
2919
2920			irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll);
2921		}
2922	}
2923
2924	return count;
2925}
2926
2927/**
2928 * num_hwqs_show() - presents the number of hardware queues for the host
2929 * @dev:	Generic device associated with the host.
2930 * @attr:	Device attribute representing the number of hardware queues.
2931 * @buf:	Buffer of length PAGE_SIZE to report back the number of hardware
2932 *		queues in ASCII.
2933 *
2934 * Return: The size of the ASCII string returned in @buf.
2935 */
2936static ssize_t num_hwqs_show(struct device *dev,
2937			     struct device_attribute *attr, char *buf)
2938{
2939	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2940	struct afu *afu = cfg->afu;
2941
2942	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2943}
2944
2945/**
2946 * num_hwqs_store() - sets the number of hardware queues for the host
2947 * @dev:	Generic device associated with the host.
2948 * @attr:	Device attribute representing the number of hardware queues.
2949 * @buf:	Buffer of length PAGE_SIZE containing the number of hardware
2950 *		queues in ASCII.
2951 * @count:	Length of data resizing in @buf.
2952 *
2953 * n > 0: num_hwqs = n
2954 * n = 0: num_hwqs = num_online_cpus()
2955 * n < 0: num_online_cpus() / abs(n)
2956 *
2957 * Return: The size of the ASCII string returned in @buf.
2958 */
2959static ssize_t num_hwqs_store(struct device *dev,
2960			      struct device_attribute *attr,
2961			      const char *buf, size_t count)
2962{
2963	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
2964	struct afu *afu = cfg->afu;
2965	int rc;
2966	int nhwqs, num_hwqs;
2967
2968	rc = kstrtoint(buf, 10, &nhwqs);
2969	if (rc)
2970		return -EINVAL;
2971
2972	if (nhwqs >= 1)
2973		num_hwqs = nhwqs;
2974	else if (nhwqs == 0)
2975		num_hwqs = num_online_cpus();
2976	else
2977		num_hwqs = num_online_cpus() / abs(nhwqs);
2978
2979	afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS);
2980	WARN_ON_ONCE(afu->desired_hwqs == 0);
2981
2982retry:
2983	switch (cfg->state) {
2984	case STATE_NORMAL:
2985		cfg->state = STATE_RESET;
2986		drain_ioctls(cfg);
2987		cxlflash_mark_contexts_error(cfg);
2988		rc = afu_reset(cfg);
2989		if (rc)
2990			cfg->state = STATE_FAILTERM;
2991		else
2992			cfg->state = STATE_NORMAL;
2993		wake_up_all(&cfg->reset_waitq);
2994		break;
2995	case STATE_RESET:
2996		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
2997		if (cfg->state == STATE_NORMAL)
2998			goto retry;
2999		fallthrough;
3000	default:
3001		/* Ideally should not happen */
3002		dev_err(dev, "%s: Device is not ready, state=%d\n",
3003			__func__, cfg->state);
3004		break;
3005	}
3006
3007	return count;
3008}
3009
3010static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3011
3012/**
3013 * hwq_mode_show() - presents the HWQ steering mode for the host
3014 * @dev:	Generic device associated with the host.
3015 * @attr:	Device attribute representing the HWQ steering mode.
3016 * @buf:	Buffer of length PAGE_SIZE to report back the HWQ steering mode
3017 *		as a character string.
3018 *
3019 * Return: The size of the ASCII string returned in @buf.
3020 */
3021static ssize_t hwq_mode_show(struct device *dev,
3022			     struct device_attribute *attr, char *buf)
3023{
3024	struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev));
3025	struct afu *afu = cfg->afu;
3026
3027	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3028}
3029
3030/**
3031 * hwq_mode_store() - sets the HWQ steering mode for the host
3032 * @dev:	Generic device associated with the host.
3033 * @attr:	Device attribute representing the HWQ steering mode.
3034 * @buf:	Buffer of length PAGE_SIZE containing the HWQ steering mode
3035 *		as a character string.
3036 * @count:	Length of data resizing in @buf.
3037 *
3038 * rr = Round-Robin
3039 * tag = Block MQ Tagging
3040 * cpu = CPU Affinity
3041 *
3042 * Return: The size of the ASCII string returned in @buf.
3043 */
3044static ssize_t hwq_mode_store(struct device *dev,
3045			      struct device_attribute *attr,
3046			      const char *buf, size_t count)
3047{
3048	struct Scsi_Host *shost = class_to_shost(dev);
3049	struct cxlflash_cfg *cfg = shost_priv(shost);
3050	struct device *cfgdev = &cfg->dev->dev;
3051	struct afu *afu = cfg->afu;
3052	int i;
3053	u32 mode = MAX_HWQ_MODE;
3054
3055	for (i = 0; i < MAX_HWQ_MODE; i++) {
3056		if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) {
3057			mode = i;
3058			break;
3059		}
3060	}
3061
3062	if (mode >= MAX_HWQ_MODE) {
3063		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3064		return -EINVAL;
3065	}
3066
3067	afu->hwq_mode = mode;
3068
3069	return count;
3070}
3071
3072/**
3073 * mode_show() - presents the current mode of the device
3074 * @dev:	Generic device associated with the device.
3075 * @attr:	Device attribute representing the device mode.
3076 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
3077 *
3078 * Return: The size of the ASCII string returned in @buf.
3079 */
3080static ssize_t mode_show(struct device *dev,
3081			 struct device_attribute *attr, char *buf)
3082{
3083	struct scsi_device *sdev = to_scsi_device(dev);
3084
3085	return scnprintf(buf, PAGE_SIZE, "%s\n",
3086			 sdev->hostdata ? "superpipe" : "legacy");
3087}
3088
3089/*
3090 * Host attributes
3091 */
3092static DEVICE_ATTR_RO(port0);
3093static DEVICE_ATTR_RO(port1);
3094static DEVICE_ATTR_RO(port2);
3095static DEVICE_ATTR_RO(port3);
3096static DEVICE_ATTR_RW(lun_mode);
3097static DEVICE_ATTR_RO(ioctl_version);
3098static DEVICE_ATTR_RO(port0_lun_table);
3099static DEVICE_ATTR_RO(port1_lun_table);
3100static DEVICE_ATTR_RO(port2_lun_table);
3101static DEVICE_ATTR_RO(port3_lun_table);
3102static DEVICE_ATTR_RW(irqpoll_weight);
3103static DEVICE_ATTR_RW(num_hwqs);
3104static DEVICE_ATTR_RW(hwq_mode);
3105
3106static struct attribute *cxlflash_host_attrs[] = {
3107	&dev_attr_port0.attr,
3108	&dev_attr_port1.attr,
3109	&dev_attr_port2.attr,
3110	&dev_attr_port3.attr,
3111	&dev_attr_lun_mode.attr,
3112	&dev_attr_ioctl_version.attr,
3113	&dev_attr_port0_lun_table.attr,
3114	&dev_attr_port1_lun_table.attr,
3115	&dev_attr_port2_lun_table.attr,
3116	&dev_attr_port3_lun_table.attr,
3117	&dev_attr_irqpoll_weight.attr,
3118	&dev_attr_num_hwqs.attr,
3119	&dev_attr_hwq_mode.attr,
3120	NULL
3121};
3122
3123ATTRIBUTE_GROUPS(cxlflash_host);
3124
3125/*
3126 * Device attributes
3127 */
3128static DEVICE_ATTR_RO(mode);
3129
3130static struct attribute *cxlflash_dev_attrs[] = {
3131	&dev_attr_mode.attr,
3132	NULL
3133};
3134
3135ATTRIBUTE_GROUPS(cxlflash_dev);
3136
3137/*
3138 * Host template
3139 */
3140static struct scsi_host_template driver_template = {
3141	.module = THIS_MODULE,
3142	.name = CXLFLASH_ADAPTER_NAME,
3143	.info = cxlflash_driver_info,
3144	.ioctl = cxlflash_ioctl,
3145	.proc_name = CXLFLASH_NAME,
3146	.queuecommand = cxlflash_queuecommand,
3147	.eh_abort_handler = cxlflash_eh_abort_handler,
3148	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
3149	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
3150	.change_queue_depth = cxlflash_change_queue_depth,
3151	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
3152	.can_queue = CXLFLASH_MAX_CMDS,
3153	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
3154	.this_id = -1,
3155	.sg_tablesize = 1,	/* No scatter gather support */
3156	.max_sectors = CXLFLASH_MAX_SECTORS,
3157	.shost_groups = cxlflash_host_groups,
3158	.sdev_groups = cxlflash_dev_groups,
3159};
3160
3161/*
3162 * Device dependent values
3163 */
3164static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
3165					CXLFLASH_WWPN_VPD_REQUIRED };
3166static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
3167					CXLFLASH_NOTIFY_SHUTDOWN };
3168static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS,
3169					(CXLFLASH_NOTIFY_SHUTDOWN |
3170					CXLFLASH_OCXL_DEV) };
3171
3172/*
3173 * PCI device binding table
3174 */
3175static struct pci_device_id cxlflash_pci_table[] = {
3176	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
3177	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
3178	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
3179	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
3180	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD,
3181	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals},
3182	{}
3183};
3184
3185MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);
3186
3187/**
3188 * cxlflash_worker_thread() - work thread handler for the AFU
3189 * @work:	Work structure contained within cxlflash associated with host.
3190 *
3191 * Handles the following events:
3192 * - Link reset which cannot be performed on interrupt context due to
3193 * blocking up to a few seconds
3194 * - Rescan the host
3195 */
3196static void cxlflash_worker_thread(struct work_struct *work)
3197{
3198	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
3199						work_q);
3200	struct afu *afu = cfg->afu;
3201	struct device *dev = &cfg->dev->dev;
3202	__be64 __iomem *fc_port_regs;
3203	int port;
3204	ulong lock_flags;
3205
3206	/* Avoid MMIO if the device has failed */
3207
3208	if (cfg->state != STATE_NORMAL)
3209		return;
3210
3211	spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3212
3213	if (cfg->lr_state == LINK_RESET_REQUIRED) {
3214		port = cfg->lr_port;
3215		if (port < 0)
3216			dev_err(dev, "%s: invalid port index %d\n",
3217				__func__, port);
3218		else {
3219			spin_unlock_irqrestore(cfg->host->host_lock,
3220					       lock_flags);
3221
3222			/* The reset can block... */
3223			fc_port_regs = get_fc_port_regs(cfg, port);
3224			afu_link_reset(afu, port, fc_port_regs);
3225			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
3226		}
3227
3228		cfg->lr_state = LINK_RESET_COMPLETE;
3229	}
3230
3231	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);
3232
3233	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3234		scsi_scan_host(cfg->host);
3235}
3236
3237/**
3238 * cxlflash_chr_open() - character device open handler
3239 * @inode:	Device inode associated with this character device.
3240 * @file:	File pointer for this device.
3241 *
3242 * Only users with admin privileges are allowed to open the character device.
3243 *
3244 * Return: 0 on success, -errno on failure
3245 */
3246static int cxlflash_chr_open(struct inode *inode, struct file *file)
3247{
3248	struct cxlflash_cfg *cfg;
3249
3250	if (!capable(CAP_SYS_ADMIN))
3251		return -EACCES;
3252
3253	cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev);
3254	file->private_data = cfg;
3255
3256	return 0;
3257}
3258
3259/**
3260 * decode_hioctl() - translates encoded host ioctl to easily identifiable string
3261 * @cmd:        The host ioctl command to decode.
3262 *
3263 * Return: A string identifying the decoded host ioctl.
3264 */
3265static char *decode_hioctl(unsigned int cmd)
3266{
3267	switch (cmd) {
3268	case HT_CXLFLASH_LUN_PROVISION:
3269		return __stringify_1(HT_CXLFLASH_LUN_PROVISION);
3270	}
3271
3272	return "UNKNOWN";
3273}
3274
3275/**
3276 * cxlflash_lun_provision() - host LUN provisioning handler
3277 * @cfg:	Internal structure associated with the host.
3278 * @lunprov:	Kernel copy of userspace ioctl data structure.
3279 *
3280 * Return: 0 on success, -errno on failure
3281 */
3282static int cxlflash_lun_provision(struct cxlflash_cfg *cfg,
3283				  struct ht_cxlflash_lun_provision *lunprov)
3284{
3285	struct afu *afu = cfg->afu;
3286	struct device *dev = &cfg->dev->dev;
3287	struct sisl_ioarcb rcb;
3288	struct sisl_ioasa asa;
3289	__be64 __iomem *fc_port_regs;
3290	u16 port = lunprov->port;
3291	u16 scmd = lunprov->hdr.subcmd;
3292	u16 type;
3293	u64 reg;
3294	u64 size;
3295	u64 lun_id;
3296	int rc = 0;
3297
3298	if (!afu_is_lun_provision(afu)) {
3299		rc = -ENOTSUPP;
3300		goto out;
3301	}
3302
3303	if (port >= cfg->num_fc_ports) {
3304		rc = -EINVAL;
3305		goto out;
3306	}
3307
3308	switch (scmd) {
3309	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN:
3310		type = SISL_AFU_LUN_PROVISION_CREATE;
3311		size = lunprov->size;
3312		lun_id = 0;
3313		break;
3314	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN:
3315		type = SISL_AFU_LUN_PROVISION_DELETE;
3316		size = 0;
3317		lun_id = lunprov->lun_id;
3318		break;
3319	case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT:
3320		fc_port_regs = get_fc_port_regs(cfg, port);
3321
3322		reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]);
3323		lunprov->max_num_luns = reg;
3324		reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]);
3325		lunprov->cur_num_luns = reg;
3326		reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]);
3327		lunprov->max_cap_port = reg;
3328		reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]);
3329		lunprov->cur_cap_port = reg;
3330
3331		goto out;
3332	default:
3333		rc = -EINVAL;
3334		goto out;
3335	}
3336
3337	memset(&rcb, 0, sizeof(rcb));
3338	memset(&asa, 0, sizeof(asa));
3339	rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
3340	rcb.lun_id = lun_id;
3341	rcb.msi = SISL_MSI_RRQ_UPDATED;
3342	rcb.timeout = MC_LUN_PROV_TIMEOUT;
3343	rcb.ioasa = &asa;
3344
3345	rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION;
3346	rcb.cdb[1] = type;
3347	rcb.cdb[2] = port;
3348	put_unaligned_be64(size, &rcb.cdb[8]);
3349
3350	rc = send_afu_cmd(afu, &rcb);
3351	if (rc) {
3352		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3353			__func__, rc, asa.ioasc, asa.afu_extra);
3354		goto out;
3355	}
3356
3357	if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) {
3358		lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo;
3359		memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid));
3360	}
3361out:
3362	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3363	return rc;
3364}
3365
3366/**
3367 * cxlflash_afu_debug() - host AFU debug handler
3368 * @cfg:	Internal structure associated with the host.
3369 * @afu_dbg:	Kernel copy of userspace ioctl data structure.
3370 *
3371 * For debug requests requiring a data buffer, always provide an aligned
3372 * (cache line) buffer to the AFU to appease any alignment requirements.
3373 *
3374 * Return: 0 on success, -errno on failure
3375 */
3376static int cxlflash_afu_debug(struct cxlflash_cfg *cfg,
3377			      struct ht_cxlflash_afu_debug *afu_dbg)
3378{
3379	struct afu *afu = cfg->afu;
3380	struct device *dev = &cfg->dev->dev;
3381	struct sisl_ioarcb rcb;
3382	struct sisl_ioasa asa;
3383	char *buf = NULL;
3384	char *kbuf = NULL;
3385	void __user *ubuf = (__force void __user *)afu_dbg->data_ea;
3386	u16 req_flags = SISL_REQ_FLAGS_AFU_CMD;
3387	u32 ulen = afu_dbg->data_len;
3388	bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE;
3389	int rc = 0;
3390
3391	if (!afu_is_afu_debug(afu)) {
3392		rc = -ENOTSUPP;
3393		goto out;
3394	}
3395
3396	if (ulen) {
3397		req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN;
3398
3399		if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) {
3400			rc = -EINVAL;
3401			goto out;
3402		}
3403
3404		buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3405		if (unlikely(!buf)) {
3406			rc = -ENOMEM;
3407			goto out;
3408		}
3409
3410		kbuf = PTR_ALIGN(buf, cache_line_size());
3411
3412		if (is_write) {
3413			req_flags |= SISL_REQ_FLAGS_HOST_WRITE;
3414
3415			if (copy_from_user(kbuf, ubuf, ulen)) {
3416				rc = -EFAULT;
3417				goto out;
3418			}
3419		}
3420	}
3421
3422	memset(&rcb, 0, sizeof(rcb));
3423	memset(&asa, 0, sizeof(asa));
3424
3425	rcb.req_flags = req_flags;
3426	rcb.msi = SISL_MSI_RRQ_UPDATED;
3427	rcb.timeout = MC_AFU_DEBUG_TIMEOUT;
3428	rcb.ioasa = &asa;
3429
3430	if (ulen) {
3431		rcb.data_len = ulen;
3432		rcb.data_ea = (uintptr_t)kbuf;
3433	}
3434
3435	rcb.cdb[0] = SISL_AFU_CMD_DEBUG;
3436	memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd,
3437	       HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN);
3438
3439	rc = send_afu_cmd(afu, &rcb);
3440	if (rc) {
3441		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3442			__func__, rc, asa.ioasc, asa.afu_extra);
3443		goto out;
3444	}
3445
3446	if (ulen && !is_write) {
3447		if (copy_to_user(ubuf, kbuf, ulen))
3448			rc = -EFAULT;
3449	}
3450out:
3451	kfree(buf);
3452	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3453	return rc;
3454}
3455
3456/**
3457 * cxlflash_chr_ioctl() - character device IOCTL handler
3458 * @file:	File pointer for this device.
3459 * @cmd:	IOCTL command.
3460 * @arg:	Userspace ioctl data structure.
3461 *
3462 * A read/write semaphore is used to implement a 'drain' of currently
3463 * running ioctls. The read semaphore is taken at the beginning of each
3464 * ioctl thread and released upon concluding execution. Additionally the
3465 * semaphore should be released and then reacquired in any ioctl execution
3466 * path which will wait for an event to occur that is outside the scope of
3467 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running,
3468 * a thread simply needs to acquire the write semaphore.
3469 *
3470 * Return: 0 on success, -errno on failure
3471 */
3472static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd,
3473			       unsigned long arg)
3474{
3475	typedef int (*hioctl) (struct cxlflash_cfg *, void *);
3476
3477	struct cxlflash_cfg *cfg = file->private_data;
3478	struct device *dev = &cfg->dev->dev;
3479	char buf[sizeof(union cxlflash_ht_ioctls)];
3480	void __user *uarg = (void __user *)arg;
3481	struct ht_cxlflash_hdr *hdr;
3482	size_t size = 0;
3483	bool known_ioctl = false;
3484	int idx = 0;
3485	int rc = 0;
3486	hioctl do_ioctl = NULL;
3487
3488	static const struct {
3489		size_t size;
3490		hioctl ioctl;
3491	} ioctl_tbl[] = {	/* NOTE: order matters here */
3492	{ sizeof(struct ht_cxlflash_lun_provision),
3493		(hioctl)cxlflash_lun_provision },
3494	{ sizeof(struct ht_cxlflash_afu_debug),
3495		(hioctl)cxlflash_afu_debug },
3496	};
3497
3498	/* Hold read semaphore so we can drain if needed */
3499	down_read(&cfg->ioctl_rwsem);
3500
3501	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3502		__func__, cmd, idx, sizeof(ioctl_tbl));
3503
3504	switch (cmd) {
3505	case HT_CXLFLASH_LUN_PROVISION:
3506	case HT_CXLFLASH_AFU_DEBUG:
3507		known_ioctl = true;
3508		idx = _IOC_NR(HT_CXLFLASH_LUN_PROVISION) - _IOC_NR(cmd);
3509		size = ioctl_tbl[idx].size;
3510		do_ioctl = ioctl_tbl[idx].ioctl;
3511
3512		if (likely(do_ioctl))
3513			break;
3514
3515		fallthrough;
3516	default:
3517		rc = -EINVAL;
3518		goto out;
3519	}
3520
3521	if (unlikely(copy_from_user(&buf, uarg, size))) {
3522		dev_err(dev, "%s: copy_from_user() fail "
3523			"size=%lu cmd=%d (%s) uarg=%p\n",
3524			__func__, size, cmd, decode_hioctl(cmd), uarg);
3525		rc = -EFAULT;
3526		goto out;
3527	}
3528
3529	hdr = (struct ht_cxlflash_hdr *)&buf;
3530	if (hdr->version != HT_CXLFLASH_VERSION_0) {
3531		dev_dbg(dev, "%s: Version %u not supported for %s\n",
3532			__func__, hdr->version, decode_hioctl(cmd));
3533		rc = -EINVAL;
3534		goto out;
3535	}
3536
3537	if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) {
3538		dev_dbg(dev, "%s: Reserved/rflags populated\n", __func__);
3539		rc = -EINVAL;
3540		goto out;
3541	}
3542
3543	rc = do_ioctl(cfg, (void *)&buf);
3544	if (likely(!rc))
3545		if (unlikely(copy_to_user(uarg, &buf, size))) {
3546			dev_err(dev, "%s: copy_to_user() fail "
3547				"size=%lu cmd=%d (%s) uarg=%p\n",
3548				__func__, size, cmd, decode_hioctl(cmd), uarg);
3549			rc = -EFAULT;
3550		}
3551
3552	/* fall through to exit */
3553
3554out:
3555	up_read(&cfg->ioctl_rwsem);
3556	if (unlikely(rc && known_ioctl))
3557		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3558			__func__, decode_hioctl(cmd), cmd, rc);
3559	else
3560		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3561			__func__, decode_hioctl(cmd), cmd, rc);
3562	return rc;
3563}
3564
3565/*
3566 * Character device file operations
3567 */
3568static const struct file_operations cxlflash_chr_fops = {
3569	.owner          = THIS_MODULE,
3570	.open           = cxlflash_chr_open,
3571	.unlocked_ioctl	= cxlflash_chr_ioctl,
3572	.compat_ioctl	= compat_ptr_ioctl,
3573};
3574
3575/**
3576 * init_chrdev() - initialize the character device for the host
3577 * @cfg:	Internal structure associated with the host.
3578 *
3579 * Return: 0 on success, -errno on failure
3580 */
3581static int init_chrdev(struct cxlflash_cfg *cfg)
3582{
3583	struct device *dev = &cfg->dev->dev;
3584	struct device *char_dev;
3585	dev_t devno;
3586	int minor;
3587	int rc = 0;
3588
3589	minor = cxlflash_get_minor();
3590	if (unlikely(minor < 0)) {
3591		dev_err(dev, "%s: Exhausted allowed adapters\n", __func__);
3592		rc = -ENOSPC;
3593		goto out;
3594	}
3595
3596	devno = MKDEV(cxlflash_major, minor);
3597	cdev_init(&cfg->cdev, &cxlflash_chr_fops);
3598
3599	rc = cdev_add(&cfg->cdev, devno, 1);
3600	if (rc) {
3601		dev_err(dev, "%s: cdev_add failed rc=%d\n", __func__, rc);
3602		goto err1;
3603	}
3604
3605	char_dev = device_create(cxlflash_class, NULL, devno,
3606				 NULL, "cxlflash%d", minor);
3607	if (IS_ERR(char_dev)) {
3608		rc = PTR_ERR(char_dev);
3609		dev_err(dev, "%s: device_create failed rc=%d\n",
3610			__func__, rc);
3611		goto err2;
3612	}
3613
3614	cfg->chardev = char_dev;
3615out:
3616	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3617	return rc;
3618err2:
3619	cdev_del(&cfg->cdev);
3620err1:
3621	cxlflash_put_minor(minor);
3622	goto out;
3623}
3624
3625/**
3626 * cxlflash_probe() - PCI entry point to add host
3627 * @pdev:	PCI device associated with the host.
3628 * @dev_id:	PCI device id associated with device.
3629 *
3630 * The device will initially start out in a 'probing' state and
3631 * transition to the 'normal' state at the end of a successful
3632 * probe. Should an EEH event occur during probe, the notification
3633 * thread (error_detected()) will wait until the probe handler
3634 * is nearly complete. At that time, the device will be moved to
3635 * a 'probed' state and the EEH thread woken up to drive the slot
3636 * reset and recovery (device moves to 'normal' state). Meanwhile,
3637 * the probe will be allowed to exit successfully.
3638 *
3639 * Return: 0 on success, -errno on failure
3640 */
3641static int cxlflash_probe(struct pci_dev *pdev,
3642			  const struct pci_device_id *dev_id)
3643{
3644	struct Scsi_Host *host;
3645	struct cxlflash_cfg *cfg = NULL;
3646	struct device *dev = &pdev->dev;
3647	struct dev_dependent_vals *ddv;
3648	int rc = 0;
3649	int k;
3650
3651	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
3652		__func__, pdev->irq);
3653
3654	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
3655	driver_template.max_sectors = ddv->max_sectors;
3656
3657	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
3658	if (!host) {
3659		dev_err(dev, "%s: scsi_host_alloc failed\n", __func__);
3660		rc = -ENOMEM;
3661		goto out;
3662	}
3663
3664	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
3665	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
3666	host->unique_id = host->host_no;
3667	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
3668
3669	cfg = shost_priv(host);
3670	cfg->state = STATE_PROBING;
3671	cfg->host = host;
3672	rc = alloc_mem(cfg);
3673	if (rc) {
3674		dev_err(dev, "%s: alloc_mem failed\n", __func__);
3675		rc = -ENOMEM;
3676		scsi_host_put(cfg->host);
3677		goto out;
3678	}
3679
3680	cfg->init_state = INIT_STATE_NONE;
3681	cfg->dev = pdev;
3682	cfg->cxl_fops = cxlflash_cxl_fops;
3683	cfg->ops = cxlflash_assign_ops(ddv);
3684	WARN_ON_ONCE(!cfg->ops);
3685
3686	/*
3687	 * Promoted LUNs move to the top of the LUN table. The rest stay on
3688	 * the bottom half. The bottom half grows from the end (index = 255),
3689	 * whereas the top half grows from the beginning (index = 0).
3690	 *
3691	 * Initialize the last LUN index for all possible ports.
3692	 */
3693	cfg->promote_lun_index = 0;
3694
3695	for (k = 0; k < MAX_FC_PORTS; k++)
3696		cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1;
3697
3698	cfg->dev_id = (struct pci_device_id *)dev_id;
3699
3700	init_waitqueue_head(&cfg->tmf_waitq);
3701	init_waitqueue_head(&cfg->reset_waitq);
3702
3703	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
3704	cfg->lr_state = LINK_RESET_INVALID;
3705	cfg->lr_port = -1;
3706	spin_lock_init(&cfg->tmf_slock);
3707	mutex_init(&cfg->ctx_tbl_list_mutex);
3708	mutex_init(&cfg->ctx_recovery_mutex);
3709	init_rwsem(&cfg->ioctl_rwsem);
3710	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
3711	INIT_LIST_HEAD(&cfg->lluns);
3712
3713	pci_set_drvdata(pdev, cfg);
3714
3715	rc = init_pci(cfg);
3716	if (rc) {
3717		dev_err(dev, "%s: init_pci failed rc=%d\n", __func__, rc);
3718		goto out_remove;
3719	}
3720	cfg->init_state = INIT_STATE_PCI;
3721
3722	cfg->afu_cookie = cfg->ops->create_afu(pdev);
3723	if (unlikely(!cfg->afu_cookie)) {
3724		dev_err(dev, "%s: create_afu failed\n", __func__);
3725		rc = -ENOMEM;
3726		goto out_remove;
3727	}
3728
3729	rc = init_afu(cfg);
3730	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
3731		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3732		goto out_remove;
3733	}
3734	cfg->init_state = INIT_STATE_AFU;
3735
3736	rc = init_scsi(cfg);
3737	if (rc) {
3738		dev_err(dev, "%s: init_scsi failed rc=%d\n", __func__, rc);
3739		goto out_remove;
3740	}
3741	cfg->init_state = INIT_STATE_SCSI;
3742
3743	rc = init_chrdev(cfg);
3744	if (rc) {
3745		dev_err(dev, "%s: init_chrdev failed rc=%d\n", __func__, rc);
3746		goto out_remove;
3747	}
3748	cfg->init_state = INIT_STATE_CDEV;
3749
3750	if (wq_has_sleeper(&cfg->reset_waitq)) {
3751		cfg->state = STATE_PROBED;
3752		wake_up_all(&cfg->reset_waitq);
3753	} else
3754		cfg->state = STATE_NORMAL;
3755out:
3756	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3757	return rc;
3758
3759out_remove:
3760	cfg->state = STATE_PROBED;
3761	cxlflash_remove(pdev);
3762	goto out;
3763}
3764
3765/**
3766 * cxlflash_pci_error_detected() - called when a PCI error is detected
3767 * @pdev:	PCI device struct.
3768 * @state:	PCI channel state.
3769 *
3770 * When an EEH occurs during an active reset, wait until the reset is
3771 * complete and then take action based upon the device state.
3772 *
3773 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
3774 */
3775static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
3776						    pci_channel_state_t state)
3777{
3778	int rc = 0;
3779	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3780	struct device *dev = &cfg->dev->dev;
3781
3782	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);
3783
3784	switch (state) {
3785	case pci_channel_io_frozen:
3786		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
3787					     cfg->state != STATE_PROBING);
3788		if (cfg->state == STATE_FAILTERM)
3789			return PCI_ERS_RESULT_DISCONNECT;
3790
3791		cfg->state = STATE_RESET;
3792		scsi_block_requests(cfg->host);
3793		drain_ioctls(cfg);
3794		rc = cxlflash_mark_contexts_error(cfg);
3795		if (unlikely(rc))
3796			dev_err(dev, "%s: Failed to mark user contexts rc=%d\n",
3797				__func__, rc);
3798		term_afu(cfg);
3799		return PCI_ERS_RESULT_NEED_RESET;
3800	case pci_channel_io_perm_failure:
3801		cfg->state = STATE_FAILTERM;
3802		wake_up_all(&cfg->reset_waitq);
3803		scsi_unblock_requests(cfg->host);
3804		return PCI_ERS_RESULT_DISCONNECT;
3805	default:
3806		break;
3807	}
3808	return PCI_ERS_RESULT_NEED_RESET;
3809}
3810
3811/**
3812 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
3813 * @pdev:	PCI device struct.
3814 *
3815 * This routine is called by the pci error recovery code after the PCI
3816 * slot has been reset, just before we should resume normal operations.
3817 *
3818 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
3819 */
3820static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
3821{
3822	int rc = 0;
3823	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3824	struct device *dev = &cfg->dev->dev;
3825
3826	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3827
3828	rc = init_afu(cfg);
3829	if (unlikely(rc)) {
3830		dev_err(dev, "%s: EEH recovery failed rc=%d\n", __func__, rc);
3831		return PCI_ERS_RESULT_DISCONNECT;
3832	}
3833
3834	return PCI_ERS_RESULT_RECOVERED;
3835}
3836
3837/**
3838 * cxlflash_pci_resume() - called when normal operation can resume
3839 * @pdev:	PCI device struct
3840 */
3841static void cxlflash_pci_resume(struct pci_dev *pdev)
3842{
3843	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
3844	struct device *dev = &cfg->dev->dev;
3845
3846	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);
3847
3848	cfg->state = STATE_NORMAL;
3849	wake_up_all(&cfg->reset_waitq);
3850	scsi_unblock_requests(cfg->host);
3851}
3852
3853/**
3854 * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class
3855 * @dev:	Character device.
3856 * @mode:	Mode that can be used to verify access.
3857 *
3858 * Return: Allocated string describing the devtmpfs structure.
3859 */
3860static char *cxlflash_devnode(const struct device *dev, umode_t *mode)
3861{
3862	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3863}
3864
3865/**
3866 * cxlflash_class_init() - create character device class
3867 *
3868 * Return: 0 on success, -errno on failure
3869 */
3870static int cxlflash_class_init(void)
3871{
3872	dev_t devno;
3873	int rc = 0;
3874
3875	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3876	if (unlikely(rc)) {
3877		pr_err("%s: alloc_chrdev_region failed rc=%d\n", __func__, rc);
3878		goto out;
3879	}
3880
3881	cxlflash_major = MAJOR(devno);
3882
3883	cxlflash_class = class_create("cxlflash");
3884	if (IS_ERR(cxlflash_class)) {
3885		rc = PTR_ERR(cxlflash_class);
3886		pr_err("%s: class_create failed rc=%d\n", __func__, rc);
3887		goto err;
3888	}
3889
3890	cxlflash_class->devnode = cxlflash_devnode;
3891out:
3892	pr_debug("%s: returning rc=%d\n", __func__, rc);
3893	return rc;
3894err:
3895	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3896	goto out;
3897}
3898
3899/**
3900 * cxlflash_class_exit() - destroy character device class
3901 */
3902static void cxlflash_class_exit(void)
3903{
3904	dev_t devno = MKDEV(cxlflash_major, 0);
3905
3906	class_destroy(cxlflash_class);
3907	unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS);
3908}
3909
3910static const struct pci_error_handlers cxlflash_err_handler = {
3911	.error_detected = cxlflash_pci_error_detected,
3912	.slot_reset = cxlflash_pci_slot_reset,
3913	.resume = cxlflash_pci_resume,
3914};
3915
3916/*
3917 * PCI device structure
3918 */
3919static struct pci_driver cxlflash_driver = {
3920	.name = CXLFLASH_NAME,
3921	.id_table = cxlflash_pci_table,
3922	.probe = cxlflash_probe,
3923	.remove = cxlflash_remove,
3924	.shutdown = cxlflash_remove,
3925	.err_handler = &cxlflash_err_handler,
3926};
3927
3928/**
3929 * init_cxlflash() - module entry point
3930 *
3931 * Return: 0 on success, -errno on failure
3932 */
3933static int __init init_cxlflash(void)
3934{
3935	int rc;
3936
3937	check_sizes();
3938	cxlflash_list_init();
3939	rc = cxlflash_class_init();
3940	if (unlikely(rc))
3941		goto out;
3942
3943	rc = pci_register_driver(&cxlflash_driver);
3944	if (unlikely(rc))
3945		goto err;
3946out:
3947	pr_debug("%s: returning rc=%d\n", __func__, rc);
3948	return rc;
3949err:
3950	cxlflash_class_exit();
3951	goto out;
3952}
3953
3954/**
3955 * exit_cxlflash() - module exit point
3956 */
3957static void __exit exit_cxlflash(void)
3958{
3959	cxlflash_term_global_luns();
3960	cxlflash_free_errpage();
3961
3962	pci_unregister_driver(&cxlflash_driver);
3963	cxlflash_class_exit();
3964}
3965
3966module_init(init_cxlflash);
3967module_exit(exit_cxlflash);
3968