// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2017 QLogic Corporation
 */
#include "qla_nvme.h"
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>

static struct nvme_fc_port_template qla_nvme_fc_transport;

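/*
 * qla_nvme_register_remote - register an fcport with the FC-NVMe transport
 * @vha: adapter instance
 * @fcport: fabric port to expose as an NVMe remote port
 *
 * Builds an nvme_fc_port_info from the fcport's PRLI service parameters
 * and hands it to nvme_fc_register_remoteport().  Ports that advertise
 * neither the target nor the discovery role, or that are already
 * registered, are skipped.  Returns 0 on success or when registration is
 * not applicable, otherwise the transport's error code.
 */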
int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
{
	struct qla_nvme_rport *rport;
	struct nvme_fc_port_info req;
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return 0;

	if (!vha->flags.nvme_enabled) {
		ql_log(ql_log_info, vha, 0x2100,
		    "%s: Not registering target since Host NVME is not enabled\n",
		    __func__);
		return 0;
	}

	if (qla_nvme_register_hba(vha))
		return 0;

	if (!vha->nvme_local_port)
		return 0;

	if (!(fcport->nvme_prli_service_param &
	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)) ||
	    (fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return 0;

	fcport->nvme_flag &= ~NVME_FLAG_RESETTING;

	memset(&req, 0, sizeof(struct nvme_fc_port_info));
	req.port_name = wwn_to_u64(fcport->port_name);
	req.node_name = wwn_to_u64(fcport->node_name);
	req.port_role = 0;
	req.dev_loss_tmo = 0;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
		req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
		req.port_role |= FC_PORT_ROLE_NVME_TARGET;

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
		req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

	req.port_id = fcport->d_id.b24;

	ql_log(ql_log_info, vha, 0x2102,
	    "%s: traddr=nn-0x%016llx:pn-0x%016llx PortID:%06x\n",
	    __func__, req.node_name, req.port_name,
	    req.port_id);

	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
	    &fcport->nvme_remote_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x212e,
		    "Failed to register remote port. Transport returned %d\n",
		    ret);
		return ret;
	}

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
		ql_log(ql_log_info, vha, 0x212a,
		       "PortID:%06x Supports SLER\n", req.port_id);

	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
		ql_log(ql_log_info, vha, 0x212b,
		       "PortID:%06x Supports PI control\n", req.port_id);

	rport = fcport->nvme_remote_port->private;
	rport->fcport = fcport;

	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
	return 0;
}

/*
 * Allocate a hardware queue for NVMe traffic.  Reuses the qpair already
 * mapped at @qidx when one exists (or the base qpair when MQ is not in
 * use); otherwise creates a new qpair.
 */
static int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport,
    unsigned int qidx, u16 qsize, void **handle)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha;
	struct qla_qpair *qpair;

	/* Map admin queue and 1st IO queue to index 0 */
	if (qidx)
		qidx--;

	vha = (struct scsi_qla_host *)lport->private;
	ha = vha->hw;

	ql_log(ql_log_info, vha, 0x2104,
	    "%s: handle %p, idx=%d, qsize %d\n",
	    __func__, handle, qidx, qsize);

	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
		ql_log(ql_log_warn, vha, 0x212f,
		    "%s: Illegal qidx=%d. Max=%d\n",
		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
		return -EINVAL;
	}

	/* Use base qpair if max_qpairs is 0 */
	if (!ha->max_qpairs) {
		qpair = ha->base_qpair;
	} else {
		if (ha->queue_pair_map[qidx]) {
			*handle = ha->queue_pair_map[qidx];
			ql_log(ql_log_info, vha, 0x2121,
			       "Returning existing qpair of %p for idx=%x\n",
			       *handle, qidx);
			return 0;
		}

		qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
		if (!qpair) {
			ql_log(ql_log_warn, vha, 0x2122,
			       "Failed to allocate qpair\n");
			return -EINVAL;
		}
	}
	*handle = qpair;

	return 0;
}

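/*
 * Last-reference teardown for an FCP command SRB.  The SRB is detached
 * from its nvme_private under cmd_lock (so a racing abort sees
 * priv->sp == NULL), the transport status is filled in from comp_status,
 * fd->done() completes the request, and the SRB returns to its qpair pool.
 */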
static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_fcp_req *fd;
	struct srb_iocb *nvme;
	unsigned long flags;

	if (!priv)
		goto out;

	nvme = &sp->u.iocb_cmd;
	fd = nvme->u.nvme.desc;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	if (priv->comp_status == QLA_SUCCESS) {
		fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
		fd->status = NVME_SC_SUCCESS;
	} else {
		fd->rcv_rsplen = 0;
		fd->transferred_length = 0;
		fd->status = NVME_SC_INTERNAL;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd->done(fd);
out:
	qla2xxx_rel_qpair_sp(sp->qpair, sp);
}

static void qla_nvme_release_ls_cmd_kref(struct kref *kref)
{
	struct srb *sp = container_of(kref, struct srb, cmd_kref);
	struct nvme_private *priv = (struct nvme_private *)sp->priv;
	struct nvmefc_ls_req *fd;
	unsigned long flags;

	if (!priv)
		goto out;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	priv->sp = NULL;
	sp->priv = NULL;
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	fd = priv->fd;

	fd->done(fd, priv->comp_status);
out:
	qla2x00_rel_sp(sp);
}

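/*
 * LS completions are bounced through a workqueue: qla_nvme_sp_ls_done()
 * below only records the completion status and schedules this work, so
 * the final kref_put(), and with it fd->done(), runs in process context
 * rather than in the context that completed the SRB.
 */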
static void qla_nvme_ls_complete(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, ls_work);

	kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref);
}

static void qla_nvme_sp_ls_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0))
		return;

	if (res)
		res = -EINVAL;

	priv->comp_status = res;
	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
	schedule_work(&priv->ls_work);
}

/* It is assumed that the qpair lock is held. */
static void qla_nvme_sp_done(srb_t *sp, int res)
{
	struct nvme_private *priv = sp->priv;

	priv->comp_status = res;
	kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref);
}

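/*
 * Worker that performs the actual firmware abort of a command.  The
 * scheduler of this work (qla_nvme_ls_abort()/qla_nvme_fcp_abort()) took
 * an extra kref on the SRB; it is dropped here once the abort attempt is
 * done.
 */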
static void qla_nvme_abort_work(struct work_struct *work)
{
	struct nvme_private *priv =
		container_of(work, struct nvme_private, abort_work);
	srb_t *sp = priv->sp;
	fc_port_t *fcport = sp->fcport;
	struct qla_hw_data *ha = fcport->vha->hw;
	int rval;

	ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
	       "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n",
	       __func__, sp, sp->handle, fcport, fcport->deleted);

	if (!ha->flags.fw_started && fcport->deleted)
		goto out;

	if (ha->flags.host_shutting_down) {
		ql_log(ql_log_info, sp->fcport->vha, 0xffff,
		    "%s Calling done on sp: %p, type: 0x%x\n",
		    __func__, sp, sp->type);
		sp->done(sp, 0);
		goto out;
	}

	rval = ha->isp_ops->abort_command(sp);

	ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
	    "%s: %s command for sp=%p, handle=%x on fcport=%p rval=%x\n",
	    __func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
	    sp, sp->handle, fcport, rval);

out:
	/* kref_get was done before the work was scheduled. */
	kref_put(&sp->cmd_kref, sp->put_fn);
}

static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}

	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

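/*
 * qla_nvme_ls_req - transport entry point for an NVMe-FC LS request
 *
 * Allocates an SRB of type SRB_NVME_LS, links it to the request's
 * nvme_private, syncs the command buffer for DMA, and queues the IOCB
 * through qla2x00_start_sp().  On failure the SRB/private linkage is
 * undone before the SRB is released.
 */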
static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
{
	struct qla_nvme_rport *qla_rport = rport->private;
	fc_port_t *fcport = qla_rport->fcport;
	struct srb_iocb *nvme;
	struct nvme_private *priv = fd->private;
	struct scsi_qla_host *vha;
	int rval = QLA_FUNCTION_FAILED;
	struct qla_hw_data *ha;
	srb_t *sp;

	if (!fcport || fcport->deleted)
		return rval;

	vha = fcport->vha;
	ha = vha->hw;

	if (!ha->flags.fw_started)
		return rval;

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
	if (!sp)
		return rval;

	sp->type = SRB_NVME_LS;
	sp->name = "nvme_ls";
	sp->done = qla_nvme_sp_ls_done;
	sp->put_fn = qla_nvme_release_ls_cmd_kref;
	sp->priv = priv;
	priv->sp = sp;
	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	nvme = &sp->u.iocb_cmd;
	priv->fd = fd;
	nvme->u.nvme.desc = fd;
	nvme->u.nvme.dir = 0;
	nvme->u.nvme.dl = 0;
	nvme->u.nvme.cmd_len = fd->rqstlen;
	nvme->u.nvme.rsp_len = fd->rsplen;
	nvme->u.nvme.rsp_dma = fd->rspdma;
	nvme->u.nvme.timeout_sec = fd->timeout;
	nvme->u.nvme.cmd_dma = fd->rqstdma;
	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
	    fd->rqstlen, DMA_TO_DEVICE);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x700e,
		    "qla2x00_start_sp failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2x00_rel_sp(sp);
		return rval;
	}

	return rval;
}

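/*
 * Abort an outstanding FCP command.  If the SRB is still live (priv->sp
 * is set and its kref has not dropped to zero), take a reference and
 * defer the firmware abort to qla_nvme_abort_work().
 */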
static void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	struct nvme_private *priv = fd->private;
	unsigned long flags;

	spin_lock_irqsave(&priv->cmd_lock, flags);
	if (!priv->sp) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	if (!kref_get_unless_zero(&priv->sp->cmd_kref)) {
		spin_unlock_irqrestore(&priv->cmd_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&priv->cmd_lock, flags);

	INIT_WORK(&priv->abort_work, qla_nvme_abort_work);
	schedule_work(&priv->abort_work);
}

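/*
 * Build and ring a Command Type NVME IOCB on the qpair's request queue.
 * The first DSD lives in the command IOCB itself; further data segments
 * spill into Continuation Type 1 IOCBs, five DSDs per continuation.
 * Takes and releases the qpair's qp_lock internally.
 */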
static inline int qla2x00_start_nvme_mq(srb_t *sp)
{
	unsigned long flags;
	uint32_t *clr_ptr;
	uint32_t handle;
	struct cmd_nvme *cmd_pkt;
	uint16_t cnt, i;
	uint16_t req_cnt;
	uint16_t tot_dsds;
	uint16_t avail_dsds;
	struct dsd64 *cur_dsd;
	struct req_que *req = NULL;
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct qla_hw_data *ha = vha->hw;
	struct qla_qpair *qpair = sp->qpair;
	struct srb_iocb *nvme = &sp->u.iocb_cmd;
	struct scatterlist *sgl, *sg;
	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
	struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
	uint32_t rval = QLA_SUCCESS;

	/* Setup qpair pointers */
	req = qpair->req;
	tot_dsds = fd->sg_cnt;

	/* Acquire qpair specific lock */
	spin_lock_irqsave(&qpair->qp_lock, flags);

	handle = qla2xxx_get_next_handle(req);
	if (handle == 0) {
		rval = -EBUSY;
		goto queuing_error;
	}
	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
	if (req->cnt < (req_cnt + 2)) {
		if (IS_SHADOW_REG_CAPABLE(ha)) {
			cnt = *req->out_ptr;
		} else {
			cnt = rd_reg_dword_relaxed(req->req_q_out);
			if (qla2x00_check_reg16_for_disconnect(vha, cnt))
				goto queuing_error;
		}

		/* Recompute free ring entries from the consumer index. */
		if (req->ring_index < cnt)
			req->cnt = cnt - req->ring_index;
		else
			req->cnt = req->length - (req->ring_index - cnt);

		if (req->cnt < (req_cnt + 2)) {
			rval = -EBUSY;
			goto queuing_error;
		}
	}

	if (unlikely(!fd->sqid)) {
		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
			nvme->u.nvme.aen_op = 1;
			atomic_inc(&ha->nvme_active_aen_cnt);
		}
	}

	/* Build command packet. */
	req->current_outstanding_cmd = handle;
	req->outstanding_cmds[handle] = sp;
	sp->handle = handle;
	req->cnt -= req_cnt;

	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
	cmd_pkt->handle = make_handle(req->id, handle);

	/* Zero out remaining portion of packet. */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);

	cmd_pkt->entry_status = 0;

	/* Update entry type to indicate Command NVME IOCB */
	cmd_pkt->entry_type = COMMAND_NVME;

	/* No data transfer; how do we check buffer len == 0? */
	if (fd->io_dir == NVMEFC_FCP_READ) {
		cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
		qpair->counters.input_bytes += fd->payload_length;
		qpair->counters.input_requests++;
	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
		cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
		if ((vha->flags.nvme_first_burst) &&
		    (sp->fcport->nvme_prli_service_param &
		     NVME_PRLI_SP_FIRST_BURST)) {
			if ((fd->payload_length <=
			     sp->fcport->nvme_first_burst_size) ||
			    (sp->fcport->nvme_first_burst_size == 0))
				cmd_pkt->control_flags |=
					cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
		}
		qpair->counters.output_bytes += fd->payload_length;
		qpair->counters.output_requests++;
	} else if (fd->io_dir == 0) {
		cmd_pkt->control_flags = 0;
	}
	/* Set BIT_13 of control flags for Async event */
	if (vha->flags.nvme2_enabled &&
	    cmd->sqe.common.opcode == nvme_admin_async_event) {
		cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
	}

	/* Set NPORT-ID */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;

	/* NVME RSP IU */
	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
	put_unaligned_le64(fd->rspdma, &cmd_pkt->nvme_rsp_dseg_address);

	/* NVME CMND IU */
	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
	cmd_pkt->nvme_cmnd_dseg_address = cpu_to_le64(fd->cmddma);

	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);

	/* One DSD is available in the Command Type NVME IOCB */
	avail_dsds = 1;
	cur_dsd = &cmd_pkt->nvme_dsd;
	sgl = fd->first_sgl;

	/* Load data segments */
	for_each_sg(sgl, sg, tot_dsds, i) {
		cont_a64_entry_t *cont_pkt;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			/*
			 * Five DSDs are available in the Continuation
			 * Type 1 IOCB.
			 */

			/* Adjust ring index */
			req->ring_index++;
			if (req->ring_index == req->length) {
				req->ring_index = 0;
				req->ring_ptr = req->ring;
			} else {
				req->ring_ptr++;
			}
			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
			put_unaligned_le32(CONTINUE_A64_TYPE,
					   &cont_pkt->entry_type);

			cur_dsd = cont_pkt->dsd;
			avail_dsds = ARRAY_SIZE(cont_pkt->dsd);
		}

		append_dsd64(&cur_dsd, sg);
		avail_dsds--;
	}

	/* Set total entry count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	req->ring_index++;
	if (req->ring_index == req->length) {
		req->ring_index = 0;
		req->ring_ptr = req->ring;
	} else {
		req->ring_ptr++;
	}

	/* Set chip new ring index. */
	wrt_reg_dword(req->req_q_in, req->ring_index);

queuing_error:
	spin_unlock_irqrestore(&qpair->qp_lock, flags);

	return rval;
}

/*
 * Post an FCP command.  Validates the qpair/fcport state, allocates a
 * qpair SRB tied to the request's nvme_private, and queues the IOCB via
 * qla2x00_start_nvme_mq().
 */
static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
    struct nvmefc_fcp_req *fd)
{
	fc_port_t *fcport;
	struct srb_iocb *nvme;
	struct scsi_qla_host *vha;
	int rval;
	srb_t *sp;
	struct qla_qpair *qpair = hw_queue_handle;
	struct nvme_private *priv = fd->private;
	struct qla_nvme_rport *qla_rport = rport->private;

	if (!priv) {
		/* nvme association has been torn down */
		return -ENODEV;
	}

	fcport = qla_rport->fcport;

	if (!qpair || !fcport)
		return -ENODEV;

	if (!qpair->fw_started || fcport->deleted)
		return -EBUSY;

	vha = fcport->vha;

	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
		return -ENODEV;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    !qpair->fw_started || fcport->deleted)
		return -EBUSY;

	/*
	 * If we know the dev is going away while the transport is still sending
	 * IO's return busy back to stall the IO Q.  This happens when the
	 * link goes away and fw hasn't notified us yet, but IO's are being
	 * returned. If the dev comes back quickly we won't exhaust the IO
	 * retry count at the core.
	 */
	if (fcport->nvme_flag & NVME_FLAG_RESETTING)
		return -EBUSY;

	/* Alloc SRB structure */
	sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
	if (!sp)
		return -EBUSY;

	kref_init(&sp->cmd_kref);
	spin_lock_init(&priv->cmd_lock);
	sp->priv = priv;
	priv->sp = sp;
	sp->type = SRB_NVME_CMD;
	sp->name = "nvme_cmd";
	sp->done = qla_nvme_sp_done;
	sp->put_fn = qla_nvme_release_fcp_cmd_kref;
	sp->qpair = qpair;
	sp->vha = vha;
	nvme = &sp->u.iocb_cmd;
	nvme->u.nvme.desc = fd;

	rval = qla2x00_start_nvme_mq(sp);
	if (rval != QLA_SUCCESS) {
		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
		    "qla2x00_start_nvme_mq failed = %d\n", rval);
		sp->priv = NULL;
		priv->sp = NULL;
		qla2xxx_rel_qpair_sp(sp->qpair, sp);
	}

	return rval;
}

static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
	struct scsi_qla_host *vha = lport->private;

	ql_log(ql_log_info, vha, 0x210f,
	    "localport delete of %p completed.\n", vha->nvme_local_port);
	vha->nvme_local_port = NULL;
	complete(&vha->nvme_del_done);
}

static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
{
	fc_port_t *fcport;
	struct qla_nvme_rport *qla_rport = rport->private;

	fcport = qla_rport->fcport;
	fcport->nvme_remote_port = NULL;
	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
	fcport->nvme_flag &= ~NVME_FLAG_DELETING;
	ql_log(ql_log_info, fcport->vha, 0x2110,
	    "remoteport_delete of %p %8phN completed.\n",
	    fcport, fcport->port_name);
	complete(&fcport->nvme_del_done);
}

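/*
 * FC-NVMe transport template.  max_hw_queues and dma_boundary are only
 * defaults: qla_nvme_register_hba() clamps max_hw_queues to the number of
 * qpairs the HBA actually supports and copies the SCSI host's
 * dma_boundary before registering the local port.
 */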
static struct nvme_fc_port_template qla_nvme_fc_transport = {
	.localport_delete = qla_nvme_localport_delete,
	.remoteport_delete = qla_nvme_remoteport_delete,
	.create_queue   = qla_nvme_alloc_queue,
	.delete_queue	= NULL,
	.ls_req		= qla_nvme_ls_req,
	.ls_abort	= qla_nvme_ls_abort,
	.fcp_io		= qla_nvme_post_cmd,
	.fcp_abort	= qla_nvme_fcp_abort,
	.max_hw_queues  = 8,
	.max_sgl_segments = 1024,
	.max_dif_sgl_segments = 64,
	.dma_boundary = 0xFFFFFFFF,
	.local_priv_sz  = 8,
	.remote_priv_sz = sizeof(struct qla_nvme_rport),
	.lsrqst_priv_sz = sizeof(struct nvme_private),
	.fcprqst_priv_sz = sizeof(struct nvme_private),
};

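/*
 * Unregister @fcport from the FC-NVMe transport and wait for the
 * remoteport_delete callback to signal nvme_del_done.  When the driver
 * itself is being removed, dev_loss_tmo is first forced to zero so the
 * transport drops the port without waiting out the devloss timer.
 */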
void qla_nvme_unregister_remote_port(struct fc_port *fcport)
{
	int ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	ql_log(ql_log_warn, NULL, 0x2112,
	    "%s: unregister remoteport on %p %8phN\n",
	    __func__, fcport, fcport->port_name);

	if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
		nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);

	init_completion(&fcport->nvme_del_done);
	ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
	if (ret)
		ql_log(ql_log_info, fcport->vha, 0x2114,
		    "%s: Failed to unregister nvme_remote_port (%d)\n",
		    __func__, ret);
	wait_for_completion(&fcport->nvme_del_done);
}

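/*
 * Tear down the NVMe local port for @vha, blocking until the transport's
 * localport_delete callback completes the unregistration.
 */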
void qla_nvme_delete(struct scsi_qla_host *vha)
{
	int nv_ret;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return;

	if (vha->nvme_local_port) {
		init_completion(&vha->nvme_del_done);
		ql_log(ql_log_info, vha, 0x2116,
		    "unregister localport=%p\n",
		    vha->nvme_local_port);
		nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
		if (nv_ret)
			ql_log(ql_log_info, vha, 0x2115,
			    "Unregister of localport failed\n");
		else
			wait_for_completion(&vha->nvme_del_done);
	}
}

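/*
 * qla_nvme_register_hba - register @vha as an FC-NVMe local port
 *
 * Clamps the template's max_hw_queues to the HBA's qpair count, fills in
 * the local port's WWNN/WWPN/port ID, and registers with the FC-NVMe
 * transport.  The get_device() reference passed to the transport is
 * expected to be dropped by the transport once the local port is freed.
 */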
int qla_nvme_register_hba(struct scsi_qla_host *vha)
{
	struct nvme_fc_port_template *tmpl;
	struct qla_hw_data *ha;
	struct nvme_fc_port_info pinfo;
	int ret = -EINVAL;

	if (!IS_ENABLED(CONFIG_NVME_FC))
		return ret;

	ha = vha->hw;
	tmpl = &qla_nvme_fc_transport;

	WARN_ON(vha->nvme_local_port);

	qla_nvme_fc_transport.max_hw_queues =
	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
		(uint8_t)(ha->max_qpairs ? ha->max_qpairs : 1));

	pinfo.node_name = wwn_to_u64(vha->node_name);
	pinfo.port_name = wwn_to_u64(vha->port_name);
	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
	pinfo.port_id = vha->d_id.b24;

	ql_log(ql_log_info, vha, 0xffff,
	    "register_localport: host-traddr=nn-0x%llx:pn-0x%llx on portID:%x\n",
	    pinfo.node_name, pinfo.port_name, pinfo.port_id);
	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;

	ret = nvme_fc_register_localport(&pinfo, tmpl,
	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
	if (ret) {
		ql_log(ql_log_warn, vha, 0xffff,
		    "register_localport failed: ret=%x\n", ret);
	} else {
		vha->nvme_local_port->private = vha;
	}

	return ret;
}