/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2009-2015 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
#include <linux/vmalloc.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* driver data associated with the job */
	void *dd_data;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
	struct lpfc_nodelist *ndlp;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_dmabuf *rmp;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
struct bsg_job_data {
	uint32_t type;
	struct bsg_job *set_job; /* job waiting for this iocb to finish */
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};
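
/*
 * Lifecycle sketch (illustrative comment only, not compiled code): each
 * request handler in this file allocates one bsg_job_data per job and links
 * it in three directions so the completion and timeout paths can find each
 * other. The flow below is a simplified assumption drawn from the iocb-based
 * handlers later in this file:
 *
 *	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
 *	if (!dd_data)
 *		return -ENOMEM;
 *	dd_data->type = TYPE_IOCB;
 *	dd_data->set_job = job;        completion handler finds the job here
 *	cmdiocbq->context1 = dd_data;  iocb carries the tracking structure
 *	job->dd_data = dd_data;        timeout handler finds the iocb here
 */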

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};

#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
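
/*
 * The macro above is the classic pre-offsetof() idiom: taking the address
 * of the "un" member through a NULL pointer yields the member's byte offset,
 * i.e. the size of everything in lpfc_sli_ct_request that precedes the
 * union. An equivalent form using offsetof() from <linux/stddef.h> would be
 * (a sketch, not a change to the definition above):
 *
 *	#define ELX_LOOPBACK_HEADER_SZ \
 *		offsetof(struct lpfc_sli_ct_request, un)
 */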

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

static void
lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	if (mlist) {
		list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
					 list) {
			lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
			list_del(&mlast->list);
			kfree(mlast);
		}
		lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
		kfree(mlist);
	}
	return;
}

static struct lpfc_dmabuf *
lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
		       int outbound_buffers, struct ulp_bde64 *bpl,
		       int *bpl_entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	unsigned int bytes_left = size;

	/* Verify we can support the size specified */
	if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
		return NULL;

	/* Determine the number of dma buffers to allocate */
	*bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
			size/LPFC_BPL_SIZE);

	/* Allocate dma buffer and place in BPL passed */
	while (bytes_left) {
		/* Allocate dma buffer  */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);
		mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_bsg_buffers(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		/* Add buffer to buffer pointer list */
		if (outbound_buffers)
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		else
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
		bpl->tus.f.bdeSize = (uint16_t)
			(bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
			 bytes_left);
		bytes_left -= bpl->tus.f.bdeSize;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
	}
	return mlist;
}

static unsigned int
lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
		   struct bsg_buffer *bsg_buffers,
		   unsigned int bytes_to_transfer, int to_buffers)
{

	struct lpfc_dmabuf *mp;
	unsigned int transfer_bytes, bytes_copied = 0;
	unsigned int sg_offset, dma_offset;
	unsigned char *dma_address, *sg_address;
	LIST_HEAD(temp_list);
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;
	bool sg_valid;

	list_splice_init(&dma_buffers->list, &temp_list);
	list_add(&dma_buffers->list, &temp_list);
	sg_offset = 0;
	if (to_buffers)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;
	sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
		       sg_flags);
	local_irq_save(flags);
	sg_valid = sg_miter_next(&miter);
	list_for_each_entry(mp, &temp_list, list) {
		dma_offset = 0;
		while (bytes_to_transfer && sg_valid &&
		       (dma_offset < LPFC_BPL_SIZE)) {
			dma_address = mp->virt + dma_offset;
			if (sg_offset) {
				/* Continue previous partial transfer of sg */
				sg_address = miter.addr + sg_offset;
				transfer_bytes = miter.length - sg_offset;
			} else {
				sg_address = miter.addr;
				transfer_bytes = miter.length;
			}
			if (bytes_to_transfer < transfer_bytes)
				transfer_bytes = bytes_to_transfer;
			if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
				transfer_bytes = LPFC_BPL_SIZE - dma_offset;
			if (to_buffers)
				memcpy(dma_address, sg_address, transfer_bytes);
			else
				memcpy(sg_address, dma_address, transfer_bytes);
			dma_offset += transfer_bytes;
			sg_offset += transfer_bytes;
			bytes_to_transfer -= transfer_bytes;
			bytes_copied += transfer_bytes;
			if (sg_offset >= miter.length) {
				sg_offset = 0;
				sg_valid = sg_miter_next(&miter);
			}
		}
	}
	sg_miter_stop(&miter);
	local_irq_restore(flags);
	list_del_init(&dma_buffers->list);
	list_splice(&temp_list, &dma_buffers->list);
	return bytes_copied;
}
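
/*
 * Usage sketch for the three helpers above (illustrative only; the real
 * callers are lpfc_bsg_send_mgmt_cmd() and lpfc_bsg_send_mgmt_rsp() below).
 * With to_buffers set, lpfc_bsg_copy_data() copies from the bsg scatter
 * list into the DMA buffers; with it clear, the copy runs the other way:
 *
 *	int entries = LPFC_BPL_SIZE / sizeof(struct ulp_bde64);
 *	struct lpfc_dmabuf *cmp;
 *
 *	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
 *				     1, bpl, &entries);
 *	if (!cmp)
 *		return -ENOMEM;
 *	lpfc_bsg_copy_data(cmp, &job->request_payload,
 *			   job->request_payload.payload_len, 1);
 *	...
 *	lpfc_free_bsg_buffers(phba, cmp);
 */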

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp, *rmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	iocb = &dd_data->context_un.iocb;
	ndlp = iocb->ndlp;
	rmp = iocb->rmp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed data or set the error status */

	if (job) {
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			rsp_size = rsp->un.genreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				lpfc_bsg_copy_data(rmp, &job->reply_payload,
						   rsp_size, 0);
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_free_bsg_buffers(phba, rmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	IOCB_t *cmd;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;
	int iocb_stat;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	cmd = &cmdiocbq->iocb;

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);

	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	if (phba->sli_rev == LPFC_SLI_REV4)
		cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context_un.ndlp = ndlp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = rmp;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto free_rmp;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (iocb_stat == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed yet */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (iocb_stat == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	unsigned int rsp_size;
	int rc = 0;

	dd_data = cmdiocbq->context1;
	ndlp = dd_data->context_un.iocb.ndlp;
	cmdiocbq->context1 = ndlp;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		bsg_reply = job->reply;
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	rsp = &rspiocbq->iocb;
	pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *)pcmd->list.next;

	/* Copy the completed job data or determine the job status if job is
	 * still active
	 */

	if (job) {
		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
			rsp_size = rsp->un.elsreq64.bdl.bdeSize;
			bsg_reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    prsp->virt,
						    rsp_size);
		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
			bsg_reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
			/* LS_RJT data returned in word 4 */
			rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
			els_reply = &bsg_reply->reply_data.ctels_reply;
			els_reply->status = FC_CTELS_STATUS_REJECT;
			els_reply->rjt_data.action = rjt_data[3];
			els_reply->rjt_data.reason_code = rjt_data[2];
			els_reply->rjt_data.reason_explanation = rjt_data[1];
			els_reply->rjt_data.vendor_unique = rjt_data[0];
		} else {
			rc = -EIO;
		}
	}

	lpfc_nlp_put(ndlp);
	lpfc_els_free_iocb(phba, cmdiocbq);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t elscmd;
	uint32_t cmdsize;
	struct lpfc_iocbq *cmdiocbq;
	uint16_t rpi = 0;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	/* verify the els command is not greater than the
	 * maximum ELS transfer size.
	 */

	if (job->request_payload.payload_len > FCELSSIZE) {
		rc = -EINVAL;
		goto no_dd_data;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	elscmd = bsg_request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	/* We will use the dma buffers allocated by the prep els iocb for
	 * both command and response, to ensure that if the job times out
	 * and the request is freed, we won't DMA into memory that is no
	 * longer allocated to the request.
	 */

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto release_ndlp;
	}

	rpi = ndlp->nlp_rpi;

	/* Transfer the request payload to allocated command dma buffer */

	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
			  cmdsize);

	if (phba->sli_rev == LPFC_SLI_REV4)
		cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
	else
		cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context_un.ndlp = ndlp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -EIO;
			goto linkdown_err;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	} else if (rc == IOCB_BUSY) {
		rc = -EAGAIN;
	} else {
		rc = -EIO;
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

linkdown_err:
	cmdiocbq->context1 = ndlp;
	lpfc_els_free_iocb(phba, cmdiocbq);

release_ndlp:
	lpfc_nlp_put(ndlp);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt->dd_data);
	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	evt->dd_data = NULL;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
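
/*
 * Reference-counting sketch (illustrative): an event is born with one
 * reference from the kref_init() in lpfc_bsg_event_new(). Each waiter or
 * unsolicited-event path then takes its own reference, and the final
 * lpfc_bsg_event_unref() drops the count to zero and invokes
 * lpfc_bsg_event_free():
 *
 *	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id);
 *	if (!evt)
 *		return -ENOMEM;
 *	spin_lock_irqsave(&phba->ct_ev_lock, flags);
 *	list_add(&evt->node, &phba->ct_ev_waiters);
 *	lpfc_bsg_event_ref(evt);        reference held by the waiter
 *	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 *	...
 *	lpfc_bsg_event_unref(evt);      waiter done; may free the event
 */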

/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}

/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver internal I/O ring.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct bsg_job *job = NULL;
	struct fc_bsg_reply *bsg_reply;
	struct bsg_job_data *dd_data = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq x%px\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						if (phba->sli_rev <
						    LPFC_SLI_REV4)
							diag_cmd_data_free(phba,
							(struct lpfc_dmabufext
							 *)dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
			/* Provide warning for over-run of the ct_ctx array */
			if (phba->ct_ctx[evt_dat->immed_dat].valid ==
			    UNSOL_VALID)
				lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
						"2717 CT context array entry "
						"[%d] over-run: oxid:x%x, "
						"sid:x%x\n", phba->ctx_idx,
						phba->ct_ctx[
						    evt_dat->immed_dat].oxid,
						phba->ct_ctx[
						    evt_dat->immed_dat].SID);
			phba->ct_ctx[evt_dat->immed_dat].rxid =
				piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
				piocbq->iocb.unsli3.rcvsli3.ox_id;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
			phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);

		dd_data = (struct bsg_job_data *)evt->dd_data;
		job = dd_data->set_job;
		dd_data->set_job = NULL;
		lpfc_bsg_event_unref(evt);
		if (job) {
			bsg_reply = job->reply;
			bsg_reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			bsg_reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			bsg_job_done(job, bsg_reply->result,
				       bsg_reply->reply_payload_rcv_len);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if ((phba->sli_rev < LPFC_SLI_REV4) &&
	    (evt_req_id == SLI_CT_ELX_LOOPBACK))
		return 0;
	return 1;
}

/**
 * lpfc_bsg_ct_unsol_abort - handle an abort of a CT command to the management plane
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function handles an abort of a CT command directed to the management
 * plane on an SLI4 port.
 *
 * If a pending context for a CT command to the management plane is present,
 * this function clears that context and returns 1 (handled); otherwise, it
 * returns 0, indicating that no such context exists.
 **/
int
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	struct fc_frame_header fc_hdr;
	struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
	int ctx_idx, handled = 0;
	uint16_t oxid, rxid;
	uint32_t sid;

	memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
	sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
	oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
	rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);

	for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
		if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
			continue;
		if (phba->ct_ctx[ctx_idx].rxid != rxid)
			continue;
		if (phba->ct_ctx[ctx_idx].oxid != oxid)
			continue;
		if (phba->ct_ctx[ctx_idx].SID != sid)
			continue;
		phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
		handled = 1;
	}
	return handled;
}
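
/*
 * Flow sketch (illustrative): the ct_ctx[] entries recorded by
 * lpfc_bsg_ct_unsol_event() above are consumed when userspace answers with
 * a SEND_MGMT_RESP job. The tag passed back is evt_dat->immed_dat, which
 * indexes the same array, and lpfc_issue_ct_rsp() below rebuilds the
 * exchange from it before marking the entry invalid:
 *
 *	icmd->ulpContext = phba->ct_ctx[tag].rxid;
 *	icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
 *	...
 *	phba->ct_ctx[tag].valid = UNSOL_INVALID;
 */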

/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			dd_data = (struct bsg_job_data *)evt->dd_data;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
		if (dd_data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2734 Failed allocation of dd_data\n");
			rc = -ENOMEM;
			goto job_error;
		}
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}
		dd_data->type = TYPE_EVT;
		dd_data->set_job = NULL;
		dd_data->context_un.evt = evt;
		evt->dd_data = (void *)dd_data;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	kfree(dd_data);
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt, *evt_next;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there is no more event data available
	 */
	if (evt_dat == NULL) {
		bsg_reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		bsg_reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	bsg_reply->result = 0;
	bsg_job_done(job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	return 0;

job_error:
	job->dd_data = NULL;
	bsg_reply->result = rc;
	return rc;
}

/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct bsg_job *job;
	struct fc_bsg_reply *bsg_reply;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp, *cmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	dd_data = cmdiocbq->context1;

	/* Determine if job has been aborted */
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	job = dd_data->set_job;
	if (job) {
		/* Prevent timeout handling from trying to abort job */
		job->dd_data = NULL;
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* Close the timeout handler abort window */
	spin_lock_irqsave(&phba->hbalock, flags);
	cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	ndlp = dd_data->context_un.iocb.ndlp;
	cmp = cmdiocbq->context2;
	bmp = cmdiocbq->context3;
	rsp = &rspiocbq->iocb;

	/* Copy the completed job data or set the error status */

	if (job) {
		bsg_reply = job->reply;
		if (rsp->ulpStatus) {
			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
				case IOERR_SEQUENCE_TIMEOUT:
					rc = -ETIMEDOUT;
					break;
				case IOERR_INVALID_RPI:
					rc = -EFAULT;
					break;
				default:
					rc = -EACCES;
					break;
				}
			} else {
				rc = -EACCES;
			}
		} else {
			bsg_reply->reply_payload_rcv_len = 0;
		}
	}

	lpfc_free_bsg_buffers(phba, cmp);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);

	/* Complete the job if the job is still active */

	if (job) {
		bsg_reply->result = rc;
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	}
	return;
}

/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the port's context exchange array.
 * @cmp: Pointer to a cmp dma buffer descriptor.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the BDE.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
		  int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].rxid;
		icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				 "2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* Check if the ndlp is active */
		if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		/* get a reference count so the ndlp doesn't go away while
		 * we respond
		 */
		if (!lpfc_nlp_get(ndlp)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}

		icmd->un.ulpWord[3] =
				phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].valid = UNSOL_INVALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);

	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = cmp;
	ctiocb->context3 = bmp;
	ctiocb->context_un.ndlp = ndlp;
	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;

	dd_data->type = TYPE_IOCB;
	dd_data->set_job = job;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.ndlp = ndlp;
	dd_data->context_un.iocb.rmp = NULL;
	job->dd_data = dd_data;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		if (lpfc_readl(phba->HCregaddr, &creg_val)) {
			rc = -IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);

	if (rc == IOCB_SUCCESS) {
		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O had not been completed/released */
		if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
			/* open up abort window to timeout handler */
			ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
		}
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return 0; /* done for now */
	}

	/* iocb failed so cleanup */
	job->dd_data = NULL;

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}

/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
	int bpl_entries;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	bsg_reply->reply_payload_rcv_len = 0;

	if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
		rc = -ERANGE;
		goto send_mgmt_rsp_exit;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_exit;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &bpl_entries);
	if (!cmp) {
		rc = -ENOMEM;
		goto send_mgmt_rsp_free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	rc = -EACCES;

	lpfc_free_bsg_buffers(phba, cmp);

send_mgmt_rsp_free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode_enter - prepare the device for diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for preparing the driver for diag
 * loopback on the device.
 */
static int
lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host *shost;
	struct lpfc_sli *psli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	int i = 0;

	psli = &phba->sli;
	if (!psli)
		return -ENODEV;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
		return -EACCES;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}

	if (phba->sli_rev != LPFC_SLI_REV4) {
		pring = &psli->sli3_ring[LPFC_FCP_RING];
		lpfc_emptyq_wait(phba, &pring->txcmplq, &phba->hbalock);
		return 0;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring || (pring->ringno != LPFC_FCP_RING))
			continue;
		if (!lpfc_emptyq_wait(phba, &pring->txcmplq,
				      &pring->ring_lock))
			break;
	}
	return 0;
}

/**
 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
 * @phba: Pointer to HBA context object.
 *
 * This function is responsible for the driver's exit processing from device
 * diag loopback mode: it unblocks scsi requests on all vports again.
 */
static void
lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
{
	struct Scsi_Host *shost;
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}
	return;
}

/**
 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
 * @phba: Pointer to HBA context object.
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing an sli3 port into diagnostic
 * loopback mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, and a small delay is used to allow
 * outstanding scsi requests to complete before the link is brought down.
 * If the link is placed in loopback mode, scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct diag_mode_set *loopback_mode;
	uint32_t link_flags;
	uint32_t timeout;
	LPFC_MBOXQ_t *pmboxq = NULL;
	int mbxstatus = MBX_SUCCESS;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	bsg_reply->reply_payload_rcv_len = 0;

	if (job->request_len < sizeof(struct fc_bsg_request) +
	    sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request size:%d "
				"below the minimum size:%d\n",
				job->request_len,
				(int)(sizeof(struct fc_bsg_request) +
				sizeof(struct diag_mode_set)));
		rc = -EINVAL;
		goto job_error;
	}

	rc = lpfc_bsg_diag_mode_enter(phba);
	if (rc)
		goto job_error;

	/* bring the link to diagnostic mode */
	loopback_mode = (struct diag_mode_set *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout * 100;

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto loopback_mode_exit;
	}
	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			spin_lock_irq(&phba->hbalock);
			phba->link_flag |= LS_LOOPBACK_MODE;
			spin_unlock_irq(&phba->hbalock);
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}

				msleep(10);
			}
		}

	} else
		rc = -ENODEV;

loopback_mode_exit:
	lpfc_bsg_diag_mode_exit(phba);

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (pmboxq && mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rc;
}
1904
1905/**
1906 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1907 * @phba: Pointer to HBA context object.
 * @diag: Flag to set the link to diag or normal operation state.
1909 *
1910 * This function is responsible for issuing a sli4 mailbox command for setting
1911 * link to either diag state or normal operation state.
1912 */
1913static int
1914lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1915{
1916	LPFC_MBOXQ_t *pmboxq;
1917	struct lpfc_mbx_set_link_diag_state *link_diag_state;
1918	uint32_t req_len, alloc_len;
1919	int mbxstatus = MBX_SUCCESS, rc;
1920
1921	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1922	if (!pmboxq)
1923		return -ENOMEM;
1924
1925	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1926		   sizeof(struct lpfc_sli4_cfg_mhdr));
1927	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1928				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1929				req_len, LPFC_SLI4_MBX_EMBED);
1930	if (alloc_len != req_len) {
1931		rc = -ENOMEM;
1932		goto link_diag_state_set_out;
1933	}
1934	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1935			"3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1936			diag, phba->sli4_hba.lnk_info.lnk_tp,
1937			phba->sli4_hba.lnk_info.lnk_no);
1938
1939	link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1940	bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1941	       LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1942	bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1943	       phba->sli4_hba.lnk_info.lnk_no);
1944	bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1945	       phba->sli4_hba.lnk_info.lnk_tp);
1946	if (diag)
1947		bf_set(lpfc_mbx_set_diag_state_diag,
1948		       &link_diag_state->u.req, 1);
1949	else
1950		bf_set(lpfc_mbx_set_diag_state_diag,
1951		       &link_diag_state->u.req, 0);
1952
1953	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1954
1955	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1956		rc = 0;
1957	else
1958		rc = -ENODEV;
1959
1960link_diag_state_set_out:
1961	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1962		mempool_free(pmboxq, phba->mbox_mem_pool);
1963
1964	return rc;
1965}
1966
1967/**
1968 * lpfc_sli4_bsg_set_loopback_mode - set sli4 internal loopback diagnostic
1969 * @phba: Pointer to HBA context object.
1970 * @mode: loopback mode to set
1971 * @link_no: link number for loopback mode to set
1972 *
1973 * This function is responsible for issuing a sli4 mailbox command for setting
1974 * up loopback diagnostic for a link.
1975 */
1976static int
1977lpfc_sli4_bsg_set_loopback_mode(struct lpfc_hba *phba, int mode,
1978				uint32_t link_no)
1979{
1980	LPFC_MBOXQ_t *pmboxq;
1981	uint32_t req_len, alloc_len;
1982	struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1983	int mbxstatus = MBX_SUCCESS, rc = 0;
1984
1985	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1986	if (!pmboxq)
1987		return -ENOMEM;
1988	req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1989		   sizeof(struct lpfc_sli4_cfg_mhdr));
1990	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1991				LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1992				req_len, LPFC_SLI4_MBX_EMBED);
1993	if (alloc_len != req_len) {
1994		mempool_free(pmboxq, phba->mbox_mem_pool);
1995		return -ENOMEM;
1996	}
1997	link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1998	bf_set(lpfc_mbx_set_diag_state_link_num,
1999	       &link_diag_loopback->u.req, link_no);
2000
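	/*
	 * The low nibble of conf_trunk marks which links are trunk members;
	 * the high nibble is used as the per-link "loopback needs to be
	 * disabled" flags (see the diag loopback mode handler below).
	 */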
2001	if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2002		bf_set(lpfc_mbx_set_diag_state_link_type,
2003		       &link_diag_loopback->u.req, LPFC_LNK_FC_TRUNKED);
2004	} else {
2005		bf_set(lpfc_mbx_set_diag_state_link_type,
2006		       &link_diag_loopback->u.req,
2007		       phba->sli4_hba.lnk_info.lnk_tp);
2008	}
2009
2010	bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
2011	       mode);
2012
2013	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
2014	if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
2015		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2016				"3127 Failed setup loopback mode mailbox "
2017				"command, rc:x%x, status:x%x\n", mbxstatus,
2018				pmboxq->u.mb.mbxStatus);
2019		rc = -ENODEV;
2020	}
2021	if (pmboxq && (mbxstatus != MBX_TIMEOUT))
2022		mempool_free(pmboxq, phba->mbox_mem_pool);
2023	return rc;
2024}
2025
2026/**
2027 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
2028 * @phba: Pointer to HBA context object.
2029 *
 * This function sets up the SLI4 FC port registrations for a diagnostic run,
 * which includes all the rpis, the vfi, and also the vpi.
2032 */
2033static int
2034lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
2035{
2036	int rc;
2037
2038	if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
2039		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2040				"3136 Port still had vfi registered: "
2041				"mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2042				phba->pport->fc_myDID, phba->fcf.fcfi,
2043				phba->sli4_hba.vfi_ids[phba->pport->vfi],
2044				phba->vpi_ids[phba->pport->vpi]);
2045		return -EINVAL;
2046	}
2047	rc = lpfc_issue_reg_vfi(phba->pport);
2048	return rc;
2049}
2050
2051/**
2052 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2053 * @phba: Pointer to HBA context object.
2054 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2055 *
2056 * This function is responsible for placing an sli4 port into diagnostic
2057 * loopback mode in order to perform a diagnostic loopback test.
2058 */
2059static int
2060lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2061{
2062	struct fc_bsg_request *bsg_request = job->request;
2063	struct fc_bsg_reply *bsg_reply = job->reply;
2064	struct diag_mode_set *loopback_mode;
2065	uint32_t link_flags, timeout, link_no;
2066	int i, rc = 0;
2067
2068	/* no data to return just the return code */
2069	bsg_reply->reply_payload_rcv_len = 0;
2070
2071	if (job->request_len < sizeof(struct fc_bsg_request) +
2072	    sizeof(struct diag_mode_set)) {
2073		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2074				"3011 Received DIAG MODE request size:%d "
2075				"below the minimum size:%d\n",
2076				job->request_len,
2077				(int)(sizeof(struct fc_bsg_request) +
2078				sizeof(struct diag_mode_set)));
2079		rc = -EINVAL;
2080		goto job_done;
2081	}
2082
2083	loopback_mode = (struct diag_mode_set *)
2084		bsg_request->rqst_data.h_vendor.vendor_cmd;
2085	link_flags = loopback_mode->type;
2086	timeout = loopback_mode->timeout * 100;
2087
2088	if (loopback_mode->physical_link == -1)
2089		link_no = phba->sli4_hba.lnk_info.lnk_no;
2090	else
2091		link_no = loopback_mode->physical_link;
2092
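	/* an explicit disable request takes the link out of loopback mode */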
2093	if (link_flags == DISABLE_LOOP_BACK) {
2094		rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2095					LPFC_DIAG_LOOPBACK_TYPE_DISABLE,
2096					link_no);
2097		if (!rc) {
2098			/* Unset the need disable bit */
2099			phba->sli4_hba.conf_trunk &= ~((1 << link_no) << 4);
2100		}
2101		goto job_done;
2102	} else {
2103		/* Check if we need to disable the loopback state */
2104		if (phba->sli4_hba.conf_trunk & ((1 << link_no) << 4)) {
2105			rc = -EPERM;
2106			goto job_done;
2107		}
2108	}
2109
2110	rc = lpfc_bsg_diag_mode_enter(phba);
2111	if (rc)
2112		goto job_done;
2113
	/* indicate we are in loopback diagnostic mode */
2115	spin_lock_irq(&phba->hbalock);
2116	phba->link_flag |= LS_LOOPBACK_MODE;
2117	spin_unlock_irq(&phba->hbalock);
2118
	/* reset port to start from scratch */
2120	rc = lpfc_selective_reset(phba);
2121	if (rc)
2122		goto job_done;
2123
2124	/* bring the link to diagnostic mode */
2125	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2126			"3129 Bring link to diagnostic state.\n");
2127
2128	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2129	if (rc) {
2130		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2131				"3130 Failed to bring link to diagnostic "
2132				"state, rc:x%x\n", rc);
2133		goto loopback_mode_exit;
2134	}
2135
2136	/* wait for link down before proceeding */
2137	i = 0;
2138	while (phba->link_state != LPFC_LINK_DOWN) {
2139		if (i++ > timeout) {
2140			rc = -ETIMEDOUT;
2141			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2142					"3131 Timeout waiting for link to "
2143					"diagnostic mode, timeout:%d ms\n",
2144					timeout * 10);
2145			goto loopback_mode_exit;
2146		}
2147		msleep(10);
2148	}
2149
2150	/* set up loopback mode */
2151	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2152			"3132 Set up loopback mode:x%x\n", link_flags);
2153
2154	switch (link_flags) {
2155	case INTERNAL_LOOP_BACK:
2156		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2157			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2158					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2159					link_no);
2160		} else {
2161			/* Trunk is configured, but link is not in this trunk */
2162			if (phba->sli4_hba.conf_trunk) {
2163				rc = -ELNRNG;
2164				goto loopback_mode_exit;
2165			}
2166
2167			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2168					LPFC_DIAG_LOOPBACK_TYPE_INTERNAL,
2169					link_no);
2170		}
2171
2172		if (!rc) {
2173			/* Set the need disable bit */
2174			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2175		}
2176
2177		break;
2178	case EXTERNAL_LOOP_BACK:
2179		if (phba->sli4_hba.conf_trunk & (1 << link_no)) {
2180			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2181				LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL_TRUNKED,
2182				link_no);
2183		} else {
2184			/* Trunk is configured, but link is not in this trunk */
2185			if (phba->sli4_hba.conf_trunk) {
2186				rc = -ELNRNG;
2187				goto loopback_mode_exit;
2188			}
2189
2190			rc = lpfc_sli4_bsg_set_loopback_mode(phba,
2191						LPFC_DIAG_LOOPBACK_TYPE_SERDES,
2192						link_no);
2193		}
2194
2195		if (!rc) {
2196			/* Set the need disable bit */
2197			phba->sli4_hba.conf_trunk |= (1 << link_no) << 4;
2198		}
2199
2200		break;
2201	default:
2202		rc = -EINVAL;
2203		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2204				"3141 Loopback mode:x%x not supported\n",
2205				link_flags);
2206		goto loopback_mode_exit;
2207	}
2208
2209	if (!rc) {
2210		/* wait for the link attention interrupt */
2211		msleep(100);
2212		i = 0;
2213		while (phba->link_state < LPFC_LINK_UP) {
2214			if (i++ > timeout) {
2215				rc = -ETIMEDOUT;
2216				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2217					"3137 Timeout waiting for link up "
2218					"in loopback mode, timeout:%d ms\n",
2219					timeout * 10);
2220				break;
2221			}
2222			msleep(10);
2223		}
2224	}
2225
2226	/* port resource registration setup for loopback diagnostic */
2227	if (!rc) {
		/* set up a non-zero myDID for the loopback test */
2229		phba->pport->fc_myDID = 1;
2230		rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2231	} else
2232		goto loopback_mode_exit;
2233
2234	if (!rc) {
2235		/* wait for the port ready */
2236		msleep(100);
2237		i = 0;
2238		while (phba->link_state != LPFC_HBA_READY) {
2239			if (i++ > timeout) {
2240				rc = -ETIMEDOUT;
2241				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2242					"3133 Timeout waiting for port "
2243					"loopback mode ready, timeout:%d ms\n",
2244					timeout * 10);
2245				break;
2246			}
2247			msleep(10);
2248		}
2249	}
2250
2251loopback_mode_exit:
2252	/* clear loopback diagnostic mode */
2253	if (rc) {
2254		spin_lock_irq(&phba->hbalock);
2255		phba->link_flag &= ~LS_LOOPBACK_MODE;
2256		spin_unlock_irq(&phba->hbalock);
2257	}
2258	lpfc_bsg_diag_mode_exit(phba);
2259
2260job_done:
2261	/* make error code available to userspace */
2262	bsg_reply->result = rc;
2263	/* complete the job back to userspace if no error */
2264	if (rc == 0)
2265		bsg_job_done(job, bsg_reply->result,
2266			       bsg_reply->reply_payload_rcv_len);
2267	return rc;
2268}
2269
2270/**
2271 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2272 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2273 *
 * This function is responsible for checking the user's bsg diag mode
 * command and dispatching it to the proper driver action routine.
2276 */
2277static int
2278lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2279{
2280	struct Scsi_Host *shost;
2281	struct lpfc_vport *vport;
2282	struct lpfc_hba *phba;
2283	int rc;
2284
2285	shost = fc_bsg_to_shost(job);
2286	if (!shost)
2287		return -ENODEV;
2288	vport = shost_priv(shost);
2289	if (!vport)
2290		return -ENODEV;
2291	phba = vport->phba;
2292	if (!phba)
2293		return -ENODEV;
2294
2295	if (phba->sli_rev < LPFC_SLI_REV4)
2296		rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2297	else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
2298		 LPFC_SLI_INTF_IF_TYPE_2)
2299		rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2300	else
2301		rc = -ENODEV;
2302
2303	return rc;
2304}
2305
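/*
 * Illustrative only, not compiled into the driver: a minimal userspace
 * sketch of how an application might invoke the DIAG_MODE vendor command
 * handled above through the FC transport bsg node. The node name
 * "/dev/bsg/fc_host0" and the exact field set of struct diag_mode_set are
 * assumptions for this sketch; lpfc_bsg.h and scsi_bsg_fc.h hold the
 * authoritative layouts. The timeout is given in seconds, which the
 * handlers above convert to 10 ms polling steps, and a physical_link of
 * -1 selects the driver's default link (see the sli4 handler above).
 *
 *	unsigned char buf[sizeof(struct fc_bsg_request) +
 *			  sizeof(struct diag_mode_set)] = { 0 };
 *	struct fc_bsg_request *req = (struct fc_bsg_request *)buf;
 *	struct diag_mode_set *diag = (struct diag_mode_set *)
 *			req->rqst_data.h_vendor.vendor_cmd;
 *	struct fc_bsg_reply reply = { 0 };
 *	struct sg_io_v4 io = { 0 };
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);
 *
 *	req->msgcode = FC_BSG_HST_VENDOR;
 *	diag->command = LPFC_BSG_VENDOR_DIAG_MODE;
 *	diag->type = INTERNAL_LOOP_BACK;
 *	diag->timeout = 60;
 *	diag->physical_link = -1;
 *
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (uintptr_t)buf;
 *	io.request_len = sizeof(buf);
 *	io.response = (uintptr_t)&reply;
 *	io.max_response_len = sizeof(reply);
 *
 *	if (ioctl(fd, SG_IO, &io) < 0 || reply.result < 0)
 *		perror("diag mode");
 */
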
2306/**
2307 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2308 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2309 *
 * This function is responsible for checking the user's bsg diag mode end
 * command and dispatching it to the proper driver action routine.
2312 */
2313static int
2314lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2315{
2316	struct fc_bsg_request *bsg_request = job->request;
2317	struct fc_bsg_reply *bsg_reply = job->reply;
2318	struct Scsi_Host *shost;
2319	struct lpfc_vport *vport;
2320	struct lpfc_hba *phba;
2321	struct diag_mode_set *loopback_mode_end_cmd;
2322	uint32_t timeout;
2323	int rc, i;
2324
2325	shost = fc_bsg_to_shost(job);
2326	if (!shost)
2327		return -ENODEV;
2328	vport = shost_priv(shost);
2329	if (!vport)
2330		return -ENODEV;
2331	phba = vport->phba;
2332	if (!phba)
2333		return -ENODEV;
2334
2335	if (phba->sli_rev < LPFC_SLI_REV4)
2336		return -ENODEV;
2337	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2338	    LPFC_SLI_INTF_IF_TYPE_2)
2339		return -ENODEV;
2340
2341	/* clear loopback diagnostic mode */
2342	spin_lock_irq(&phba->hbalock);
2343	phba->link_flag &= ~LS_LOOPBACK_MODE;
2344	spin_unlock_irq(&phba->hbalock);
2345	loopback_mode_end_cmd = (struct diag_mode_set *)
2346			bsg_request->rqst_data.h_vendor.vendor_cmd;
2347	timeout = loopback_mode_end_cmd->timeout * 100;
2348
2349	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2350	if (rc) {
2351		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2352				"3139 Failed to bring link to diagnostic "
2353				"state, rc:x%x\n", rc);
2354		goto loopback_mode_end_exit;
2355	}
2356
2357	/* wait for link down before proceeding */
2358	i = 0;
2359	while (phba->link_state != LPFC_LINK_DOWN) {
2360		if (i++ > timeout) {
2361			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2362					"3140 Timeout waiting for link to "
2363					"diagnostic mode_end, timeout:%d ms\n",
2364					timeout * 10);
2365			/* there is nothing much we can do here */
2366			break;
2367		}
2368		msleep(10);
2369	}
2370
2371	/* reset port resource registrations */
2372	rc = lpfc_selective_reset(phba);
2373	phba->pport->fc_myDID = 0;
2374
2375loopback_mode_end_exit:
2376	/* make return code available to userspace */
2377	bsg_reply->result = rc;
2378	/* complete the job back to userspace if no error */
2379	if (rc == 0)
2380		bsg_job_done(job, bsg_reply->result,
2381			       bsg_reply->reply_payload_rcv_len);
2382	return rc;
2383}
2384
2385/**
2386 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2387 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2388 *
 * This function performs an SLI4 diag link test request from the user
 * application.
2391 */
2392static int
2393lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2394{
2395	struct fc_bsg_request *bsg_request = job->request;
2396	struct fc_bsg_reply *bsg_reply = job->reply;
2397	struct Scsi_Host *shost;
2398	struct lpfc_vport *vport;
2399	struct lpfc_hba *phba;
2400	LPFC_MBOXQ_t *pmboxq;
2401	struct sli4_link_diag *link_diag_test_cmd;
2402	uint32_t req_len, alloc_len;
2403	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2404	union lpfc_sli4_cfg_shdr *shdr;
2405	uint32_t shdr_status, shdr_add_status;
2406	struct diag_status *diag_status_reply;
2407	int mbxstatus, rc = -ENODEV, rc1 = 0;
2408
2409	shost = fc_bsg_to_shost(job);
2410	if (!shost)
2411		goto job_error;
2412
2413	vport = shost_priv(shost);
2414	if (!vport)
2415		goto job_error;
2416
2417	phba = vport->phba;
2418	if (!phba)
2419		goto job_error;
2422	if (phba->sli_rev < LPFC_SLI_REV4)
2423		goto job_error;
2424
2425	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
2426	    LPFC_SLI_INTF_IF_TYPE_2)
2427		goto job_error;
2428
2429	if (job->request_len < sizeof(struct fc_bsg_request) +
2430	    sizeof(struct sli4_link_diag)) {
2431		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2432				"3013 Received LINK DIAG TEST request "
2433				" size:%d below the minimum size:%d\n",
2434				job->request_len,
2435				(int)(sizeof(struct fc_bsg_request) +
2436				sizeof(struct sli4_link_diag)));
2437		rc = -EINVAL;
2438		goto job_error;
2439	}
2440
2441	rc = lpfc_bsg_diag_mode_enter(phba);
2442	if (rc)
2443		goto job_error;
2444
2445	link_diag_test_cmd = (struct sli4_link_diag *)
2446			 bsg_request->rqst_data.h_vendor.vendor_cmd;
2447
2448	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2449
2450	if (rc)
2451		goto job_error;
2452
2453	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto link_diag_test_exit;
	}
2456
2457	req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2458		   sizeof(struct lpfc_sli4_cfg_mhdr));
2459	alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2460				     LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2461				     req_len, LPFC_SLI4_MBX_EMBED);
2462	if (alloc_len != req_len) {
2463		rc = -ENOMEM;
2464		goto link_diag_test_exit;
2465	}
2466
2467	run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2468	bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2469	       phba->sli4_hba.lnk_info.lnk_no);
2470	bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2471	       phba->sli4_hba.lnk_info.lnk_tp);
2472	bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2473	       link_diag_test_cmd->test_id);
2474	bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2475	       link_diag_test_cmd->loops);
2476	bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2477	       link_diag_test_cmd->test_version);
2478	bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2479	       link_diag_test_cmd->error_action);
2480
2481	mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
2482
2483	shdr = (union lpfc_sli4_cfg_shdr *)
2484		&pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2485	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2486	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2487	if (shdr_status || shdr_add_status || mbxstatus) {
2488		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2489				"3010 Run link diag test mailbox failed with "
2490				"mbx_status x%x status x%x, add_status x%x\n",
2491				mbxstatus, shdr_status, shdr_add_status);
2492	}
2493
2494	diag_status_reply = (struct diag_status *)
2495			    bsg_reply->reply_data.vendor_reply.vendor_rsp;
2496
2497	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) {
2498		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2499				"3012 Received Run link diag test reply "
2500				"below minimum size (%d): reply_len:%d\n",
2501				(int)(sizeof(*bsg_reply) +
2502				sizeof(*diag_status_reply)),
2503				job->reply_len);
2504		rc = -EINVAL;
		goto link_diag_test_exit;
2506	}
2507
2508	diag_status_reply->mbox_status = mbxstatus;
2509	diag_status_reply->shdr_status = shdr_status;
2510	diag_status_reply->shdr_add_status = shdr_add_status;
2511
2512link_diag_test_exit:
2513	rc1 = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2514
2515	if (pmboxq)
2516		mempool_free(pmboxq, phba->mbox_mem_pool);
2517
2518	lpfc_bsg_diag_mode_exit(phba);
2519
2520job_error:
2521	/* make error code available to userspace */
2522	if (rc1 && !rc)
2523		rc = rc1;
2524	bsg_reply->result = rc;
2525	/* complete the job back to userspace if no error */
2526	if (rc == 0)
2527		bsg_job_done(job, bsg_reply->result,
2528			       bsg_reply->reply_payload_rcv_len);
2529	return rc;
2530}
2531
2532/**
2533 * lpfcdiag_loop_self_reg - obtains a remote port login id
2534 * @phba: Pointer to HBA context object
2535 * @rpi: Pointer to a remote port login id
2536 *
2537 * This function obtains a remote port login id so the diag loopback test
2538 * can send and receive its own unsolicited CT command.
2539 **/
2540static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2541{
2542	LPFC_MBOXQ_t *mbox;
2543	struct lpfc_dmabuf *dmabuff;
2544	int status;
2545
2546	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2547	if (!mbox)
2548		return -ENOMEM;
2549
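	/*
	 * SLI4 requires the driver to allocate the rpi up front; on SLI3
	 * the firmware assigns one and returns it in the mailbox completion.
	 */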
2550	if (phba->sli_rev < LPFC_SLI_REV4)
2551		status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2552				(uint8_t *)&phba->pport->fc_sparam,
2553				mbox, *rpi);
2554	else {
2555		*rpi = lpfc_sli4_alloc_rpi(phba);
2556		if (*rpi == LPFC_RPI_ALLOC_ERROR) {
2557			mempool_free(mbox, phba->mbox_mem_pool);
2558			return -EBUSY;
2559		}
2560		status = lpfc_reg_rpi(phba, phba->pport->vpi,
2561				phba->pport->fc_myDID,
2562				(uint8_t *)&phba->pport->fc_sparam,
2563				mbox, *rpi);
2564	}
2565
2566	if (status) {
2567		mempool_free(mbox, phba->mbox_mem_pool);
2568		if (phba->sli_rev == LPFC_SLI_REV4)
2569			lpfc_sli4_free_rpi(phba, *rpi);
2570		return -ENOMEM;
2571	}
2572
2573	dmabuff = (struct lpfc_dmabuf *)mbox->ctx_buf;
2574	mbox->ctx_buf = NULL;
2575	mbox->ctx_ndlp = NULL;
2576	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2577
2578	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2579		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2580		kfree(dmabuff);
2581		if (status != MBX_TIMEOUT)
2582			mempool_free(mbox, phba->mbox_mem_pool);
2583		if (phba->sli_rev == LPFC_SLI_REV4)
2584			lpfc_sli4_free_rpi(phba, *rpi);
2585		return -ENODEV;
2586	}
2587
2588	if (phba->sli_rev < LPFC_SLI_REV4)
2589		*rpi = mbox->u.mb.un.varWords[0];
2590
2591	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2592	kfree(dmabuff);
2593	mempool_free(mbox, phba->mbox_mem_pool);
2594	return 0;
2595}
2596
2597/**
 * lpfcdiag_loop_self_unreg - unregister the rpi
2599 * @phba: Pointer to HBA context object
2600 * @rpi: Remote port login id
2601 *
2602 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2603 **/
2604static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2605{
2606	LPFC_MBOXQ_t *mbox;
2607	int status;
2608
2609	/* Allocate mboxq structure */
2610	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2611	if (mbox == NULL)
2612		return -ENOMEM;
2613
2614	if (phba->sli_rev < LPFC_SLI_REV4)
2615		lpfc_unreg_login(phba, 0, rpi, mbox);
2616	else
2617		lpfc_unreg_login(phba, phba->pport->vpi,
2618				 phba->sli4_hba.rpi_ids[rpi], mbox);
2619
2620	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2621
2622	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2623		if (status != MBX_TIMEOUT)
2624			mempool_free(mbox, phba->mbox_mem_pool);
2625		return -EIO;
2626	}
2627	mempool_free(mbox, phba->mbox_mem_pool);
2628	if (phba->sli_rev == LPFC_SLI_REV4)
2629		lpfc_sli4_free_rpi(phba, rpi);
2630	return 0;
2631}
2632
2633/**
2634 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2635 * @phba: Pointer to HBA context object
2636 * @rpi: Remote port login id
2637 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
2639 *
2640 * This function obtains the transmit and receive ids required to send
2641 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * flags are used so the unsolicited response handler is able to process
2643 * the ct command sent on the same port.
2644 **/
2645static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2646			 uint16_t *txxri, uint16_t * rxxri)
2647{
2648	struct lpfc_bsg_event *evt;
2649	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2650	IOCB_t *cmd, *rsp;
2651	struct lpfc_dmabuf *dmabuf;
2652	struct ulp_bde64 *bpl = NULL;
2653	struct lpfc_sli_ct_request *ctreq = NULL;
2654	int ret_val = 0;
2655	int time_left;
2656	int iocb_stat = IOCB_SUCCESS;
2657	unsigned long flags;
2658
2659	*txxri = 0;
2660	*rxxri = 0;
2661	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2662				SLI_CT_ELX_LOOPBACK);
2663	if (!evt)
2664		return -ENOMEM;
2665
2666	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2667	list_add(&evt->node, &phba->ct_ev_waiters);
2668	lpfc_bsg_event_ref(evt);
2669	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2670
2671	cmdiocbq = lpfc_sli_get_iocbq(phba);
2672	rspiocbq = lpfc_sli_get_iocbq(phba);
2673
2674	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2675	if (dmabuf) {
2676		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2677		if (dmabuf->virt) {
2678			INIT_LIST_HEAD(&dmabuf->list);
2679			bpl = (struct ulp_bde64 *) dmabuf->virt;
2680			memset(bpl, 0, sizeof(*bpl));
2681			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2682			bpl->addrHigh =
2683				le32_to_cpu(putPaddrHigh(dmabuf->phys +
2684					sizeof(*bpl)));
2685			bpl->addrLow =
2686				le32_to_cpu(putPaddrLow(dmabuf->phys +
2687					sizeof(*bpl)));
2688			bpl->tus.f.bdeFlags = 0;
2689			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2690			bpl->tus.w = le32_to_cpu(bpl->tus.w);
2691		}
2692	}
2693
2694	if (cmdiocbq == NULL || rspiocbq == NULL ||
2695	    dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2696		dmabuf->virt == NULL) {
2697		ret_val = -ENOMEM;
2698		goto err_get_xri_exit;
2699	}
2700
2701	cmd = &cmdiocbq->iocb;
2702	rsp = &rspiocbq->iocb;
2703
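	/* build the ELX loopback XRI setup CT request */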
2704	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2705
2706	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2707	ctreq->RevisionId.bits.InId = 0;
2708	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2709	ctreq->FsSubType = 0;
2710	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2711	ctreq->CommandResponse.bits.Size = 0;
2714	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2715	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2716	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2717	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2718
2719	cmd->un.xseq64.w5.hcsw.Fctl = LA;
2720	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2721	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2722	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2723
2724	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2725	cmd->ulpBdeCount = 1;
2726	cmd->ulpLe = 1;
2727	cmd->ulpClass = CLASS3;
2728	cmd->ulpContext = rpi;
2729
2730	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2731	cmdiocbq->vport = phba->pport;
2732	cmdiocbq->iocb_cmpl = NULL;
2733
2734	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2735				rspiocbq,
2736				(phba->fc_ratov * 2)
2737				+ LPFC_DRVR_TIMEOUT);
2738	if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
2739		ret_val = -EIO;
2740		goto err_get_xri_exit;
2741	}
2742	*txxri =  rsp->ulpContext;
2743
2744	evt->waiting = 1;
2745	evt->wait_time_stamp = jiffies;
2746	time_left = wait_event_interruptible_timeout(
2747		evt->wq, !list_empty(&evt->events_to_see),
2748		msecs_to_jiffies(1000 *
2749			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2750	if (list_empty(&evt->events_to_see))
2751		ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2752	else {
2753		spin_lock_irqsave(&phba->ct_ev_lock, flags);
2754		list_move(evt->events_to_see.prev, &evt->events_to_get);
2755		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2756		*rxxri = (list_entry(evt->events_to_get.prev,
2757				     typeof(struct event_data),
2758				     node))->immed_dat;
2759	}
2760	evt->waiting = 0;
2761
2762err_get_xri_exit:
2763	spin_lock_irqsave(&phba->ct_ev_lock, flags);
2764	lpfc_bsg_event_unref(evt); /* release ref */
2765	lpfc_bsg_event_unref(evt); /* delete */
2766	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2767
2768	if (dmabuf) {
2769		if (dmabuf->virt)
2770			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
2771		kfree(dmabuf);
2772	}
2773
2774	if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2775		lpfc_sli_release_iocbq(phba, cmdiocbq);
2776	if (rspiocbq)
2777		lpfc_sli_release_iocbq(phba, rspiocbq);
2778	return ret_val;
2779}
2780
2781/**
 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffer
2783 * @phba: Pointer to HBA context object
2784 *
 * This function allocates a BSG_MBOX_SIZE (4KB) page sized dma buffer and
 * returns a pointer to it.
2787 **/
2788static struct lpfc_dmabuf *
2789lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2790{
2791	struct lpfc_dmabuf *dmabuf;
2792	struct pci_dev *pcidev = phba->pcidev;
2793
2794	/* allocate dma buffer struct */
2795	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2796	if (!dmabuf)
2797		return NULL;
2798
2799	INIT_LIST_HEAD(&dmabuf->list);
2800
2801	/* now, allocate dma buffer */
2802	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2803					  &(dmabuf->phys), GFP_KERNEL);
2804
2805	if (!dmabuf->virt) {
2806		kfree(dmabuf);
2807		return NULL;
2808	}
2809
2810	return dmabuf;
2811}
2812
2813/**
2814 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2815 * @phba: Pointer to HBA context object.
2816 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2817 *
 * This routine frees a dma buffer and the associated buffer descriptor
 * referred to by @dmabuf.
2820 **/
2821static void
2822lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2823{
2824	struct pci_dev *pcidev = phba->pcidev;
2825
2826	if (!dmabuf)
2827		return;
2828
2829	if (dmabuf->virt)
2830		dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2831				  dmabuf->virt, dmabuf->phys);
2832	kfree(dmabuf);
2833	return;
2834}
2835
2836/**
2837 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2838 * @phba: Pointer to HBA context object.
2839 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2840 *
 * This routine frees all dma buffers and their associated buffer descriptors
 * referred to by @dmabuf_list.
2843 **/
2844static void
2845lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2846			    struct list_head *dmabuf_list)
2847{
2848	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2849
2850	if (list_empty(dmabuf_list))
2851		return;
2852
2853	list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2854		list_del_init(&dmabuf->list);
2855		lpfc_bsg_dma_page_free(phba, dmabuf);
2856	}
2857	return;
2858}
2859
2860/**
2861 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2862 * @phba: Pointer to HBA context object
2863 * @bpl: Pointer to 64 bit bde structure
2864 * @size: Number of bytes to process
 * @nocopydata: When set, skip preparing the buffers for a user data copy
2866 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * Unless @nocopydata is set, each buffer is zeroed so that user data can
 * later be copied into it. The chained list of page size buffers is returned.
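 *
 * For example, a 10 KB request is carved into three page buffers
 * (4K + 4K + 2K) and the returned list header's flag field is set to 3.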
2870 **/
2871static struct lpfc_dmabufext *
2872diag_cmd_data_alloc(struct lpfc_hba *phba,
2873		   struct ulp_bde64 *bpl, uint32_t size,
2874		   int nocopydata)
2875{
2876	struct lpfc_dmabufext *mlist = NULL;
2877	struct lpfc_dmabufext *dmp;
2878	int cnt, offset = 0, i = 0;
2879	struct pci_dev *pcidev;
2880
2881	pcidev = phba->pcidev;
2882
2883	while (size) {
2884		/* We get chunks of 4K */
2885		if (size > BUF_SZ_4K)
2886			cnt = BUF_SZ_4K;
2887		else
2888			cnt = size;
2889
2890		/* allocate struct lpfc_dmabufext buffer header */
2891		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2892		if (!dmp)
2893			goto out;
2894
2895		INIT_LIST_HEAD(&dmp->dma.list);
2896
2897		/* Queue it to a linked list */
2898		if (mlist)
2899			list_add_tail(&dmp->dma.list, &mlist->dma.list);
2900		else
2901			mlist = dmp;
2902
2903		/* allocate buffer */
2904		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2905						   cnt,
2906						   &(dmp->dma.phys),
2907						   GFP_KERNEL);
2908
2909		if (!dmp->dma.virt)
2910			goto out;
2911
2912		dmp->size = cnt;
2913
2914		if (nocopydata) {
2915			bpl->tus.f.bdeFlags = 0;
2916		} else {
2917			memset((uint8_t *)dmp->dma.virt, 0, cnt);
2918			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2919		}
2920
2921		/* build buffer ptr list for IOCB */
2922		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2923		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2924		bpl->tus.f.bdeSize = (ushort) cnt;
2925		bpl->tus.w = le32_to_cpu(bpl->tus.w);
2926		bpl++;
2927
2928		i++;
2929		offset += cnt;
2930		size -= cnt;
2931	}
2932
2933	if (mlist) {
2934		mlist->flag = i;
2935		return mlist;
2936	}
2937out:
2938	diag_cmd_data_free(phba, mlist);
2939	return NULL;
2940}
2941
2942/**
2943 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2944 * @phba: Pointer to HBA context object
2945 * @rxxri: Receive exchange id
2946 * @len: Number of data bytes
2947 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
2950 **/
2951static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2952			     size_t len)
2953{
2954	struct lpfc_sli_ring *pring;
2955	struct lpfc_iocbq *cmdiocbq;
2956	IOCB_t *cmd = NULL;
2957	struct list_head head, *curr, *next;
2958	struct lpfc_dmabuf *rxbmp;
2959	struct lpfc_dmabuf *dmp;
2960	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2961	struct ulp_bde64 *rxbpl = NULL;
2962	uint32_t num_bde;
2963	struct lpfc_dmabufext *rxbuffer = NULL;
2964	int ret_val = 0;
2965	int iocb_stat;
2966	int i = 0;
2967
2968	pring = lpfc_phba_elsring(phba);
2969
2970	cmdiocbq = lpfc_sli_get_iocbq(phba);
2971	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2972	if (rxbmp != NULL) {
2973		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2974		if (rxbmp->virt) {
2975			INIT_LIST_HEAD(&rxbmp->list);
2976			rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2977			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2978		}
2979	}
2980
2981	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer || !pring) {
2982		ret_val = -ENOMEM;
2983		goto err_post_rxbufs_exit;
2984	}
2985
2986	/* Queue buffers for the receive exchange */
2987	num_bde = (uint32_t)rxbuffer->flag;
2988	dmp = &rxbuffer->dma;
2989
2990	cmd = &cmdiocbq->iocb;
2991	i = 0;
2992
2993	INIT_LIST_HEAD(&head);
2994	list_add_tail(&head, &dmp->list);
2995	list_for_each_safe(curr, next, &head) {
2996		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2997		list_del(curr);
2998
2999		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3000			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
3001			cmd->un.quexri64cx.buff.bde.addrHigh =
3002				putPaddrHigh(mp[i]->phys);
3003			cmd->un.quexri64cx.buff.bde.addrLow =
3004				putPaddrLow(mp[i]->phys);
3005			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
3006				((struct lpfc_dmabufext *)mp[i])->size;
3007			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
3008			cmd->ulpCommand = CMD_QUE_XRI64_CX;
3009			cmd->ulpPU = 0;
3010			cmd->ulpLe = 1;
3011			cmd->ulpBdeCount = 1;
3012			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
3013
3014		} else {
3015			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
3016			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
3017			cmd->un.cont64[i].tus.f.bdeSize =
3018				((struct lpfc_dmabufext *)mp[i])->size;
3019			cmd->ulpBdeCount = ++i;
3020
3021			if ((--num_bde > 0) && (i < 2))
3022				continue;
3023
3024			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
3025			cmd->ulpLe = 1;
3026		}
3027
3028		cmd->ulpClass = CLASS3;
3029		cmd->ulpContext = rxxri;
3030
3031		iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
3032						0);
3033		if (iocb_stat == IOCB_ERROR) {
3034			diag_cmd_data_free(phba,
3035				(struct lpfc_dmabufext *)mp[0]);
3036			if (mp[1])
3037				diag_cmd_data_free(phba,
3038					  (struct lpfc_dmabufext *)mp[1]);
3039			dmp = list_entry(next, struct lpfc_dmabuf, list);
3040			ret_val = -EIO;
3041			goto err_post_rxbufs_exit;
3042		}
3043
3044		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
3045		if (mp[1]) {
3046			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
3047			mp[1] = NULL;
3048		}
3049
3050		/* The iocb was freed by lpfc_sli_issue_iocb */
3051		cmdiocbq = lpfc_sli_get_iocbq(phba);
3052		if (!cmdiocbq) {
3053			dmp = list_entry(next, struct lpfc_dmabuf, list);
3054			ret_val = -EIO;
3055			goto err_post_rxbufs_exit;
3056		}
3057
3058		cmd = &cmdiocbq->iocb;
3059		i = 0;
3060	}
3061	list_del(&head);
3062
3063err_post_rxbufs_exit:
3064
3065	if (rxbmp) {
3066		if (rxbmp->virt)
3067			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
3068		kfree(rxbmp);
3069	}
3070
3071	if (cmdiocbq)
3072		lpfc_sli_release_iocbq(phba, cmdiocbq);
3073	return ret_val;
3074}
3075
3076/**
 * lpfc_bsg_diag_loopback_run - run loopback on a port by issuing a ct cmd to itself
3078 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3079 *
3080 * This function receives a user data buffer to be transmitted and received on
 * the same port; the link must be up and in loopback mode prior
3082 * to being called.
3083 * 1. A kernel buffer is allocated to copy the user data into.
3084 * 2. The port registers with "itself".
3085 * 3. The transmit and receive exchange ids are obtained.
3086 * 4. The receive exchange id is posted.
3087 * 5. A new els loopback event is created.
3088 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
3090 *
3091 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
3093 * of loopback mode.
3094 **/
3095static int
3096lpfc_bsg_diag_loopback_run(struct bsg_job *job)
3097{
3098	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3099	struct fc_bsg_reply *bsg_reply = job->reply;
3100	struct lpfc_hba *phba = vport->phba;
3101	struct lpfc_bsg_event *evt;
3102	struct event_data *evdat;
3103	struct lpfc_sli *psli = &phba->sli;
3104	uint32_t size;
3105	uint32_t full_size;
3106	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3107	uint16_t rpi = 0;
3108	struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3109	IOCB_t *cmd, *rsp = NULL;
3110	struct lpfc_sli_ct_request *ctreq;
3111	struct lpfc_dmabuf *txbmp;
3112	struct ulp_bde64 *txbpl = NULL;
3113	struct lpfc_dmabufext *txbuffer = NULL;
3114	struct list_head head;
3115	struct lpfc_dmabuf  *curr;
3116	uint16_t txxri = 0, rxxri;
3117	uint32_t num_bde;
3118	uint8_t *ptr = NULL, *rx_databuf = NULL;
3119	int rc = 0;
3120	int time_left;
3121	int iocb_stat = IOCB_SUCCESS;
3122	unsigned long flags;
3123	void *dataout = NULL;
3124	uint32_t total_mem;
3125
3126	/* in case no data is returned return just the return code */
3127	bsg_reply->reply_payload_rcv_len = 0;
3128
3129	if (job->request_len <
3130	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3131		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3132				"2739 Received DIAG TEST request below minimum "
3133				"size\n");
3134		rc = -EINVAL;
3135		goto loopback_test_exit;
3136	}
3137
3138	if (job->request_payload.payload_len !=
3139		job->reply_payload.payload_len) {
3140		rc = -EINVAL;
3141		goto loopback_test_exit;
3142	}
3143
3144	if ((phba->link_state == LPFC_HBA_ERROR) ||
3145	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3146	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3147		rc = -EACCES;
3148		goto loopback_test_exit;
3149	}
3150
3151	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3152		rc = -EACCES;
3153		goto loopback_test_exit;
3154	}
3155
3156	size = job->request_payload.payload_len;
3157	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
3158
3159	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3160		rc = -ERANGE;
3161		goto loopback_test_exit;
3162	}
3163
3164	if (full_size >= BUF_SZ_4K) {
3165		/*
3166		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3167		 * then we allocate 64k and re-use that buffer over and over to
3168		 * xfer the whole block. This is because Linux kernel has a
3169		 * problem allocating more than 120k of kernel space memory. Saw
3170		 * problem with GET_FCPTARGETMAPPING...
3171		 */
3172		if (size <= (64 * 1024))
3173			total_mem = full_size;
3174		else
3175			total_mem = 64 * 1024;
3176	} else
3177		/* Allocate memory for ioctl data */
3178		total_mem = BUF_SZ_4K;
3179
3180	dataout = kmalloc(total_mem, GFP_KERNEL);
3181	if (dataout == NULL) {
3182		rc = -ENOMEM;
3183		goto loopback_test_exit;
3184	}
3185
3186	ptr = dataout;
3187	ptr += ELX_LOOPBACK_HEADER_SZ;
3188	sg_copy_to_buffer(job->request_payload.sg_list,
3189				job->request_payload.sg_cnt,
3190				ptr, size);
3191	rc = lpfcdiag_loop_self_reg(phba, &rpi);
3192	if (rc)
3193		goto loopback_test_exit;
3194
3195	if (phba->sli_rev < LPFC_SLI_REV4) {
3196		rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3197		if (rc) {
3198			lpfcdiag_loop_self_unreg(phba, rpi);
3199			goto loopback_test_exit;
3200		}
3201
3202		rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
3203		if (rc) {
3204			lpfcdiag_loop_self_unreg(phba, rpi);
3205			goto loopback_test_exit;
3206		}
3207	}
3208	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3209				SLI_CT_ELX_LOOPBACK);
3210	if (!evt) {
3211		lpfcdiag_loop_self_unreg(phba, rpi);
3212		rc = -ENOMEM;
3213		goto loopback_test_exit;
3214	}
3215
3216	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3217	list_add(&evt->node, &phba->ct_ev_waiters);
3218	lpfc_bsg_event_ref(evt);
3219	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3220
3221	cmdiocbq = lpfc_sli_get_iocbq(phba);
3222	if (phba->sli_rev < LPFC_SLI_REV4)
3223		rspiocbq = lpfc_sli_get_iocbq(phba);
3224	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3225
3226	if (txbmp) {
3227		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3228		if (txbmp->virt) {
3229			INIT_LIST_HEAD(&txbmp->list);
3230			txbpl = (struct ulp_bde64 *) txbmp->virt;
3231			txbuffer = diag_cmd_data_alloc(phba,
3232							txbpl, full_size, 0);
3233		}
3234	}
3235
3236	if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3237		rc = -ENOMEM;
3238		goto err_loopback_test_exit;
3239	}
3240	if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3241		rc = -ENOMEM;
3242		goto err_loopback_test_exit;
3243	}
3244
3245	cmd = &cmdiocbq->iocb;
3246	if (phba->sli_rev < LPFC_SLI_REV4)
3247		rsp = &rspiocbq->iocb;
3248
3249	INIT_LIST_HEAD(&head);
3250	list_add_tail(&head, &txbuffer->dma.list);
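	/*
	 * The first segment carries the ELX loopback CT header; user data is
	 * then streamed across the chained page-sized buffers.
	 */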
3251	list_for_each_entry(curr, &head, list) {
3252		segment_len = ((struct lpfc_dmabufext *)curr)->size;
3253		if (current_offset == 0) {
3254			ctreq = curr->virt;
3255			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3256			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3257			ctreq->RevisionId.bits.InId = 0;
3258			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3259			ctreq->FsSubType = 0;
3260			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3261			ctreq->CommandResponse.bits.Size   = size;
3262			segment_offset = ELX_LOOPBACK_HEADER_SZ;
3263		} else
3264			segment_offset = 0;
3265
3266		BUG_ON(segment_offset >= segment_len);
3267		memcpy(curr->virt + segment_offset,
3268			ptr + current_offset,
3269			segment_len - segment_offset);
3270
3271		current_offset += segment_len - segment_offset;
3272		BUG_ON(current_offset > size);
3273	}
3274	list_del(&head);
3275
3276	/* Build the XMIT_SEQUENCE iocb */
3277	num_bde = (uint32_t)txbuffer->flag;
3278
3279	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3280	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3281	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3282	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3283
3284	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3285	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3286	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3287	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3288
3289	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3290	cmd->ulpBdeCount = 1;
3291	cmd->ulpLe = 1;
3292	cmd->ulpClass = CLASS3;
3293
3294	if (phba->sli_rev < LPFC_SLI_REV4) {
3295		cmd->ulpContext = txxri;
3296	} else {
3297		cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3298		cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3299		cmdiocbq->context3 = txbmp;
3300		cmdiocbq->sli4_xritag = NO_XRI;
3301		cmd->unsli3.rcvsli3.ox_id = 0xffff;
3302	}
3303	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3304	cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
3305	cmdiocbq->vport = phba->pport;
3306	cmdiocbq->iocb_cmpl = NULL;
3307	iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3308					     rspiocbq, (phba->fc_ratov * 2) +
3309					     LPFC_DRVR_TIMEOUT);
3310
3311	if ((iocb_stat != IOCB_SUCCESS) ||
3312	    ((phba->sli_rev < LPFC_SLI_REV4) &&
3313	     (rsp->ulpStatus != IOSTAT_SUCCESS))) {
3314		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3315				"3126 Failed loopback test issue iocb: "
3316				"iocb_stat:x%x\n", iocb_stat);
3317		rc = -EIO;
3318		goto err_loopback_test_exit;
3319	}
3320
3321	evt->waiting = 1;
3322	time_left = wait_event_interruptible_timeout(
3323		evt->wq, !list_empty(&evt->events_to_see),
3324		msecs_to_jiffies(1000 *
3325			((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3326	evt->waiting = 0;
3327	if (list_empty(&evt->events_to_see)) {
3328		rc = (time_left) ? -EINTR : -ETIMEDOUT;
3329		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3330				"3125 Not receiving unsolicited event, "
3331				"rc:x%x\n", rc);
3332	} else {
3333		spin_lock_irqsave(&phba->ct_ev_lock, flags);
3334		list_move(evt->events_to_see.prev, &evt->events_to_get);
3335		evdat = list_entry(evt->events_to_get.prev,
3336				   typeof(*evdat), node);
3337		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3338		rx_databuf = evdat->data;
3339		if (evdat->len != full_size) {
3340			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3341				"1603 Loopback test did not receive expected "
3342				"data length. actual length 0x%x expected "
3343				"length 0x%x\n",
3344				evdat->len, full_size);
3345			rc = -EIO;
3346		} else if (rx_databuf == NULL)
3347			rc = -EIO;
3348		else {
3349			rc = IOCB_SUCCESS;
3350			/* skip over elx loopback header */
3351			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3352			bsg_reply->reply_payload_rcv_len =
3353				sg_copy_from_buffer(job->reply_payload.sg_list,
3354						    job->reply_payload.sg_cnt,
3355						    rx_databuf, size);
3356			bsg_reply->reply_payload_rcv_len = size;
3357		}
3358	}
3359
3360err_loopback_test_exit:
3361	lpfcdiag_loop_self_unreg(phba, rpi);
3362
3363	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3364	lpfc_bsg_event_unref(evt); /* release ref */
3365	lpfc_bsg_event_unref(evt); /* delete */
3366	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3367
3368	if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3369		lpfc_sli_release_iocbq(phba, cmdiocbq);
3370
3371	if (rspiocbq != NULL)
3372		lpfc_sli_release_iocbq(phba, rspiocbq);
3373
3374	if (txbmp != NULL) {
3375		if (txbpl != NULL) {
3376			if (txbuffer != NULL)
3377				diag_cmd_data_free(phba, txbuffer);
3378			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3379		}
3380		kfree(txbmp);
3381	}
3382
3383loopback_test_exit:
3384	kfree(dataout);
3385	/* make error code available to userspace */
3386	bsg_reply->result = rc;
3387	job->dd_data = NULL;
3388	/* complete the job back to userspace if no error */
3389	if (rc == IOCB_SUCCESS)
3390		bsg_job_done(job, bsg_reply->result,
3391			       bsg_reply->reply_payload_rcv_len);
3392	return rc;
3393}
3394
3395/**
3396 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3397 * @job: GET_DFC_REV fc_bsg_job
3398 **/
3399static int
3400lpfc_bsg_get_dfc_rev(struct bsg_job *job)
3401{
3402	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3403	struct fc_bsg_reply *bsg_reply = job->reply;
3404	struct lpfc_hba *phba = vport->phba;
3405	struct get_mgmt_rev_reply *event_reply;
3406	int rc = 0;
3407
3408	if (job->request_len <
3409	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3410		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3411				"2740 Received GET_DFC_REV request below "
3412				"minimum size\n");
3413		rc = -EINVAL;
3414		goto job_error;
3415	}
3416
3417	event_reply = (struct get_mgmt_rev_reply *)
3418		bsg_reply->reply_data.vendor_reply.vendor_rsp;
3419
3420	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
3421		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3422				"2741 Received GET_DFC_REV reply below "
3423				"minimum size\n");
3424		rc = -EINVAL;
3425		goto job_error;
3426	}
3427
3428	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3429	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3430job_error:
3431	bsg_reply->result = rc;
3432	if (rc == 0)
3433		bsg_job_done(job, bsg_reply->result,
3434			       bsg_reply->reply_payload_rcv_len);
3435	return rc;
3436}
3437
3438/**
3439 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3440 * @phba: Pointer to HBA context object.
3441 * @pmboxq: Pointer to mailbox command.
3442 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
 * handler with no lock held. It copies the mailbox response back to the
 * job's reply payload and completes the bsg job if it is still active.
3448 **/
3449static void
3450lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3451{
3452	struct bsg_job_data *dd_data;
3453	struct fc_bsg_reply *bsg_reply;
3454	struct bsg_job *job;
3455	uint32_t size;
3456	unsigned long flags;
3457	uint8_t *pmb, *pmb_buf;
3458
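	/* the job context was stashed on the mailbox when it was issued */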
3459	dd_data = pmboxq->ctx_ndlp;
3460
3461	/*
3462	 * The outgoing buffer is readily referred from the dma buffer,
3463	 * just need to get header part from mailboxq structure.
3464	 */
3465	pmb = (uint8_t *)&pmboxq->u.mb;
3466	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3467	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3468
3469	/* Determine if job has been aborted */
3470
3471	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3472	job = dd_data->set_job;
3473	if (job) {
3474		/* Prevent timeout handling from trying to abort job  */
3475		job->dd_data = NULL;
3476	}
3477	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3478
3479	/* Copy the mailbox data to the job if it is still active */
3480
3481	if (job) {
3482		bsg_reply = job->reply;
3483		size = job->reply_payload.payload_len;
3484		bsg_reply->reply_payload_rcv_len =
3485			sg_copy_from_buffer(job->reply_payload.sg_list,
3486					    job->reply_payload.sg_cnt,
3487					    pmb_buf, size);
3488	}
3489
3490	dd_data->set_job = NULL;
3491	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3492	lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3493	kfree(dd_data);
3494
3495	/* Complete the job if the job is still active */
3496
3497	if (job) {
3498		bsg_reply->result = 0;
3499		bsg_job_done(job, bsg_reply->result,
3500			       bsg_reply->reply_payload_rcv_len);
3501	}
3502	return;
3503}
3504
3505/**
3506 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3507 * @phba: Pointer to HBA context object.
3508 * @mb: Pointer to a mailbox object.
3509 * @vport: Pointer to a vport object.
3510 *
3511 * Some commands require the port to be offline, some may not be called from
3512 * the application.
3513 **/
3514static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3515	MAILBOX_t *mb, struct lpfc_vport *vport)
3516{
3517	/* return negative error values for bsg job */
3518	switch (mb->mbxCommand) {
3519	/* Offline only */
3520	case MBX_INIT_LINK:
3521	case MBX_DOWN_LINK:
3522	case MBX_CONFIG_LINK:
3523	case MBX_CONFIG_RING:
3524	case MBX_RESET_RING:
3525	case MBX_UNREG_LOGIN:
3526	case MBX_CLEAR_LA:
3527	case MBX_DUMP_CONTEXT:
3528	case MBX_RUN_DIAGS:
3529	case MBX_RESTART:
3530	case MBX_SET_MASK:
3531		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3532			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3533				"2743 Command 0x%x is illegal in on-line "
3534				"state\n",
3535				mb->mbxCommand);
3536			return -EPERM;
3537		}
3538	case MBX_WRITE_NV:
3539	case MBX_WRITE_VPARMS:
3540	case MBX_LOAD_SM:
3541	case MBX_READ_NV:
3542	case MBX_READ_CONFIG:
3543	case MBX_READ_RCONFIG:
3544	case MBX_READ_STATUS:
3545	case MBX_READ_XRI:
3546	case MBX_READ_REV:
3547	case MBX_READ_LNK_STAT:
3548	case MBX_DUMP_MEMORY:
3549	case MBX_DOWN_LOAD:
3550	case MBX_UPDATE_CFG:
3551	case MBX_KILL_BOARD:
3552	case MBX_READ_TOPOLOGY:
3553	case MBX_LOAD_AREA:
3554	case MBX_LOAD_EXP_ROM:
3555	case MBX_BEACON:
3556	case MBX_DEL_LD_ENTRY:
3557	case MBX_SET_DEBUG:
3558	case MBX_WRITE_WWN:
3559	case MBX_SLI4_CONFIG:
3560	case MBX_READ_EVENT_LOG:
3561	case MBX_READ_EVENT_LOG_STATUS:
3562	case MBX_WRITE_EVENT_LOG:
3563	case MBX_PORT_CAPABILITIES:
3564	case MBX_PORT_IOV_CONTROL:
3565	case MBX_RUN_BIU_DIAG64:
3566		break;
3567	case MBX_SET_VARIABLE:
3568		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3569			"1226 mbox: set_variable 0x%x, 0x%x\n",
3570			mb->un.varWords[0],
3571			mb->un.varWords[1]);
3572		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3573			&& (mb->un.varWords[1] == 1)) {
3574			phba->wait_4_mlo_maint_flg = 1;
3575		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
3576			spin_lock_irq(&phba->hbalock);
3577			phba->link_flag &= ~LS_LOOPBACK_MODE;
3578			spin_unlock_irq(&phba->hbalock);
3579			phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3580		}
3581		break;
3582	case MBX_READ_SPARM64:
3583	case MBX_REG_LOGIN:
3584	case MBX_REG_LOGIN64:
3585	case MBX_CONFIG_PORT:
3586	case MBX_RUN_BIU_DIAG:
3587	default:
3588		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3589			"2742 Unknown Command 0x%x\n",
3590			mb->mbxCommand);
3591		return -EPERM;
3592	}
3593
3594	return 0; /* ok */
3595}
3596
3597/**
 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3599 * @phba: Pointer to HBA context object.
3600 *
 * This routine cleans up and resets the BSG handling of a multi-buffer
 * mbox command session.
3603 **/
3604static void
3605lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3606{
3607	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3608		return;
3609
3610	/* free all memory, including dma buffers */
3611	lpfc_bsg_dma_page_list_free(phba,
3612				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3613	lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3614	/* multi-buffer write mailbox command pass-through complete */
3615	memset((char *)&phba->mbox_ext_buf_ctx, 0,
3616	       sizeof(struct lpfc_mbox_ext_buf_ctx));
3617	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3618
3619	return;
3620}
3621
3622/**
3623 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3624 * @phba: Pointer to HBA context object.
3625 * @pmboxq: Pointer to mailbox command.
3626 *
 * This routine handles BSG job completion for mailbox commands with
 * multiple external buffers.
3629 **/
3630static struct bsg_job *
3631lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3632{
3633	struct bsg_job_data *dd_data;
3634	struct bsg_job *job;
3635	struct fc_bsg_reply *bsg_reply;
3636	uint8_t *pmb, *pmb_buf;
3637	unsigned long flags;
3638	uint32_t size;
3639	int rc = 0;
3640	struct lpfc_dmabuf *dmabuf;
3641	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3642	uint8_t *pmbx;
3643
3644	dd_data = pmboxq->ctx_buf;
3645
3646	/* Determine if job has been aborted */
3647	spin_lock_irqsave(&phba->ct_ev_lock, flags);
3648	job = dd_data->set_job;
3649	if (job) {
3650		bsg_reply = job->reply;
3651		/* Prevent timeout handling from trying to abort job  */
3652		job->dd_data = NULL;
3653	}
3654	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3655
	/*
	 * The outgoing buffer is readily referenced from the dma buffer;
	 * only the header part needs to be fetched from the mailboxq structure.
	 */
3660
3661	pmb = (uint8_t *)&pmboxq->u.mb;
3662	pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3663	/* Copy the byte swapped response mailbox back to the user */
3664	memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3665	/* if there is any non-embedded extended data copy that too */
3666	dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3667	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3668	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3669	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3670		pmbx = (uint8_t *)dmabuf->virt;
3671		/* byte swap the extended data following the mailbox command */
3672		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3673			&pmbx[sizeof(MAILBOX_t)],
3674			sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3675	}
3676
3677	/* Complete the job if the job is still active */
3678
3679	if (job) {
3680		size = job->reply_payload.payload_len;
3681		bsg_reply->reply_payload_rcv_len =
3682			sg_copy_from_buffer(job->reply_payload.sg_list,
3683					    job->reply_payload.sg_cnt,
3684					    pmb_buf, size);
3685
3686		/* result for successful */
3687		bsg_reply->result = 0;
3688
3689		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3690				"2937 SLI_CONFIG ext-buffer mailbox command "
3691				"(x%x/x%x) complete bsg job done, bsize:%d\n",
3692				phba->mbox_ext_buf_ctx.nembType,
3693				phba->mbox_ext_buf_ctx.mboxType, size);
3694		lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3695					phba->mbox_ext_buf_ctx.nembType,
3696					phba->mbox_ext_buf_ctx.mboxType,
3697					dma_ebuf, sta_pos_addr,
3698					phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3699	} else {
3700		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3701				"2938 SLI_CONFIG ext-buffer mailbox "
3702				"command (x%x/x%x) failure, rc:x%x\n",
3703				phba->mbox_ext_buf_ctx.nembType,
3704				phba->mbox_ext_buf_ctx.mboxType, rc);
3705	}
3706
3707
3708	/* state change */
3709	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3710	kfree(dd_data);
3711	return job;
3712}
3713
3714/**
3715 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3716 * @phba: Pointer to HBA context object.
3717 * @pmboxq: Pointer to mailbox command.
3718 *
3719 * This is completion handler function for mailbox read commands with multiple
3720 * external buffers.
3721 **/
3722static void
3723lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3724{
3725	struct bsg_job *job;
3726	struct fc_bsg_reply *bsg_reply;
3727
3728	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3729
	/* if the job has been aborted, mark the mailbox command as failed */
3731	if (!job)
3732		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3733
3734	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3735			"2939 SLI_CONFIG ext-buffer rd mailbox command "
3736			"complete, ctxState:x%x, mbxStatus:x%x\n",
3737			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3738
3739	if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3740		lpfc_bsg_mbox_ext_session_reset(phba);
3741
3742	/* free base driver mailbox structure memory */
3743	mempool_free(pmboxq, phba->mbox_mem_pool);
3744
3745	/* if the job is still active, call job done */
3746	if (job) {
3747		bsg_reply = job->reply;
3748		bsg_job_done(job, bsg_reply->result,
3749			       bsg_reply->reply_payload_rcv_len);
3750	}
3751	return;
3752}
3753
3754/**
3755 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3756 * @phba: Pointer to HBA context object.
3757 * @pmboxq: Pointer to mailbox command.
3758 *
3759 * This is completion handler function for mailbox write commands with multiple
3760 * external buffers.
3761 **/
3762static void
3763lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3764{
3765	struct bsg_job *job;
3766	struct fc_bsg_reply *bsg_reply;
3767
3768	job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3769
	/* if the job has been aborted, mark the mailbox command as failed */
3771	if (!job)
3772		pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3773
3774	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3775			"2940 SLI_CONFIG ext-buffer wr mailbox command "
3776			"complete, ctxState:x%x, mbxStatus:x%x\n",
3777			phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3778
3779	/* free all memory, including dma buffers */
3780	mempool_free(pmboxq, phba->mbox_mem_pool);
3781	lpfc_bsg_mbox_ext_session_reset(phba);
3782
3783	/* if the job is still active, call job done */
3784	if (job) {
3785		bsg_reply = job->reply;
3786		bsg_job_done(job, bsg_reply->result,
3787			       bsg_reply->reply_payload_rcv_len);
3788	}
3789
3790	return;
3791}
3792
3793static void
3794lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3795				uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3796				struct lpfc_dmabuf *ext_dmabuf)
3797{
3798	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3799
3800	/* pointer to the start of mailbox command */
3801	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3802
3803	if (nemb_tp == nemb_mse) {
3804		if (index == 0) {
3805			sli_cfg_mbx->un.sli_config_emb0_subsys.
3806				mse[index].pa_hi =
3807				putPaddrHigh(mbx_dmabuf->phys +
3808					     sizeof(MAILBOX_t));
3809			sli_cfg_mbx->un.sli_config_emb0_subsys.
3810				mse[index].pa_lo =
3811				putPaddrLow(mbx_dmabuf->phys +
3812					    sizeof(MAILBOX_t));
3813			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3814					"2943 SLI_CONFIG(mse)[%d], "
3815					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3816					index,
3817					sli_cfg_mbx->un.sli_config_emb0_subsys.
3818					mse[index].buf_len,
3819					sli_cfg_mbx->un.sli_config_emb0_subsys.
3820					mse[index].pa_hi,
3821					sli_cfg_mbx->un.sli_config_emb0_subsys.
3822					mse[index].pa_lo);
3823		} else {
3824			sli_cfg_mbx->un.sli_config_emb0_subsys.
3825				mse[index].pa_hi =
3826				putPaddrHigh(ext_dmabuf->phys);
3827			sli_cfg_mbx->un.sli_config_emb0_subsys.
3828				mse[index].pa_lo =
3829				putPaddrLow(ext_dmabuf->phys);
3830			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3831					"2944 SLI_CONFIG(mse)[%d], "
3832					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3833					index,
3834					sli_cfg_mbx->un.sli_config_emb0_subsys.
3835					mse[index].buf_len,
3836					sli_cfg_mbx->un.sli_config_emb0_subsys.
3837					mse[index].pa_hi,
3838					sli_cfg_mbx->un.sli_config_emb0_subsys.
3839					mse[index].pa_lo);
3840		}
3841	} else {
3842		if (index == 0) {
3843			sli_cfg_mbx->un.sli_config_emb1_subsys.
3844				hbd[index].pa_hi =
3845				putPaddrHigh(mbx_dmabuf->phys +
3846					     sizeof(MAILBOX_t));
3847			sli_cfg_mbx->un.sli_config_emb1_subsys.
3848				hbd[index].pa_lo =
3849				putPaddrLow(mbx_dmabuf->phys +
3850					    sizeof(MAILBOX_t));
3851			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3852					"3007 SLI_CONFIG(hbd)[%d], "
3853					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3854				index,
3855				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3856				&sli_cfg_mbx->un.
3857				sli_config_emb1_subsys.hbd[index]),
3858				sli_cfg_mbx->un.sli_config_emb1_subsys.
3859				hbd[index].pa_hi,
3860				sli_cfg_mbx->un.sli_config_emb1_subsys.
3861				hbd[index].pa_lo);
3862
3863		} else {
3864			sli_cfg_mbx->un.sli_config_emb1_subsys.
3865				hbd[index].pa_hi =
3866				putPaddrHigh(ext_dmabuf->phys);
3867			sli_cfg_mbx->un.sli_config_emb1_subsys.
3868				hbd[index].pa_lo =
3869				putPaddrLow(ext_dmabuf->phys);
3870			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3871					"3008 SLI_CONFIG(hbd)[%d], "
3872					"bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3873				index,
3874				bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3875				&sli_cfg_mbx->un.
3876				sli_config_emb1_subsys.hbd[index]),
3877				sli_cfg_mbx->un.sli_config_emb1_subsys.
3878				hbd[index].pa_hi,
3879				sli_cfg_mbx->un.sli_config_emb1_subsys.
3880				hbd[index].pa_lo);
3881		}
3882	}
3883	return;
3884}
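
/*
 * Note on the descriptor setup above: each 64-bit DMA address is split into
 * the 32-bit hi/lo words the SLI_CONFIG descriptors carry, e.g.
 * (illustrative; "desc" is a hypothetical stand-in for the mse/hbd entries):
 *
 *	dma_addr_t pa = mbx_dmabuf->phys + sizeof(MAILBOX_t);
 *
 *	desc->pa_hi = putPaddrHigh(pa);	upper 32 bits
 *	desc->pa_lo = putPaddrLow(pa);	lower 32 bits
 *
 * Buffer 0 always points just past the embedded MAILBOX_t within the mailbox
 * DMA page; buffers 1..N-1 point at separately allocated extension pages.
 */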
3885
3886/**
 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
 * non-embedded external buffers.
3895 **/
3896static int
3897lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3898			      enum nemb_type nemb_tp,
3899			      struct lpfc_dmabuf *dmabuf)
3900{
3901	struct fc_bsg_request *bsg_request = job->request;
3902	struct lpfc_sli_config_mbox *sli_cfg_mbx;
3903	struct dfc_mbox_req *mbox_req;
3904	struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3905	uint32_t ext_buf_cnt, ext_buf_index;
3906	struct lpfc_dmabuf *ext_dmabuf = NULL;
3907	struct bsg_job_data *dd_data = NULL;
3908	LPFC_MBOXQ_t *pmboxq = NULL;
3909	MAILBOX_t *pmb;
3910	uint8_t *pmbx;
3911	int rc, i;
3912
3913	mbox_req =
3914	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3915
3916	/* pointer to the start of mailbox command */
3917	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3918
3919	if (nemb_tp == nemb_mse) {
3920		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3921			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3922		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3923			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3924					"2945 Handled SLI_CONFIG(mse) rd, "
3925					"ext_buf_cnt(%d) out of range(%d)\n",
3926					ext_buf_cnt,
3927					LPFC_MBX_SLI_CONFIG_MAX_MSE);
3928			rc = -ERANGE;
3929			goto job_error;
3930		}
3931		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3932				"2941 Handled SLI_CONFIG(mse) rd, "
3933				"ext_buf_cnt:%d\n", ext_buf_cnt);
3934	} else {
3935		/* sanity check on interface type for support */
3936		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
3937		    LPFC_SLI_INTF_IF_TYPE_2) {
3938			rc = -ENODEV;
3939			goto job_error;
3940		}
3941		/* nemb_tp == nemb_hbd */
3942		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3943		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3944			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3945					"2946 Handled SLI_CONFIG(hbd) rd, "
3946					"ext_buf_cnt(%d) out of range(%d)\n",
3947					ext_buf_cnt,
3948					LPFC_MBX_SLI_CONFIG_MAX_HBD);
3949			rc = -ERANGE;
3950			goto job_error;
3951		}
3952		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3953				"2942 Handled SLI_CONFIG(hbd) rd, "
3954				"ext_buf_cnt:%d\n", ext_buf_cnt);
3955	}
3956
3957	/* before dma descriptor setup */
3958	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3959					sta_pre_addr, dmabuf, ext_buf_cnt);
3960
	/* reject a non-embedded mailbox command with no external buffer */
3962	if (ext_buf_cnt == 0) {
3963		rc = -EPERM;
3964		goto job_error;
3965	} else if (ext_buf_cnt > 1) {
3966		/* additional external read buffers */
3967		for (i = 1; i < ext_buf_cnt; i++) {
3968			ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3969			if (!ext_dmabuf) {
3970				rc = -ENOMEM;
3971				goto job_error;
3972			}
3973			list_add_tail(&ext_dmabuf->list,
3974				      &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3975		}
3976	}
3977
3978	/* bsg tracking structure */
3979	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3980	if (!dd_data) {
3981		rc = -ENOMEM;
3982		goto job_error;
3983	}
3984
3985	/* mailbox command structure for base driver */
3986	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3987	if (!pmboxq) {
3988		rc = -ENOMEM;
3989		goto job_error;
3990	}
3991	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3992
3993	/* for the first external buffer */
3994	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3995
3996	/* for the rest of external buffer descriptors if any */
3997	if (ext_buf_cnt > 1) {
3998		ext_buf_index = 1;
3999		list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
4000				&phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
4001			lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
4002						ext_buf_index, dmabuf,
4003						curr_dmabuf);
4004			ext_buf_index++;
4005		}
4006	}
4007
4008	/* after dma descriptor setup */
4009	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
4010					sta_pos_addr, dmabuf, ext_buf_cnt);
4011
4012	/* construct base driver mbox command */
4013	pmb = &pmboxq->u.mb;
4014	pmbx = (uint8_t *)dmabuf->virt;
4015	memcpy(pmb, pmbx, sizeof(*pmb));
4016	pmb->mbxOwner = OWN_HOST;
4017	pmboxq->vport = phba->pport;
4018
4019	/* multi-buffer handling context */
4020	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4021	phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
4022	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4023	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4024	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4025	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4026
4027	/* callback for multi-buffer read mailbox command */
4028	pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
4029
4030	/* context fields to callback function */
4031	pmboxq->ctx_buf = dd_data;
4032	dd_data->type = TYPE_MBOX;
4033	dd_data->set_job = job;
4034	dd_data->context_un.mbox.pmboxq = pmboxq;
4035	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4036	job->dd_data = dd_data;
4037
4038	/* state change */
4039	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4040
4041	/*
4042	 * Non-embedded mailbox subcommand data gets byte swapped here because
4043	 * the lower level driver code only does the first 64 mailbox words.
4044	 */
4045	if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
4046	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
4047		(nemb_tp == nemb_mse))
4048		lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
4049			&pmbx[sizeof(MAILBOX_t)],
4050				sli_cfg_mbx->un.sli_config_emb0_subsys.
4051					mse[0].buf_len);
4052
4053	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4054	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4055		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4056				"2947 Issued SLI_CONFIG ext-buffer "
4057				"mailbox command, rc:x%x\n", rc);
4058		return SLI_CONFIG_HANDLED;
4059	}
4060	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4061			"2948 Failed to issue SLI_CONFIG ext-buffer "
4062			"mailbox command, rc:x%x\n", rc);
4063	rc = -EPIPE;
4064
4065job_error:
4066	if (pmboxq)
4067		mempool_free(pmboxq, phba->mbox_mem_pool);
4068	lpfc_bsg_dma_page_list_free(phba,
4069				    &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4070	kfree(dd_data);
4071	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4072	return rc;
4073}
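
/*
 * Read session flow, as seen at the BSG boundary (sketch): the routine above
 * issues the SLI_CONFIG command with all external read buffers pre-allocated
 * and described; once the port completes it, the application retrieves the
 * buffers one request at a time through lpfc_bsg_read_ebuf_get() below until
 * numBuf buffers have been returned and the session resets to idle.
 */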
4074
4075/**
4076 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4077 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
 * non-embedded external buffers.
4083 **/
4084static int
4085lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4086			       enum nemb_type nemb_tp,
4087			       struct lpfc_dmabuf *dmabuf)
4088{
4089	struct fc_bsg_request *bsg_request = job->request;
4090	struct fc_bsg_reply *bsg_reply = job->reply;
4091	struct dfc_mbox_req *mbox_req;
4092	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4093	uint32_t ext_buf_cnt;
4094	struct bsg_job_data *dd_data = NULL;
4095	LPFC_MBOXQ_t *pmboxq = NULL;
4096	MAILBOX_t *pmb;
4097	uint8_t *mbx;
4098	int rc = SLI_CONFIG_NOT_HANDLED, i;
4099
4100	mbox_req =
4101	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4102
4103	/* pointer to the start of mailbox command */
4104	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4105
4106	if (nemb_tp == nemb_mse) {
4107		ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4108			&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4109		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4110			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4111					"2953 Failed SLI_CONFIG(mse) wr, "
4112					"ext_buf_cnt(%d) out of range(%d)\n",
4113					ext_buf_cnt,
4114					LPFC_MBX_SLI_CONFIG_MAX_MSE);
4115			return -ERANGE;
4116		}
4117		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4118				"2949 Handled SLI_CONFIG(mse) wr, "
4119				"ext_buf_cnt:%d\n", ext_buf_cnt);
4120	} else {
4121		/* sanity check on interface type for support */
4122		if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
4123		    LPFC_SLI_INTF_IF_TYPE_2)
4124			return -ENODEV;
4125		/* nemb_tp == nemb_hbd */
4126		ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4127		if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4128			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4129					"2954 Failed SLI_CONFIG(hbd) wr, "
4130					"ext_buf_cnt(%d) out of range(%d)\n",
4131					ext_buf_cnt,
4132					LPFC_MBX_SLI_CONFIG_MAX_HBD);
4133			return -ERANGE;
4134		}
4135		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4136				"2950 Handled SLI_CONFIG(hbd) wr, "
4137				"ext_buf_cnt:%d\n", ext_buf_cnt);
4138	}
4139
4140	/* before dma buffer descriptor setup */
4141	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4142					sta_pre_addr, dmabuf, ext_buf_cnt);
4143
4144	if (ext_buf_cnt == 0)
4145		return -EPERM;
4146
4147	/* for the first external buffer */
4148	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4149
4150	/* after dma descriptor setup */
4151	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4152					sta_pos_addr, dmabuf, ext_buf_cnt);
4153
	/* log the expected lengths of the additional external buffers */
4155	for (i = 1; i < ext_buf_cnt; i++) {
4156		if (nemb_tp == nemb_mse)
4157			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4158				"2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4159				i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4160				mse[i].buf_len);
4161		else
4162			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4163				"2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4164				i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4165				&sli_cfg_mbx->un.sli_config_emb1_subsys.
4166				hbd[i]));
4167	}
4168
4169	/* multi-buffer handling context */
4170	phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4171	phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4172	phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4173	phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4174	phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4175	phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
4176
4177	if (ext_buf_cnt == 1) {
4178		/* bsg tracking structure */
4179		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4180		if (!dd_data) {
4181			rc = -ENOMEM;
4182			goto job_error;
4183		}
4184
4185		/* mailbox command structure for base driver */
4186		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4187		if (!pmboxq) {
4188			rc = -ENOMEM;
4189			goto job_error;
4190		}
4191		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4192		pmb = &pmboxq->u.mb;
4193		mbx = (uint8_t *)dmabuf->virt;
4194		memcpy(pmb, mbx, sizeof(*pmb));
4195		pmb->mbxOwner = OWN_HOST;
4196		pmboxq->vport = phba->pport;
4197
		/* callback for multi-buffer write mailbox command */
4199		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4200
4201		/* context fields to callback function */
4202		pmboxq->ctx_buf = dd_data;
4203		dd_data->type = TYPE_MBOX;
4204		dd_data->set_job = job;
4205		dd_data->context_un.mbox.pmboxq = pmboxq;
4206		dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4207		job->dd_data = dd_data;
4208
		/* state change */
		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4212		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4213		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4214			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4215					"2955 Issued SLI_CONFIG ext-buffer "
4216					"mailbox command, rc:x%x\n", rc);
4217			return SLI_CONFIG_HANDLED;
4218		}
4219		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4220				"2956 Failed to issue SLI_CONFIG ext-buffer "
4221				"mailbox command, rc:x%x\n", rc);
4222		rc = -EPIPE;
4223		goto job_error;
4224	}
4225
	/* wait for additional external buffers */
4227
4228	bsg_reply->result = 0;
4229	bsg_job_done(job, bsg_reply->result,
4230		       bsg_reply->reply_payload_rcv_len);
4231	return SLI_CONFIG_HANDLED;
4232
4233job_error:
4234	if (pmboxq)
4235		mempool_free(pmboxq, phba->mbox_mem_pool);
4236	kfree(dd_data);
4237
4238	return rc;
4239}
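
/*
 * Write session flow, mirrored from the read case (sketch): with more than
 * one external buffer the routine above only stages the mailbox command and
 * completes the job early; the application then delivers the remaining
 * buffers one request at a time, and the command is finally issued from
 * lpfc_bsg_write_ebuf_set() when the last buffer (seqNum == numBuf) arrives.
 */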
4240
4241/**
4242 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4243 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine handles SLI_CONFIG (0x9B) mailbox commands with non-embedded
 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
 * with embedded subsystem 0x1 and opcodes with external HBDs.
4250 **/
4251static int
4252lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4253			     struct lpfc_dmabuf *dmabuf)
4254{
4255	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4256	uint32_t subsys;
4257	uint32_t opcode;
4258	int rc = SLI_CONFIG_NOT_HANDLED;
4259
4260	/* state change on new multi-buffer pass-through mailbox command */
4261	phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4262
4263	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
4264
4265	if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4266	    &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4267		subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4268				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4269		opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4270				    &sli_cfg_mbx->un.sli_config_emb0_subsys);
4271		if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4272			switch (opcode) {
4273			case FCOE_OPCODE_READ_FCF:
4274			case FCOE_OPCODE_GET_DPORT_RESULTS:
4275				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4276						"2957 Handled SLI_CONFIG "
4277						"subsys_fcoe, opcode:x%x\n",
4278						opcode);
4279				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4280							nemb_mse, dmabuf);
4281				break;
4282			case FCOE_OPCODE_ADD_FCF:
4283			case FCOE_OPCODE_SET_DPORT_MODE:
4284			case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4285				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4286						"2958 Handled SLI_CONFIG "
4287						"subsys_fcoe, opcode:x%x\n",
4288						opcode);
4289				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4290							nemb_mse, dmabuf);
4291				break;
4292			default:
4293				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4294						"2959 Reject SLI_CONFIG "
4295						"subsys_fcoe, opcode:x%x\n",
4296						opcode);
4297				rc = -EPERM;
4298				break;
4299			}
4300		} else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4301			switch (opcode) {
4302			case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4303			case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4304			case COMN_OPCODE_GET_PROFILE_CONFIG:
4305			case COMN_OPCODE_SET_FEATURES:
4306				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4307						"3106 Handled SLI_CONFIG "
4308						"subsys_comn, opcode:x%x\n",
4309						opcode);
4310				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4311							nemb_mse, dmabuf);
4312				break;
4313			default:
4314				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4315						"3107 Reject SLI_CONFIG "
4316						"subsys_comn, opcode:x%x\n",
4317						opcode);
4318				rc = -EPERM;
4319				break;
4320			}
4321		} else {
4322			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4323					"2977 Reject SLI_CONFIG "
4324					"subsys:x%d, opcode:x%x\n",
4325					subsys, opcode);
4326			rc = -EPERM;
4327		}
4328	} else {
4329		subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4330				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4331		opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4332				    &sli_cfg_mbx->un.sli_config_emb1_subsys);
4333		if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4334			switch (opcode) {
4335			case COMN_OPCODE_READ_OBJECT:
4336			case COMN_OPCODE_READ_OBJECT_LIST:
4337				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4338						"2960 Handled SLI_CONFIG "
4339						"subsys_comn, opcode:x%x\n",
4340						opcode);
4341				rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4342							nemb_hbd, dmabuf);
4343				break;
4344			case COMN_OPCODE_WRITE_OBJECT:
4345				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4346						"2961 Handled SLI_CONFIG "
4347						"subsys_comn, opcode:x%x\n",
4348						opcode);
4349				rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4350							nemb_hbd, dmabuf);
4351				break;
4352			default:
4353				lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4354						"2962 Not handled SLI_CONFIG "
4355						"subsys_comn, opcode:x%x\n",
4356						opcode);
4357				rc = SLI_CONFIG_NOT_HANDLED;
4358				break;
4359			}
4360		} else {
4361			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4362					"2978 Not handled SLI_CONFIG "
4363					"subsys:x%d, opcode:x%x\n",
4364					subsys, opcode);
4365			rc = SLI_CONFIG_NOT_HANDLED;
4366		}
4367	}
4368
4369	/* state reset on not handled new multi-buffer mailbox command */
4370	if (rc != SLI_CONFIG_HANDLED)
4371		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4372
4373	return rc;
4374}
4375
4376/**
 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
 * @phba: Pointer to HBA context object.
 *
 * This routine requests the abort of a pass-through mailbox command with
 * multiple external buffers due to an error condition.
4382 **/
4383static void
4384lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4385{
4386	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4387		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4388	else
4389		lpfc_bsg_mbox_ext_session_reset(phba);
4390	return;
4391}
4392
4393/**
4394 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4395 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 *
 * This routine extracts the next mailbox read external buffer back to
 * user space through BSG.
4400 **/
4401static int
4402lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4403{
4404	struct fc_bsg_reply *bsg_reply = job->reply;
4405	struct lpfc_sli_config_mbox *sli_cfg_mbx;
4406	struct lpfc_dmabuf *dmabuf;
4407	uint8_t *pbuf;
4408	uint32_t size;
4409	uint32_t index;
4410
4411	index = phba->mbox_ext_buf_ctx.seqNum;
4412	phba->mbox_ext_buf_ctx.seqNum++;
4413
4414	sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4415			phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4416
4417	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4418		size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4419			&sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4420		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4421				"2963 SLI_CONFIG (mse) ext-buffer rd get "
4422				"buffer[%d], size:%d\n", index, size);
4423	} else {
4424		size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4425			&sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4426		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4427				"2964 SLI_CONFIG (hbd) ext-buffer rd get "
4428				"buffer[%d], size:%d\n", index, size);
4429	}
4430	if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4431		return -EPIPE;
4432	dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4433				  struct lpfc_dmabuf, list);
4434	list_del_init(&dmabuf->list);
4435
4436	/* after dma buffer descriptor setup */
4437	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4438					mbox_rd, dma_ebuf, sta_pos_addr,
4439					dmabuf, index);
4440
4441	pbuf = (uint8_t *)dmabuf->virt;
4442	bsg_reply->reply_payload_rcv_len =
4443		sg_copy_from_buffer(job->reply_payload.sg_list,
4444				    job->reply_payload.sg_cnt,
4445				    pbuf, size);
4446
4447	lpfc_bsg_dma_page_free(phba, dmabuf);
4448
4449	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4450		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4451				"2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4452				"command session done\n");
4453		lpfc_bsg_mbox_ext_session_reset(phba);
4454	}
4455
4456	bsg_reply->result = 0;
4457	bsg_job_done(job, bsg_reply->result,
4458		       bsg_reply->reply_payload_rcv_len);
4459
4460	return SLI_CONFIG_HANDLED;
4461}
4462
4463/**
4464 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4465 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine sets up the next mailbox write external buffer obtained
 * from user space through BSG.
4470 **/
4471static int
4472lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4473			struct lpfc_dmabuf *dmabuf)
4474{
4475	struct fc_bsg_reply *bsg_reply = job->reply;
4476	struct bsg_job_data *dd_data = NULL;
4477	LPFC_MBOXQ_t *pmboxq = NULL;
4478	MAILBOX_t *pmb;
4479	enum nemb_type nemb_tp;
4480	uint8_t *pbuf;
4481	uint32_t size;
4482	uint32_t index;
4483	int rc;
4484
4485	index = phba->mbox_ext_buf_ctx.seqNum;
4486	phba->mbox_ext_buf_ctx.seqNum++;
4487	nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4488
4489	pbuf = (uint8_t *)dmabuf->virt;
4490	size = job->request_payload.payload_len;
4491	sg_copy_to_buffer(job->request_payload.sg_list,
4492			  job->request_payload.sg_cnt,
4493			  pbuf, size);
4494
4495	if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4496		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4497				"2966 SLI_CONFIG (mse) ext-buffer wr set "
4498				"buffer[%d], size:%d\n",
4499				phba->mbox_ext_buf_ctx.seqNum, size);
4500
4501	} else {
4502		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4503				"2967 SLI_CONFIG (hbd) ext-buffer wr set "
4504				"buffer[%d], size:%d\n",
4505				phba->mbox_ext_buf_ctx.seqNum, size);
4506
4507	}
4508
4509	/* set up external buffer descriptor and add to external buffer list */
4510	lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4511					phba->mbox_ext_buf_ctx.mbx_dmabuf,
4512					dmabuf);
4513	list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4514
4515	/* after write dma buffer */
4516	lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4517					mbox_wr, dma_ebuf, sta_pos_addr,
4518					dmabuf, index);
4519
4520	if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4521		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4522				"2968 SLI_CONFIG ext-buffer wr all %d "
4523				"ebuffers received\n",
4524				phba->mbox_ext_buf_ctx.numBuf);
4525
4526		dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4527		if (!dd_data) {
4528			rc = -ENOMEM;
4529			goto job_error;
4530		}
4531
4532		/* mailbox command structure for base driver */
4533		pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4534		if (!pmboxq) {
4535			rc = -ENOMEM;
4536			goto job_error;
4537		}
4538		memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4539		pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4540		pmb = &pmboxq->u.mb;
4541		memcpy(pmb, pbuf, sizeof(*pmb));
4542		pmb->mbxOwner = OWN_HOST;
4543		pmboxq->vport = phba->pport;
4544
4545		/* callback for multi-buffer write mailbox command */
4546		pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4547
4548		/* context fields to callback function */
4549		pmboxq->ctx_buf = dd_data;
4550		dd_data->type = TYPE_MBOX;
4551		dd_data->set_job = job;
4552		dd_data->context_un.mbox.pmboxq = pmboxq;
4553		dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4554		job->dd_data = dd_data;
4555
4556		/* state change */
4557		phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4558
4559		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4560		if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4561			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4562					"2969 Issued SLI_CONFIG ext-buffer "
4563					"mailbox command, rc:x%x\n", rc);
4564			return SLI_CONFIG_HANDLED;
4565		}
4566		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4567				"2970 Failed to issue SLI_CONFIG ext-buffer "
4568				"mailbox command, rc:x%x\n", rc);
4569		rc = -EPIPE;
4570		goto job_error;
4571	}
4572
	/* wait for additional external buffers */
4574	bsg_reply->result = 0;
4575	bsg_job_done(job, bsg_reply->result,
4576		       bsg_reply->reply_payload_rcv_len);
4577	return SLI_CONFIG_HANDLED;
4578
4579job_error:
4580	if (pmboxq)
4581		mempool_free(pmboxq, phba->mbox_mem_pool);
4582	lpfc_bsg_dma_page_free(phba, dmabuf);
4583	kfree(dd_data);
4584
4585	return rc;
4586}
4587
4588/**
4589 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4590 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
4593 *
4594 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4595 * command with multiple non-embedded external buffers.
4596 **/
4597static int
4598lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4599			     struct lpfc_dmabuf *dmabuf)
4600{
4601	int rc;
4602
4603	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4604			"2971 SLI_CONFIG buffer (type:x%x)\n",
4605			phba->mbox_ext_buf_ctx.mboxType);
4606
4607	if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4608		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4609			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4610					"2972 SLI_CONFIG rd buffer state "
4611					"mismatch:x%x\n",
4612					phba->mbox_ext_buf_ctx.state);
4613			lpfc_bsg_mbox_ext_abort(phba);
4614			return -EPIPE;
4615		}
4616		rc = lpfc_bsg_read_ebuf_get(phba, job);
4617		if (rc == SLI_CONFIG_HANDLED)
4618			lpfc_bsg_dma_page_free(phba, dmabuf);
4619	} else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
4620		if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4621			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4622					"2973 SLI_CONFIG wr buffer state "
4623					"mismatch:x%x\n",
4624					phba->mbox_ext_buf_ctx.state);
4625			lpfc_bsg_mbox_ext_abort(phba);
4626			return -EPIPE;
4627		}
4628		rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4629	}
4630	return rc;
4631}
4632
4633/**
4634 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4635 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
 * @dmabuf: Pointer to a DMA buffer descriptor.
 *
 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
 * (0x9B) mailbox commands and external buffers.
4641 **/
4642static int
4643lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4644			    struct lpfc_dmabuf *dmabuf)
4645{
4646	struct fc_bsg_request *bsg_request = job->request;
4647	struct dfc_mbox_req *mbox_req;
4648	int rc = SLI_CONFIG_NOT_HANDLED;
4649
4650	mbox_req =
4651	   (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4652
4653	/* mbox command with/without single external buffer */
4654	if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4655		return rc;
4656
4657	/* mbox command and first external buffer */
4658	if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4659		if (mbox_req->extSeqNum == 1) {
4660			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4661					"2974 SLI_CONFIG mailbox: tag:%d, "
4662					"seq:%d\n", mbox_req->extMboxTag,
4663					mbox_req->extSeqNum);
4664			rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4665			return rc;
		}
		goto sli_cfg_ext_error;
	}
4669
4670	/*
4671	 * handle additional external buffers
4672	 */
4673
4674	/* check broken pipe conditions */
4675	if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4676		goto sli_cfg_ext_error;
4677	if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4678		goto sli_cfg_ext_error;
4679	if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4680		goto sli_cfg_ext_error;
4681
4682	lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4683			"2975 SLI_CONFIG mailbox external buffer: "
4684			"extSta:x%x, tag:%d, seq:%d\n",
4685			phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4686			mbox_req->extSeqNum);
4687	rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4688	return rc;
4689
4690sli_cfg_ext_error:
4691	/* all other cases, broken pipe */
4692	lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4693			"2976 SLI_CONFIG mailbox broken pipe: "
4694			"ctxSta:x%x, ctxNumBuf:%d "
4695			"ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4696			phba->mbox_ext_buf_ctx.state,
4697			phba->mbox_ext_buf_ctx.numBuf,
4698			phba->mbox_ext_buf_ctx.mbxTag,
4699			phba->mbox_ext_buf_ctx.seqNum,
4700			mbox_req->extMboxTag, mbox_req->extSeqNum);
4701
4702	lpfc_bsg_mbox_ext_session_reset(phba);
4703
4704	return -EPIPE;
4705}
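
/*
 * Example of the continuity rules enforced above (illustrative): for a
 * session with ctxTag 5, ctxNumBuf 4 and ctxSeq 2, only a follow-up request
 * with extMboxTag == 5 and extSeqNum == 3 is accepted; any other tag, a
 * sequence gap, or a sequence beyond numBuf is treated as a broken pipe and
 * resets the session.
 */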
4706
4707/**
4708 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4709 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job object.
4711 * @vport: Pointer to a vport object.
4712 *
4713 * Allocate a tracking object, mailbox command memory, get a mailbox
4714 * from the mailbox pool, copy the caller mailbox command.
4715 *
 * If offline and the SLI is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
4719 **/
4720static int
4721lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4722	struct lpfc_vport *vport)
4723{
4724	struct fc_bsg_request *bsg_request = job->request;
4725	struct fc_bsg_reply *bsg_reply = job->reply;
4726	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4727	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4728	/* a 4k buffer to hold the mb and extended data from/to the bsg */
4729	uint8_t *pmbx = NULL;
4730	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4731	struct lpfc_dmabuf *dmabuf = NULL;
4732	struct dfc_mbox_req *mbox_req;
4733	struct READ_EVENT_LOG_VAR *rdEventLog;
4734	uint32_t transmit_length, receive_length, mode;
4735	struct lpfc_mbx_sli4_config *sli4_config;
4736	struct lpfc_mbx_nembed_cmd *nembed_sge;
4737	struct ulp_bde64 *bde;
4738	uint8_t *ext = NULL;
4739	int rc = 0;
4740	uint8_t *from;
4741	uint32_t size;
4742
4743	/* in case no data is transferred */
4744	bsg_reply->reply_payload_rcv_len = 0;
4745
4746	/* sanity check to protect driver */
4747	if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4748	    job->request_payload.payload_len > BSG_MBOX_SIZE) {
4749		rc = -ERANGE;
4750		goto job_done;
4751	}
4752
4753	/*
4754	 * Don't allow mailbox commands to be sent when blocked or when in
4755	 * the middle of discovery
4756	 */
4757	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4758		rc = -EAGAIN;
4759		goto job_done;
4760	}
4761
4762	mbox_req =
4763	    (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4764
4765	/* check if requested extended data lengths are valid */
4766	if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4767	    (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4768		rc = -ERANGE;
4769		goto job_done;
4770	}
4771
4772	dmabuf = lpfc_bsg_dma_page_alloc(phba);
4773	if (!dmabuf || !dmabuf->virt) {
4774		rc = -ENOMEM;
4775		goto job_done;
4776	}
4777
4778	/* Get the mailbox command or external buffer from BSG */
4779	pmbx = (uint8_t *)dmabuf->virt;
4780	size = job->request_payload.payload_len;
4781	sg_copy_to_buffer(job->request_payload.sg_list,
4782			  job->request_payload.sg_cnt, pmbx, size);
4783
4784	/* Handle possible SLI_CONFIG with non-embedded payloads */
4785	if (phba->sli_rev == LPFC_SLI_REV4) {
4786		rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4787		if (rc == SLI_CONFIG_HANDLED)
4788			goto job_cont;
4789		if (rc)
4790			goto job_done;
4791		/* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4792	}
4793
4794	rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4795	if (rc != 0)
4796		goto job_done; /* must be negative */
4797
4798	/* allocate our bsg tracking structure */
4799	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4800	if (!dd_data) {
4801		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4802				"2727 Failed allocation of dd_data\n");
4803		rc = -ENOMEM;
4804		goto job_done;
4805	}
4806
4807	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4808	if (!pmboxq) {
4809		rc = -ENOMEM;
4810		goto job_done;
4811	}
4812	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4813
4814	pmb = &pmboxq->u.mb;
4815	memcpy(pmb, pmbx, sizeof(*pmb));
4816	pmb->mbxOwner = OWN_HOST;
4817	pmboxq->vport = vport;
4818
4819	/* If HBA encountered an error attention, allow only DUMP
4820	 * or RESTART mailbox commands until the HBA is restarted.
4821	 */
4822	if (phba->pport->stopped &&
4823	    pmb->mbxCommand != MBX_DUMP_MEMORY &&
4824	    pmb->mbxCommand != MBX_RESTART &&
4825	    pmb->mbxCommand != MBX_WRITE_VPARMS &&
4826	    pmb->mbxCommand != MBX_WRITE_WWN)
4827		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4828				"2797 mbox: Issued mailbox cmd "
4829				"0x%x while in stopped state.\n",
4830				pmb->mbxCommand);
4831
4832	/* extended mailbox commands will need an extended buffer */
4833	if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4834		from = pmbx;
4835		ext = from + sizeof(MAILBOX_t);
4836		pmboxq->ctx_buf = ext;
4837		pmboxq->in_ext_byte_len =
4838			mbox_req->inExtWLen * sizeof(uint32_t);
4839		pmboxq->out_ext_byte_len =
4840			mbox_req->outExtWLen * sizeof(uint32_t);
4841		pmboxq->mbox_offset_word = mbox_req->mbOffset;
4842	}
4843
	/* biu diag will need a kernel buffer to transfer the data;
	 * allocate our own buffer and set up the mailbox command to
	 * use ours
4847	 */
4848	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4849		transmit_length = pmb->un.varWords[1];
4850		receive_length = pmb->un.varWords[4];
4851		/* transmit length cannot be greater than receive length or
4852		 * mailbox extension size
4853		 */
4854		if ((transmit_length > receive_length) ||
4855			(transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4856			rc = -ERANGE;
4857			goto job_done;
4858		}
4859		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4860			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4861		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4862			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4863
4864		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4865			putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4866			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4867		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4868			putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4869			  + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4870	} else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4871		rdEventLog = &pmb->un.varRdEventLog;
4872		receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4873		mode = bf_get(lpfc_event_log, rdEventLog);
4874
4875		/* receive length cannot be greater than mailbox
4876		 * extension size
4877		 */
4878		if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4879			rc = -ERANGE;
4880			goto job_done;
4881		}
4882
4883		/* mode zero uses a bde like biu diags command */
4884		if (mode == 0) {
4885			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4886							+ sizeof(MAILBOX_t));
4887			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4888							+ sizeof(MAILBOX_t));
4889		}
4890	} else if (phba->sli_rev == LPFC_SLI_REV4) {
4891		/* Let type 4 (well known data) through because the data is
4892		 * returned in varwords[4-8]
		 * otherwise check the receive length and fetch the buffer addr
4894		 */
4895		if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4896			(pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4897			/* rebuild the command for sli4 using our own buffers
			 * like we do for biu diags
			 */
4900			receive_length = pmb->un.varWords[2];
			/* a zero receive length is invalid for the
			 * rebuilt command
			 */
4904			if (receive_length == 0) {
4905				rc = -ERANGE;
4906				goto job_done;
4907			}
4908			pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4909						+ sizeof(MAILBOX_t));
4910			pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4911						+ sizeof(MAILBOX_t));
4912		} else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4913			pmb->un.varUpdateCfg.co) {
4914			bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4915
4916			/* bde size cannot be greater than mailbox ext size */
4917			if (bde->tus.f.bdeSize >
4918			    BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4919				rc = -ERANGE;
4920				goto job_done;
4921			}
4922			bde->addrHigh = putPaddrHigh(dmabuf->phys
4923						+ sizeof(MAILBOX_t));
4924			bde->addrLow = putPaddrLow(dmabuf->phys
4925						+ sizeof(MAILBOX_t));
4926		} else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4927			/* Handling non-embedded SLI_CONFIG mailbox command */
4928			sli4_config = &pmboxq->u.mqe.un.sli4_config;
4929			if (!bf_get(lpfc_mbox_hdr_emb,
4930			    &sli4_config->header.cfg_mhdr)) {
4931				/* rebuild the command for sli4 using our
4932				 * own buffers like we do for biu diags
4933				 */
4934				nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4935						&pmb->un.varWords[0];
4936				receive_length = nembed_sge->sge[0].length;
4937
4938				/* receive length cannot be greater than
4939				 * mailbox extension size
4940				 */
4941				if ((receive_length == 0) ||
4942				    (receive_length >
4943				     BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4944					rc = -ERANGE;
4945					goto job_done;
4946				}
4947
4948				nembed_sge->sge[0].pa_hi =
4949						putPaddrHigh(dmabuf->phys
4950						   + sizeof(MAILBOX_t));
4951				nembed_sge->sge[0].pa_lo =
4952						putPaddrLow(dmabuf->phys
4953						   + sizeof(MAILBOX_t));
4954			}
4955		}
4956	}
4957
4958	dd_data->context_un.mbox.dmabuffers = dmabuf;
4959
	/* setup the mailbox completion callback */
4961	pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4962
	/* setup context fields to pass to the completion handler */
4964	pmboxq->ctx_ndlp = dd_data;
4965	dd_data->type = TYPE_MBOX;
4966	dd_data->set_job = job;
4967	dd_data->context_un.mbox.pmboxq = pmboxq;
4968	dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4969	dd_data->context_un.mbox.ext = ext;
4970	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4971	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4972	dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4973	job->dd_data = dd_data;
4974
4975	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4976	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4977		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4978		if (rc != MBX_SUCCESS) {
4979			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4980			goto job_done;
4981		}
4982
4983		/* job finished, copy the data */
4984		memcpy(pmbx, pmb, sizeof(*pmb));
4985		bsg_reply->reply_payload_rcv_len =
4986			sg_copy_from_buffer(job->reply_payload.sg_list,
4987					    job->reply_payload.sg_cnt,
4988					    pmbx, size);
		/* not waiting; mbox already done */
4990		rc = 0;
4991		goto job_done;
4992	}
4993
4994	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4995	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4996		return 1; /* job started */
4997
4998job_done:
4999	/* common exit for error or job completed inline */
5000	if (pmboxq)
5001		mempool_free(pmboxq, phba->mbox_mem_pool);
5002	lpfc_bsg_dma_page_free(phba, dmabuf);
5003	kfree(dd_data);
5004
5005job_cont:
5006	return rc;
5007}
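
/*
 * Layout of the single DMA page used by lpfc_bsg_issue_mbox() above (sketch):
 *
 *	+------------------------+ offset 0
 *	|       MAILBOX_t        |
 *	+------------------------+ sizeof(MAILBOX_t)
 *	| extension/payload data |
 *	+------------------------+ up to BSG_MBOX_SIZE
 *
 * This is why every BDE/SGE rebuilt for the SLI4 cases points at
 * dmabuf->phys + sizeof(MAILBOX_t), and why the length checks compare
 * against BSG_MBOX_SIZE - sizeof(MAILBOX_t).
 */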
5008
5009/**
5010 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
5011 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
5012 **/
5013static int
5014lpfc_bsg_mbox_cmd(struct bsg_job *job)
5015{
5016	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5017	struct fc_bsg_request *bsg_request = job->request;
5018	struct fc_bsg_reply *bsg_reply = job->reply;
5019	struct lpfc_hba *phba = vport->phba;
5020	struct dfc_mbox_req *mbox_req;
5021	int rc = 0;
5022
5023	/* mix-and-match backward compatibility */
5024	bsg_reply->reply_payload_rcv_len = 0;
5025	if (job->request_len <
5026	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
5027		lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
5028				"2737 Mix-and-match backward compatibility "
5029				"between MBOX_REQ old size:%d and "
5030				"new request size:%d\n",
5031				(int)(job->request_len -
5032				      sizeof(struct fc_bsg_request)),
5033				(int)sizeof(struct dfc_mbox_req));
5034		mbox_req = (struct dfc_mbox_req *)
5035				bsg_request->rqst_data.h_vendor.vendor_cmd;
5036		mbox_req->extMboxTag = 0;
5037		mbox_req->extSeqNum = 0;
5038	}
5039
5040	rc = lpfc_bsg_issue_mbox(phba, job, vport);
5041
5042	if (rc == 0) {
5043		/* job done */
5044		bsg_reply->result = 0;
5045		job->dd_data = NULL;
5046		bsg_job_done(job, bsg_reply->result,
5047			       bsg_reply->reply_payload_rcv_len);
5048	} else if (rc == 1)
		/* job submitted, will complete later */
5050		rc = 0; /* return zero, no error */
5051	else {
5052		/* some error occurred */
5053		bsg_reply->result = rc;
5054		job->dd_data = NULL;
5055	}
5056
5057	return rc;
5058}
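
/*
 * Hypothetical userspace sketch (illustration only, not part of this
 * driver): requests reach lpfc_bsg_mbox_cmd() through the host's FC BSG
 * node (e.g. /dev/bsg/fc_hostN) with the fc_bsg_request msgcode set to
 * FC_BSG_HST_VENDOR, a struct dfc_mbox_req as the vendor command, and the
 * raw mailbox image carried in the request payload scatter list.
 */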
5059
5060/**
5061 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
5062 * @phba: Pointer to HBA context object.
5063 * @cmdiocbq: Pointer to command iocb.
5064 * @rspiocbq: Pointer to response iocb.
5065 *
5066 * This function is the completion handler for iocbs issued using
5067 * lpfc_menlo_cmd function. This function is called by the
5068 * ring event handler function without any lock held. This function
5069 * can be called from both worker thread context and interrupt
5070 * context. This function also can be called from another thread which
5071 * cleans up the SLI layer objects.
 * This function copies the response data, releases the command
 * resources, and completes the BSG job if it is still active.
5076 **/
5077static void
5078lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
5079			struct lpfc_iocbq *cmdiocbq,
5080			struct lpfc_iocbq *rspiocbq)
5081{
5082	struct bsg_job_data *dd_data;
5083	struct bsg_job *job;
5084	struct fc_bsg_reply *bsg_reply;
5085	IOCB_t *rsp;
5086	struct lpfc_dmabuf *bmp, *cmp, *rmp;
5087	struct lpfc_bsg_menlo *menlo;
5088	unsigned long flags;
5089	struct menlo_response *menlo_resp;
5090	unsigned int rsp_size;
5091	int rc = 0;
5092
5093	dd_data = cmdiocbq->context1;
5094	cmp = cmdiocbq->context2;
5095	bmp = cmdiocbq->context3;
5096	menlo = &dd_data->context_un.menlo;
5097	rmp = menlo->rmp;
5098	rsp = &rspiocbq->iocb;
5099
5100	/* Determine if job has been aborted */
5101	spin_lock_irqsave(&phba->ct_ev_lock, flags);
5102	job = dd_data->set_job;
5103	if (job) {
5104		bsg_reply = job->reply;
5105		/* Prevent timeout handling from trying to abort job  */
5106		job->dd_data = NULL;
5107	}
5108	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5109
5110	/* Copy the job data or set the failing status for the job */
5111
5112	if (job) {
		/* always return the xri; it is used in the case
		 * of a menlo download to allow the data to be sent as a
		 * continuation of the exchange.
5116		 */
5117
5118		menlo_resp = (struct menlo_response *)
5119			bsg_reply->reply_data.vendor_reply.vendor_rsp;
5120		menlo_resp->xri = rsp->ulpContext;
5121		if (rsp->ulpStatus) {
5122			if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5123				switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5124				case IOERR_SEQUENCE_TIMEOUT:
5125					rc = -ETIMEDOUT;
5126					break;
5127				case IOERR_INVALID_RPI:
5128					rc = -EFAULT;
5129					break;
5130				default:
5131					rc = -EACCES;
5132					break;
5133				}
5134			} else {
5135				rc = -EACCES;
5136			}
5137		} else {
5138			rsp_size = rsp->un.genreq64.bdl.bdeSize;
5139			bsg_reply->reply_payload_rcv_len =
5140				lpfc_bsg_copy_data(rmp, &job->reply_payload,
5141						   rsp_size, 0);
5142		}
5143
5144	}
5145
5146	lpfc_sli_release_iocbq(phba, cmdiocbq);
5147	lpfc_free_bsg_buffers(phba, cmp);
5148	lpfc_free_bsg_buffers(phba, rmp);
5149	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5150	kfree(bmp);
5151	kfree(dd_data);
5152
5153	/* Complete the job if active */
5154
5155	if (job) {
5156		bsg_reply->result = rc;
5157		bsg_job_done(job, bsg_reply->result,
5158			       bsg_reply->reply_payload_rcv_len);
5159	}
5160
5161	return;
5162}
5163
5164/**
5165 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5166 * @job: fc_bsg_job to handle
5167 *
5168 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
5169 * all the command completions will return the xri for the command.
5170 * For menlo data requests a gen request 64 CX is used to continue the exchange
5171 * supplied in the menlo request header xri field.
5172 **/
5173static int
5174lpfc_menlo_cmd(struct bsg_job *job)
5175{
5176	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5177	struct fc_bsg_request *bsg_request = job->request;
5178	struct fc_bsg_reply *bsg_reply = job->reply;
5179	struct lpfc_hba *phba = vport->phba;
5180	struct lpfc_iocbq *cmdiocbq;
5181	IOCB_t *cmd;
5182	int rc = 0;
5183	struct menlo_command *menlo_cmd;
5184	struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5185	int request_nseg;
5186	int reply_nseg;
5187	struct bsg_job_data *dd_data;
5188	struct ulp_bde64 *bpl = NULL;
5189
	/* in case no data is returned, just report the return code */
5191	bsg_reply->reply_payload_rcv_len = 0;
5192
5193	if (job->request_len <
5194	    sizeof(struct fc_bsg_request) +
5195		sizeof(struct menlo_command)) {
5196		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5197				"2784 Received MENLO_CMD request below "
5198				"minimum size\n");
5199		rc = -ERANGE;
5200		goto no_dd_data;
5201	}
5202
5203	if (job->reply_len < sizeof(*bsg_reply) +
5204				sizeof(struct menlo_response)) {
5205		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5206				"2785 Received MENLO_CMD reply below "
5207				"minimum size\n");
5208		rc = -ERANGE;
5209		goto no_dd_data;
5210	}
5211
5212	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5213		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5214				"2786 Adapter does not support menlo "
5215				"commands\n");
5216		rc = -EPERM;
5217		goto no_dd_data;
5218	}
5219
5220	menlo_cmd = (struct menlo_command *)
5221		bsg_request->rqst_data.h_vendor.vendor_cmd;
5222
5223	/* allocate our bsg tracking structure */
5224	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5225	if (!dd_data) {
5226		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5227				"2787 Failed allocation of dd_data\n");
5228		rc = -ENOMEM;
5229		goto no_dd_data;
5230	}
5231
5232	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5233	if (!bmp) {
5234		rc = -ENOMEM;
5235		goto free_dd;
5236	}
5237
5238	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5239	if (!bmp->virt) {
5240		rc = -ENOMEM;
5241		goto free_bmp;
5242	}
5243
5244	INIT_LIST_HEAD(&bmp->list);
5245
	bpl = (struct ulp_bde64 *)bmp->virt;
	request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
	cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
				     1, bpl, &request_nseg);
	if (!cmp) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	lpfc_bsg_copy_data(cmp, &job->request_payload,
			   job->request_payload.payload_len, 1);

	bpl += request_nseg;
	reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
	rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
				     bpl, &reply_nseg);
	if (!rmp) {
		rc = -ENOMEM;
		goto free_cmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_rmp;
	}

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* LE: last IOCB entry */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to time out before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = cmp;
	cmdiocbq->context3 = bmp;
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

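	/*
	 * Hand-off to the completion path: lpfc_bsg_menlo_cmd_cmp() looks
	 * up dd_data through the iocb context to finish the bsg job once
	 * the exchange completes.
	 */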
	dd_data->type = TYPE_MENLO;
	dd_data->set_job = job;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rmp = rmp;
	job->dd_data = dd_data;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rmp:
	lpfc_free_bsg_buffers(phba, rmp);
free_cmp:
	lpfc_free_bsg_buffers(phba, cmp);
free_bmp:
	if (bmp->virt)
		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	bsg_reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

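/**
 * lpfc_forced_link_speed - check adapter support for forced link speeds
 * @job: fc_bsg_job to handle
 *
 * Reports in the vendor-specific reply whether the adapter allows the
 * link speed to be forced, based on the HBA_FORCED_LINK_SPEED flag.
 **/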
static int
lpfc_forced_link_speed(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct forced_link_speed_support_reply *forced_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct get_forced_link_speed_support)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0048 Received FORCED_LINK_SPEED request "
				"below minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply = (struct forced_link_speed_support_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"0049 Received FORCED_LINK_SPEED reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
				   ? LPFC_FORCED_LINK_SPEED_SUPPORTED
				   : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
job_error:
	bsg_reply->result = rc;
	if (rc == 0)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_check_fwlog_support: Check FW log support on the adapter
 * @phba: Pointer to HBA context object.
 *
 * Check whether FW logging is supported by the adapter.
 *
 * Return: 0 when logging is supported and enabled, -EACCES when the
 * adapter lacks support, -EPERM when support exists but logging is
 * disabled.
 **/
int
lpfc_check_fwlog_support(struct lpfc_hba *phba)
{
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;

	if (!ras_fwlog->ras_hwsupport)
		return -EACCES;
	else if (!ras_fwlog->ras_enabled)
		return -EPERM;
	else
		return 0;
}

/**
 * lpfc_bsg_get_ras_config: Get RAS configuration settings
 * @job: fc_bsg_job to handle
 *
 * Return the RAS configuration values currently in effect.
 **/
static int
lpfc_bsg_get_ras_config(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_get_ras_config_reply *ras_reply;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_ras_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6192 FW_LOG request received "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	/* Check FW log status */
	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	/* Current logging state */
	spin_lock_irq(&phba->hbalock);
	if (ras_fwlog->state == ACTIVE)
		ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
	else
		ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
	spin_unlock_irq(&phba->hbalock);

	ras_reply->log_level = ras_fwlog->fw_loglevel;
	ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;

ras_job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;

	/* complete the job back to userspace */
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_set_ras_config: Set FW logging parameters
 * @job: fc_bsg_job to handle
 *
 * Set the log level for FW logging in host memory, or stop logging
 * altogether, depending on the requested action.
 **/
static int
lpfc_bsg_set_ras_config(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_bsg_set_ras_config_req *ras_req;
	struct fc_bsg_request *bsg_request = job->request;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint8_t action = 0, log_level = 0;
	int rc = 0, action_status = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_set_ras_config_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6182 Received RAS_LOG request "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	/* Check FW log status */
	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	ras_req = (struct lpfc_bsg_set_ras_config_req *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	action = ras_req->action;
	log_level = ras_req->log_level;

	if (action == LPFC_RASACTION_STOP_LOGGING) {
		/* Check if already disabled */
		spin_lock_irq(&phba->hbalock);
		if (ras_fwlog->state != ACTIVE) {
			spin_unlock_irq(&phba->hbalock);
			rc = -ESRCH;
			goto ras_job_error;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Disable logging */
		lpfc_ras_stop_fwlog(phba);
	} else {
		/* action == LPFC_RASACTION_START_LOGGING */

		/* Even if FW logging is already active, re-initialize it
		 * with the new log level, then return the status
		 * "Logging already Running" to the caller.
		 */
		spin_lock_irq(&phba->hbalock);
		if (ras_fwlog->state != INACTIVE)
			action_status = -EINPROGRESS;
		spin_unlock_irq(&phba->hbalock);

		/* Enable logging */
		rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
					      LPFC_RAS_ENABLE_LOGGING);
		if (rc) {
			rc = -EINVAL;
			goto ras_job_error;
		}

		/* Check if FW logging was re-initialized */
		if (action_status == -EINPROGRESS)
			rc = action_status;
	}
ras_job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;

	/* complete the job back to userspace */
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}

/**
 * lpfc_bsg_get_ras_lwpd: Get log write position data
 * @job: fc_bsg_job to handle
 *
 * Get the offset and wrap count of the log messages written
 * to host memory.
 **/
static int
lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_bsg_get_ras_lwpd *ras_reply;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
	struct fc_bsg_reply *bsg_reply = job->reply;
	u32 *lwpd_ptr = NULL;
	int rc = 0;

	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_ras_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6183 Received RAS_LOG request "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (!ras_fwlog->lwpd.virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6193 Restart FW Logging\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

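	/*
	 * The LWPD is two consecutive big-endian 32-bit words: the first
	 * holds the current write offset, the second the wrap count.
	 */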
	/* Get lwpd offset */
	lwpd_ptr = (u32 *)(ras_fwlog->lwpd.virt);
	ras_reply->offset = be32_to_cpu(*lwpd_ptr);

	/* Get wrap count */
	ras_reply->wrap_count = be32_to_cpu(*(++lwpd_ptr));

ras_job_error:
	/* make error code available to userspace */
	bsg_reply->result = rc;

	/* complete the job back to userspace */
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}

/**
 * lpfc_bsg_get_ras_fwlog: Read FW log
 * @job: fc_bsg_job to handle
 *
 * Copy the FW log into the passed buffer.
 **/
static int
lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
{
	struct Scsi_Host *shost = fc_bsg_to_shost(job);
	struct lpfc_vport *vport = shost_priv(shost);
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_bsg_get_fwlog_req *ras_req;
	u32 rd_offset, rd_index, offset;
	void *src, *fwlog_buff;
	struct lpfc_ras_fwlog *ras_fwlog = NULL;
	struct lpfc_dmabuf *dmabuf, *next;
	int rc = 0;

	ras_fwlog = &phba->ras_fwlog;

	rc = lpfc_check_fwlog_support(phba);
	if (rc)
		goto ras_job_error;

	/* Logging must be stopped before reading */
	spin_lock_irq(&phba->hbalock);
	if (ras_fwlog->state == ACTIVE) {
		spin_unlock_irq(&phba->hbalock);
		rc = -EINPROGRESS;
		goto ras_job_error;
	}
	spin_unlock_irq(&phba->hbalock);

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct lpfc_bsg_get_fwlog_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"6184 Received RAS_LOG request "
				"below minimum size\n");
		rc = -EINVAL;
		goto ras_job_error;
	}

	ras_req = (struct lpfc_bsg_get_fwlog_req *)
		bsg_request->rqst_data.h_vendor.vendor_cmd;
	rd_offset = ras_req->read_offset;

	/* Allocate memory to read fw log */
	fwlog_buff = vmalloc(ras_req->read_size);
	if (!fwlog_buff) {
		rc = -ENOMEM;
		goto ras_job_error;
	}

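	/*
	 * The read is satisfied from the single DMA buffer that holds
	 * rd_offset; read_size is assumed small enough not to cross a
	 * buffer boundary (each buffer holds LPFC_RAS_MAX_ENTRY_SIZE
	 * bytes).
	 */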
	rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
	offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);

	list_for_each_entry_safe(dmabuf, next,
				 &ras_fwlog->fwlog_buff_list, list) {
		if (dmabuf->buffer_tag < rd_index)
			continue;

		src = dmabuf->virt + offset;
		memcpy(fwlog_buff, src, ras_req->read_size);
		break;
	}

	bsg_reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    fwlog_buff, ras_req->read_size);

	vfree(fwlog_buff);

ras_job_error:
	bsg_reply->result = rc;
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);

	return rc;
}

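/**
 * lpfc_get_trunk_info - report trunking state to the application
 * @job: fc_bsg_job to handle
 *
 * Fills the vendor-specific reply with the overall link status, the
 * configured and active state of each trunked link, and the physical
 * and logical port speeds.
 **/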
static int
lpfc_get_trunk_info(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct fc_bsg_reply *bsg_reply = job->reply;
	struct lpfc_trunk_info *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_trunk_info_req)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"2744 Received GET_TRUNK_INFO request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply = (struct lpfc_trunk_info *)
		bsg_reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2728 Received GET_TRUNK_INFO reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}
	if (event_reply == NULL) {
		rc = -EINVAL;
		goto job_error;
	}

	bsg_bf_set(lpfc_trunk_info_link_status, event_reply,
		   (phba->link_state >= LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active0, event_reply,
		   (phba->trunk_link.link0.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active1, event_reply,
		   (phba->trunk_link.link1.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active2, event_reply,
		   (phba->trunk_link.link2.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_active3, event_reply,
		   (phba->trunk_link.link3.state == LPFC_LINK_UP) ? 1 : 0);

	bsg_bf_set(lpfc_trunk_info_trunk_config0, event_reply,
		   bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba));

	bsg_bf_set(lpfc_trunk_info_trunk_config1, event_reply,
		   bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba));

	bsg_bf_set(lpfc_trunk_info_trunk_config2, event_reply,
		   bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba));

	bsg_bf_set(lpfc_trunk_info_trunk_config3, event_reply,
		   bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba));

	event_reply->port_speed = phba->sli4_hba.link_state.speed / 1000;
	event_reply->logical_speed =
				phba->sli4_hba.link_state.logical_speed / 1000;
job_error:
	bsg_reply->result = rc;
	if (!rc)
		bsg_job_done(job, bsg_reply->result,
			     bsg_reply->reply_payload_rcv_len);
	return rc;
}

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 *
 * Dispatch on the first vendor command word; each handler either
 * completes the job or returns an error code to the caller.
 **/
static int
lpfc_bsg_hst_vendor(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_loopback_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE_END:
		rc = lpfc_sli4_bsg_diag_mode_end(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
		rc = lpfc_bsg_diag_loopback_run(job);
		break;
	case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
		rc = lpfc_sli4_bsg_link_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
		rc = lpfc_forced_link_speed(job);
		break;
	case LPFC_BSG_VENDOR_RAS_GET_LWPD:
		rc = lpfc_bsg_get_ras_lwpd(job);
		break;
	case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
		rc = lpfc_bsg_get_ras_fwlog(job);
		break;
	case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
		rc = lpfc_bsg_get_ras_config(job);
		break;
	case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
		rc = lpfc_bsg_set_ras_config(job);
		break;
	case LPFC_BSG_VENDOR_GET_TRUNK_INFO:
		rc = lpfc_get_trunk_info(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: bsg_job to handle
 **/
int
lpfc_bsg_request(struct bsg_job *job)
{
	struct fc_bsg_request *bsg_request = job->request;
	struct fc_bsg_reply *bsg_reply = job->reply;
	uint32_t msgcode;
	int rc;

	msgcode = bsg_request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		bsg_reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		bsg_reply->result = rc;
		break;
	}

	return rc;
}

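/*
 * Illustrative only: userspace reaches the entry point above by
 * submitting a struct sg_io_v4 (guard 'Q', protocol BSG_PROTOCOL_SCSI,
 * subprotocol BSG_SUB_PROTOCOL_SCSI_TRANSPORT) on the fc_host bsg node,
 * with the request buffer holding a struct fc_bsg_request whose msgcode
 * selects one of the cases handled in lpfc_bsg_request(), e.g.
 * FC_BSG_HST_VENDOR with vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV.
 */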
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return
 * to the waiting function, which handles passing the error back to
 * userspace.
 **/
int
lpfc_bsg_timeout(struct bsg_job *job)
{
	struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_sli_ring *pring;
	struct bsg_job_data *dd_data;
	unsigned long flags;
	int rc = 0;
	LIST_HEAD(completions);
	struct lpfc_iocbq *check_iocb, *next_iocb;

	pring = lpfc_phba_elsring(phba);
	if (unlikely(!pring))
		return -EIO;

	/* If the job's driver data is NULL, the command completed or is in
	 * the process of completing.  In this case, return status to the
	 * request so the timeout is retried.  This avoids double completion
	 * issues, and the request will be pulled off the timer queue when
	 * the command's completion handler executes.  Otherwise, prevent
	 * the command's completion handler from executing the job done
	 * callback and continue processing to abort the outstanding command.
	 */

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	if (dd_data) {
		dd_data->set_job = NULL;
		job->dd_data = NULL;
	} else {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return -EAGAIN;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag
		 */
		cmdiocb = dd_data->context_un.iocb.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		/* make sure the I/O abort window is still open */
		if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
			spin_unlock_irqrestore(&phba->hbalock, flags);
			return -EAGAIN;
		}
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;

	case TYPE_EVT:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;

	case TYPE_MBOX:
		/* Update the ext buf ctx state if needed */
		if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
			phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	case TYPE_MENLO:
		/* Check to see if IOCB was issued to the port or not. If not,
		 * remove it from the txq queue and call cancel iocbs.
		 * Otherwise, call abort iotag.
		 */
		cmdiocb = dd_data->context_un.menlo.cmdiocbq;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

		spin_lock_irqsave(&phba->hbalock, flags);
		list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
					 list) {
			if (check_iocb == cmdiocb) {
				list_move_tail(&check_iocb->list, &completions);
				break;
			}
		}
		if (list_empty(&completions))
			lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irqrestore(&phba->hbalock, flags);
		if (!list_empty(&completions)) {
			lpfc_sli_cancel_iocbs(phba, &completions,
					      IOSTAT_LOCAL_REJECT,
					      IOERR_SLI_ABORTED);
		}
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console,
	 * so always return success (zero).
	 */
	return rc;
}
