/*
 * bnx2fc_els.c: QLogic Linux FCoE offload driver.
 * This file contains helper routines that handle ELS requests
 * and responses.
 *
 * Copyright (c) 2008-2013 Broadcom Corporation
 * Copyright (c) 2014-2016 QLogic Corporation
 * Copyright (c) 2016-2017 Cavium Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg);
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg);
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec);

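/*
 * bnx2fc_rrq_compl - completion handler for an outstanding RRQ
 *
 * Releases the reference held on the original (aborted) IO request while
 * the RRQ was outstanding. If the RRQ itself timed out, the RRQ command
 * is removed from the active queue and cleaned up with the firmware
 * instead of being completed.
 */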
static void bnx2fc_rrq_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req;
	struct bnx2fc_cmd *rrq_req;
	int rc = 0;

	BUG_ON(!cb_arg);
	rrq_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BUG_ON(!orig_io_req);
	BNX2FC_ELS_DBG("rrq_compl: orig xid = 0x%x, rrq_xid = 0x%x\n",
		   orig_io_req->xid, rrq_req->xid);

	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);

	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rrq_req->req_flags)) {
		/*
		 * The ELS request timed out. Clean up the IO with the FW,
		 * drop the completion and remove the request from the
		 * active_cmd_queue.
		 */
		BNX2FC_ELS_DBG("rrq xid - 0x%x timed out, clean it up\n",
			   rrq_req->xid);

		if (rrq_req->on_active_queue) {
			list_del_init(&rrq_req->link);
			rrq_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(rrq_req);
			BUG_ON(rc);
		}
	}
	kfree(cb_arg);
}
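
/*
 * bnx2fc_send_rrq - send an RRQ ELS to reclaim the exchange used by an
 * aborted IO request
 *
 * Retries for up to 10 seconds if the request cannot be queued due to
 * memory pressure. Returns zero on success and a non-zero value on
 * failure; on failure the reference held on aborted_io_req is dropped
 * here.
 */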
int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)
{

	struct fc_els_rrq rrq;
	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
	struct fc_lport *lport = NULL;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = 0;
	u32 r_a_tov = 0;
	unsigned long start = jiffies;
	int rc;

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
		return -EINVAL;

	lport = tgt->rdata->local_port;
	sid = tgt->sid;
	r_a_tov = lport->r_a_tov;

	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
		   aborted_io_req->xid);
	memset(&rrq, 0, sizeof(rrq));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_NOIO);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RRQ\n");
		rc = -ENOMEM;
		goto rrq_err;
	}

	cb_arg->aborted_io_req = aborted_io_req;

	rrq.rrq_cmd = ELS_RRQ;
	hton24(rrq.rrq_s_id, sid);
	rrq.rrq_ox_id = htons(aborted_io_req->xid);
	rrq.rrq_rx_id = htons(aborted_io_req->task->rxwr_txrd.var_ctx.rx_id);

retry_rrq:
	rc = bnx2fc_initiate_els(tgt, ELS_RRQ, &rrq, sizeof(rrq),
				 bnx2fc_rrq_compl, cb_arg,
				 r_a_tov);
	if (rc == -ENOMEM) {
		if (time_after(jiffies, start + (10 * HZ))) {
			BNX2FC_ELS_DBG("rrq Failed\n");
			rc = FAILED;
			goto rrq_err;
		}
		msleep(20);
		goto retry_rrq;
	}
rrq_err:
	if (rc) {
		BNX2FC_ELS_DBG("RRQ failed - release orig io req 0x%x\n",
			aborted_io_req->xid);
		kfree(cb_arg);
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&aborted_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
	}
	return rc;
}

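/*
 * bnx2fc_l2_els_compl - completion handler for ELS requests issued on
 * behalf of libfc (ADISC, LOGO, RLS)
 *
 * Copies the response FC header and payload into a single buffer and
 * hands it to bnx2fc_process_l2_frame_compl() so that libfc sees the
 * response on the original exchange (l2_oxid). A timed-out request is
 * cleaned up with the firmware and its completion is dropped.
 */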
static void bnx2fc_l2_els_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_rport *tgt;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u16 l2_oxid;
	int frame_len;
	int rc = 0;

	l2_oxid = cb_arg->l2_oxid;
	BNX2FC_ELS_DBG("ELS COMPL - l2_oxid = 0x%x\n", l2_oxid);

	els_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &els_req->req_flags)) {
		/*
		 * The ELS request timed out. Clean up the IO with the FW and
		 * drop the completion; libfc will handle the ELS timeout.
		 */
		if (els_req->on_active_queue) {
			list_del_init(&els_req->link);
			els_req->on_active_queue = 0;
			rc = bnx2fc_initiate_cleanup(els_req);
			BUG_ON(rc);
		}
		goto free_arg;
	}

	tgt = els_req->tgt;
	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "Unable to alloc mp buf\n");
		goto free_arg;
	}
	hdr_len = sizeof(*fc_hdr);
	if (hdr_len + resp_len > PAGE_SIZE) {
		printk(KERN_ERR PFX "l2_els_compl: resp len is "
				    "beyond page size\n");
		goto free_buf;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);
	frame_len = hdr_len + resp_len;

	bnx2fc_process_l2_frame_compl(tgt, buf, frame_len, l2_oxid);

free_buf:
	kfree(buf);
free_arg:
	kfree(cb_arg);
}

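/*
 * bnx2fc_send_adisc - send an ADISC request as a firmware middle-path
 * request. The ADISC payload in fp has already been built by libfc; the
 * original OX_ID is saved so the response can be handed back to libfc.
 */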
int bnx2fc_send_adisc(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_adisc *adisc;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for ADISC\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("send ADISC: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	/* adisc is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_ADISC, adisc, sizeof(*adisc),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

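/*
 * bnx2fc_send_logo - send a LOGO request built by libfc as a firmware
 * middle-path request, preserving the original OX_ID for the response.
 */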
int bnx2fc_send_logo(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_logo *logo;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for LOGO\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	BNX2FC_ELS_DBG("Send LOGO: l2_oxid = 0x%x\n", cb_arg->l2_oxid);
	logo = fc_frame_payload_get(fp, sizeof(*logo));
	/* logo is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_LOGO, logo, sizeof(*logo),
				 bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

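/*
 * bnx2fc_send_rls - send an RLS request built by libfc as a firmware
 * middle-path request, preserving the original OX_ID for the response.
 */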
int bnx2fc_send_rls(struct bnx2fc_rport *tgt, struct fc_frame *fp)
{
	struct fc_els_rls *rls;
	struct fc_frame_header *fh;
	struct bnx2fc_els_cb_arg *cb_arg;
	struct fc_lport *lport = tgt->rdata->local_port;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	fh = fc_frame_header_get(fp);
	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for RLS\n");
		return -ENOMEM;
	}

	cb_arg->l2_oxid = ntohs(fh->fh_ox_id);

	rls = fc_frame_payload_get(fp, sizeof(*rls));
	/* rls is initialized by libfc */
	rc = bnx2fc_initiate_els(tgt, ELS_RLS, rls, sizeof(*rls),
				  bnx2fc_l2_els_compl, cb_arg, 2 * r_a_tov);
	if (rc)
		kfree(cb_arg);
	return rc;
}

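/*
 * bnx2fc_srr_compl - completion handler for an outstanding SRR
 *
 * On SRR timeout the SRR exchange is aborted and the SRR is retried up to
 * SRR_RETRY_COUNT times before the original IO itself is aborted. An
 * LS_RJT response also aborts the original IO; LS_ACC means the recovery
 * succeeded. Called with tgt->tgt_lock held; the lock is dropped
 * temporarily around bnx2fc_send_srr().
 */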
static void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timed out */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: xid 0x%x flags = %lx\n",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl: xid - 0x%x flags = %lx\n",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}

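/*
 * bnx2fc_rec_compl - completion handler for an outstanding REC
 *
 * On REC timeout the REC exchange is aborted and the REC is retried up to
 * REC_RETRY_COUNT times before the original IO is aborted. An LS_RJT with
 * an OX_ID/RX_ID explanation means the command was lost, so the SCSI
 * command is re-posted on a new exchange. On LS_ACC the exchange status
 * determines whether to send an SRR, perform a sequence cleanup or abort
 * the original IO. Called with tgt->tgt_lock held; the lock is dropped
 * temporarily around bnx2fc_send_rec() and bnx2fc_send_srr().
 */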
static void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x\n", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* The ELS request timed out; send an ABTS for the ELS */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* Retry the REC; if retries are exhausted, ABTS the orig IO */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					" IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}

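/*
 * bnx2fc_send_rec - send a REC (Read Exchange Concise) for the exchange
 * of orig_io_req to recover from a sequence level error. Takes an extra
 * reference on orig_io_req, which is dropped either in bnx2fc_rec_compl()
 * or here on failure. Returns zero on success, non-zero otherwise.
 */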
int bnx2fc_send_rec(struct bnx2fc_cmd *orig_io_req)
{
	struct fc_els_rec rec;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 sid = tgt->sid;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending REC\n");
	memset(&rec, 0, sizeof(rec));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for REC\n");
		rc = -ENOMEM;
		goto rec_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	rec.rec_cmd = ELS_REC;
	hton24(rec.rec_s_id, sid);
	rec.rec_ox_id = htons(orig_io_req->xid);
	rec.rec_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);

	rc = bnx2fc_initiate_els(tgt, ELS_REC, &rec, sizeof(rec),
				 bnx2fc_rec_compl, cb_arg,
				 r_a_tov);
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "REC failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	}
rec_err:
	return rc;
}

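/*
 * bnx2fc_send_srr - send an SRR (Sequence Retransmission Request) asking
 * the target to retransmit from the given offset with the given r_ctl.
 * Takes an extra reference on orig_io_req, which is dropped either in
 * bnx2fc_srr_compl() or here on failure; on success BNX2FC_FLAG_SRR_SENT
 * is set on the original request.
 */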
int bnx2fc_send_srr(struct bnx2fc_cmd *orig_io_req, u32 offset, u8 r_ctl)
{
	struct fcp_srr srr;
	struct bnx2fc_rport *tgt = orig_io_req->tgt;
	struct fc_lport *lport = tgt->rdata->local_port;
	struct bnx2fc_els_cb_arg *cb_arg = NULL;
	u32 r_a_tov = lport->r_a_tov;
	int rc;

	BNX2FC_IO_DBG(orig_io_req, "Sending SRR\n");
	memset(&srr, 0, sizeof(srr));

	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
	if (!cb_arg) {
		printk(KERN_ERR PFX "Unable to allocate cb_arg for SRR\n");
		rc = -ENOMEM;
		goto srr_err;
	}
	kref_get(&orig_io_req->refcount);

	cb_arg->aborted_io_req = orig_io_req;

	srr.srr_op = ELS_SRR;
	srr.srr_ox_id = htons(orig_io_req->xid);
	srr.srr_rx_id = htons(orig_io_req->task->rxwr_txrd.var_ctx.rx_id);
	srr.srr_rel_off = htonl(offset);
	srr.srr_r_ctl = r_ctl;
	orig_io_req->srr_offset = offset;
	orig_io_req->srr_rctl = r_ctl;

	rc = bnx2fc_initiate_els(tgt, ELS_SRR, &srr, sizeof(srr),
				 bnx2fc_srr_compl, cb_arg,
				 r_a_tov);
	if (rc) {
		BNX2FC_IO_DBG(orig_io_req, "SRR failed - release\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		kfree(cb_arg);
	} else
		set_bit(BNX2FC_FLAG_SRR_SENT, &orig_io_req->req_flags);

srr_err:
	return rc;
}

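/*
 * bnx2fc_initiate_els - build and post an ELS request as a firmware
 * middle-path task
 *
 * Validates that the rport, lport and session are ready, allocates a
 * middle-path command, copies the ELS payload, fills the FC header
 * (ELS4/FCP for SRR, plain ELS otherwise), initializes the task context
 * and rings the doorbell. If timer_msec is non-zero, an ELS timer is
 * armed before the request is posted. Returns zero on success or a
 * negative errno on failure.
 */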
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				   FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				   FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}

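/*
 * bnx2fc_process_els_compl - handle the firmware completion of an ELS
 * request issued through bnx2fc_initiate_els()
 *
 * Copies the response FC header and payload length from the task context
 * into the middle-path request, cancels the ELS timer and invokes the
 * registered callback. If the timer context has already marked the
 * request as done, only the command reference is dropped.
 */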
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
			      struct fcoe_task_ctx_entry *task, u8 num_rq)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	u64 *hdr;
	u64 *temp_hdr;

	BNX2FC_ELS_DBG("Entered process_els_compl xid = 0x%x "
			"cmd_type = %d\n", els_req->xid, els_req->cmd_type);

	if (test_and_set_bit(BNX2FC_FLAG_ELS_DONE,
			     &els_req->req_flags)) {
		BNX2FC_ELS_DBG("Timer context finished processing this "
			   "els - 0x%x\n", els_req->xid);
		/* This IO doesn't receive cleanup completion */
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		return;
	}

	/* Cancel the timeout_work, as we received the response */
	if (cancel_delayed_work(&els_req->timeout_work))
		kref_put(&els_req->refcount,
			 bnx2fc_cmd_release); /* drop timer hold */

	if (els_req->on_active_queue) {
		list_del_init(&els_req->link);
		els_req->on_active_queue = 0;
	}

	mp_req = &(els_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);

	hdr = (u64 *)fc_hdr;
	temp_hdr = (u64 *)
		&task->rxwr_only.union_ctx.comp_info.mp_rsp.fc_hdr;
	hdr[0] = cpu_to_be64(temp_hdr[0]);
	hdr[1] = cpu_to_be64(temp_hdr[1]);
	hdr[2] = cpu_to_be64(temp_hdr[2]);

	mp_req->resp_len =
		task->rxwr_only.union_ctx.comp_info.mp_rsp.mp_payload_len;

	/* Parse ELS response */
	if ((els_req->cb_func) && (els_req->cb_arg)) {
		els_req->cb_func(els_req->cb_arg);
		els_req->cb_arg = NULL;
	}

	kref_put(&els_req->refcount, bnx2fc_cmd_release);
}

#define		BNX2FC_FCOE_MAC_METHOD_GRANTED_MAC	1
#define		BNX2FC_FCOE_MAC_METHOD_FCF_MAP		2
#define		BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC	3
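/*
 * bnx2fc_flogi_resp - FLOGI response handler that derives the local FCoE
 * MAC address (granted MAC, FC-MAP + assigned FC_ID, or the default FCoE
 * MAC) before handing the response to libfc via fc_lport_flogi_resp().
 */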
static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
			      void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;

	struct fc_frame_header *fh;
	u8 *granted_mac;
	u8 fcoe_mac[6];
	u8 fc_map[3];
	int method;

	if (IS_ERR(fp))
		goto done;

	fh = fc_frame_header_get(fp);
	granted_mac = fr_cb(fp)->granted_mac;

	/*
	 * We set the source MAC for FCoE traffic based on the Granted MAC
	 * address from the switch.
	 *
	 * If granted_mac is non-zero, we use that.
	 * If the granted_mac is zeroed out, create the FCoE MAC based on
	 * the sel_fcf->fc_map and the d_id of the FLOGI frame.
	 * If sel_fcf->fc_map is 0, then we use the default FCF-MAC plus the
	 * d_id of the FLOGI frame.
	 */
	if (!is_zero_ether_addr(granted_mac)) {
		ether_addr_copy(fcoe_mac, granted_mac);
		method = BNX2FC_FCOE_MAC_METHOD_GRANTED_MAC;
	} else if (fip->sel_fcf && fip->sel_fcf->fc_map != 0) {
		hton24(fc_map, fip->sel_fcf->fc_map);
		fcoe_mac[0] = fc_map[0];
		fcoe_mac[1] = fc_map[1];
		fcoe_mac[2] = fc_map[2];
		fcoe_mac[3] = fh->fh_d_id[0];
		fcoe_mac[4] = fh->fh_d_id[1];
		fcoe_mac[5] = fh->fh_d_id[2];
		method = BNX2FC_FCOE_MAC_METHOD_FCF_MAP;
	} else {
		fc_fcoe_set_mac(fcoe_mac, fh->fh_d_id);
		method = BNX2FC_FCOE_MAC_METHOD_FCOE_SET_MAC;
	}

	BNX2FC_HBA_DBG(lport, "fcoe_mac=%pM method=%d\n", fcoe_mac, method);
	fip->update_mac(lport, fcoe_mac);
done:
	fc_lport_flogi_resp(seq, fp, lport);
}

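/*
 * bnx2fc_logo_resp - fabric LOGO response handler; clears the FCoE MAC
 * set at FLOGI time before handing the response to libfc via
 * fc_lport_logo_resp().
 */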
static void bnx2fc_logo_resp(struct fc_seq *seq, struct fc_frame *fp,
			     void *arg)
{
	struct fcoe_ctlr *fip = arg;
	struct fc_exch *exch = fc_seq_exch(seq);
	struct fc_lport *lport = exch->lp;
	static u8 zero_mac[ETH_ALEN] = { 0 };

	if (!IS_ERR(fp))
		fip->update_mac(lport, zero_mac);
	fc_lport_logo_resp(seq, fp, lport);
}

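/*
 * bnx2fc_elsct_send - ELS/CT send hook registered with libfc. FLOGI,
 * FDISC and fabric LOGO responses are intercepted so that the FCoE MAC
 * can be updated; everything else is passed straight to fc_elsct_send().
 */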
struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
				      struct fc_frame *fp, unsigned int op,
				      void (*resp)(struct fc_seq *,
						   struct fc_frame *,
						   void *),
				      void *arg, u32 timeout)
{
	struct fcoe_port *port = lport_priv(lport);
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
	struct fc_frame_header *fh = fc_frame_header_get(fp);

	switch (op) {
	case ELS_FLOGI:
	case ELS_FDISC:
		return fc_elsct_send(lport, did, fp, op, bnx2fc_flogi_resp,
				     fip, timeout);
	case ELS_LOGO:
		/* only hook onto fabric logouts, not port logouts */
		if (ntoh24(fh->fh_d_id) != FC_FID_FLOGI)
			break;
		return fc_elsct_send(lport, did, fp, op, bnx2fc_logo_resp,
				     fip, timeout);
	}
	return fc_elsct_send(lport, did, fp, op, resp, arg, timeout);
}