/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
#include <scsi/fc/fc_fip.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/fc_frame.h>
#include <scsi/libfc.h>
#include "fnic_io.h"
#include "fnic.h"
#include "fnic_fip.h"
#include "cq_enet_desc.h"
#include "cq_exch_desc.h"

static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
struct workqueue_struct *fnic_fip_queue;
struct workqueue_struct *fnic_event_queue;

static void fnic_set_eth_mode(struct fnic *);
static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);

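/*
 * fnic_handle_link() - worker invoked on a firmware link event.
 * Refreshes the cached link state, link-down count and port speed, then
 * notifies libfcoe of the transition (or restarts FIP VLAN discovery on
 * FIP-capable adapters when the link comes up).
 */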
void fnic_handle_link(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, link_work);
	unsigned long flags;
	int old_link_status;
	u32 old_link_down_cnt;
	u64 old_port_speed, new_port_speed;

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	old_link_down_cnt = fnic->link_down_cnt;
	old_link_status = fnic->link_status;
	old_port_speed = atomic64_read(
			&fnic->fnic_stats.misc_stats.current_port_speed);

	fnic->link_status = vnic_dev_link_status(fnic->vdev);
	fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

	new_port_speed = vnic_dev_port_speed(fnic->vdev);
	atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
			new_port_speed);
	if (old_port_speed != new_port_speed)
		shost_printk(KERN_INFO, fnic->lport->host,
				"Current vnic speed set to: %llu\n",
				new_port_speed);

	switch (vnic_dev_port_speed(fnic->vdev)) {
	case DCEM_PORTSPEED_10G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_10GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
		break;
	case DCEM_PORTSPEED_20G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_20GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
		break;
	case DCEM_PORTSPEED_25G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_25GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
		break;
	case DCEM_PORTSPEED_40G:
	case DCEM_PORTSPEED_4x10G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_40GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
		break;
	case DCEM_PORTSPEED_100G:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_100GBIT;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
		break;
	default:
		fc_host_speed(fnic->lport->host)   = FC_PORTSPEED_UNKNOWN;
		fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
		break;
	}

	if (old_link_status == fnic->link_status) {
		if (!fnic->link_status) {
			/* DOWN -> DOWN */
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fc_trace_set_data(fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN->DOWN",
				strlen("Link Status: DOWN->DOWN"));
		} else {
			if (old_link_down_cnt != fnic->link_down_cnt) {
				/* UP -> DOWN -> UP */
				fnic->lport->host_stats.link_failure_count++;
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no,
					FNIC_FC_LE,
					"Link Status: UP_DOWN_UP",
					strlen("Link Status: UP_DOWN_UP")
					);
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link down\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				if (fnic->config.flags & VFCF_FIP_CAPABLE) {
					/* start FCoE VLAN discovery */
					fnic_fc_trace_set_data(
						fnic->lport->host->host_no,
						FNIC_FC_LE,
						"Link Status: UP_DOWN_UP_VLAN",
						strlen(
						"Link Status: UP_DOWN_UP_VLAN")
						);
					fnic_fcoe_send_vlan_req(fnic);
					return;
				}
				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
					     "link up\n");
				fcoe_ctlr_link_up(&fnic->ctlr);
			} else {
				/* UP -> UP */
				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
				fnic_fc_trace_set_data(
					fnic->lport->host->host_no, FNIC_FC_LE,
					"Link Status: UP_UP",
					strlen("Link Status: UP_UP"));
			}
		}
	} else if (fnic->link_status) {
		/* DOWN -> UP */
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			/* start FCoE VLAN discovery */
			fnic_fc_trace_set_data(
				fnic->lport->host->host_no,
				FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
				strlen("Link Status: DOWN_UP_VLAN"));
			fnic_fcoe_send_vlan_req(fnic);
			return;
		}
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
		fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
		fcoe_ctlr_link_up(&fnic->ctlr);
	} else {
		/* UP -> DOWN */
		fnic->lport->host_stats.link_failure_count++;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
		fnic_fc_trace_set_data(
			fnic->lport->host->host_no, FNIC_FC_LE,
			"Link Status: UP_DOWN",
			strlen("Link Status: UP_DOWN"));
		if (fnic->config.flags & VFCF_FIP_CAPABLE) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				"deleting fip-timer during link-down\n");
			del_timer_sync(&fnic->fip_timer);
		}
		fcoe_ctlr_link_down(&fnic->ctlr);
	}
}

/*
 * This function passes incoming fabric frames to libFC.
 */
void fnic_handle_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, frame_work);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->frame_queue))) {

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		fp = (struct fc_frame *)skb;

		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fc_exch_recv(lp, fp);
	}
}

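/*
 * fnic_fcoe_evlist_free() - drain and free all events still queued on
 * the fnic event list, under fnic_lock.
 */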
void fnic_fcoe_evlist_free(struct fnic *fnic)
{
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		list_del(&fevt->list);
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

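/*
 * fnic_handle_event() - worker that drains the fnic event list and
 * dispatches each event (VLAN discovery, FCF discovery) under fnic_lock.
 */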
void fnic_handle_event(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, event_work);
	struct fnic_event *fevt = NULL;
	struct fnic_event *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (list_empty(&fnic->evlist)) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
		if (fnic->stop_rx_link_events) {
			list_del(&fevt->list);
			kfree(fevt);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		/*
		 * If we're in a transitional state, leave the event queued
		 * and return. The list will be serviced when we get to a
		 * stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}

		list_del(&fevt->list);
		switch (fevt->event) {
		case FNIC_EVT_START_VLAN_DISC:
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			fnic_fcoe_send_vlan_req(fnic);
			spin_lock_irqsave(&fnic->fnic_lock, flags);
			break;
		case FNIC_EVT_START_FCF_DISC:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start FCF Discovery\n");
			fnic_fcoe_start_fcf_disc(fnic);
			break;
		default:
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Unknown event 0x%x\n", fevt->event);
			break;
		}
		kfree(fevt);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/**
 * is_fnic_fip_flogi_reject() - check if a received FIP FLOGI frame was rejected
 * @fip: The FCoE controller that received the frame
 * @skb: The received FIP frame
 *
 * Returns non-zero if the encapsulated FLOGI was rejected by the switch
 * with an LS_RJT (e.g. unsupported command or insufficient resources).
 */
static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
					 struct sk_buff *skb)
{
	struct fc_lport *lport = fip->lp;
	struct fip_header *fiph;
	struct fc_frame_header *fh = NULL;
	struct fip_desc *desc;
	struct fip_encaps *els;
	u16 op;
	u8 els_op;
	u8 sub;

	size_t rlen;
	size_t dlen = 0;

	if (skb_linearize(skb))
		return 0;

	if (skb->len < sizeof(*fiph))
		return 0;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (op != FIP_OP_LS)
		return 0;

	if (sub != FIP_SC_REP)
		return 0;

	rlen = ntohs(fiph->fip_dl_len) * 4;
	if (rlen + sizeof(*fiph) > skb->len)
		return 0;

	desc = (struct fip_desc *)(fiph + 1);
	dlen = desc->fip_dlen * FIP_BPW;

	if (desc->fip_dtype == FIP_DT_FLOGI) {

		if (dlen < sizeof(*els) + sizeof(*fh) + 1)
			return 0;

		els = (struct fip_encaps *)desc;
		fh = (struct fc_frame_header *)(els + 1);

		/*
		 * Check the encapsulated ELS command code: an LS_RJT
		 * response means the switch rejected our FLOGI.
		 */
		els_op = *(u8 *)(fh + 1);
		if (els_op == ELS_LS_RJT) {
			shost_printk(KERN_INFO, lport->host,
				  "Flogi Request Rejected by Switch\n");
			return 1;
		}
		shost_printk(KERN_INFO, lport->host,
				"Flogi Request Accepted by Switch\n");
	}
	return 0;
}

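/*
 * fnic_fcoe_send_vlan_req() - build and transmit a FIP VLAN discovery
 * request (MAC and WWNN descriptors) to the ALL-FCF-MACs address, and
 * arm the FIP timer so the request is retried if no response arrives.
 */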
static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	struct sk_buff *skb;
	char *eth_fr;
	struct fip_vlan *vlan;
	u64 vlan_tov;

	fnic_fcoe_reset_vlans(fnic);
	fnic->set_vlan(fnic, 0);

	if (printk_ratelimit())
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "Sending VLAN request...\n");

	skb = dev_alloc_skb(sizeof(struct fip_vlan));
	if (!skb)
		return;

	eth_fr = (char *)skb->data;
	vlan = (struct fip_vlan *)eth_fr;

	memset(vlan, 0, sizeof(*vlan));
	memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN);
	memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN);
	vlan->eth.h_proto = htons(ETH_P_FIP);

	vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER);
	vlan->fip.fip_op = htons(FIP_OP_VLAN);
	vlan->fip.fip_subcode = FIP_SC_VL_REQ;
	vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW);

	vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC;
	vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW;
	memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN);

	vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME;
	vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW;
	put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn);
	atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs);

	skb_put(skb, sizeof(*vlan));
	skb->protocol = htons(ETH_P_FIP);
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	fip->send(fip, skb);

	/* set a timer so that we can retry if there is no response */
	vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV);
	mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
}

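/*
 * fnic_fcoe_process_vlan_resp() - parse a FIP VLAN notification, build
 * the candidate VLAN list, select the first VLAN, and kick off FCF
 * solicitation via fcoe_ctlr_link_up().
 */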
static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
{
	struct fcoe_ctlr *fip = &fnic->ctlr;
	struct fip_header *fiph;
	struct fip_desc *desc;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u16 vid;
	size_t rlen;
	size_t dlen;
	struct fcoe_vlan *vlan;
	u64 sol_time;
	unsigned long flags;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response...\n");

	fiph = (struct fip_header *) skb->data;

	FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
		  "Received VLAN response... OP 0x%x SUB_OP 0x%x\n",
		  ntohs(fiph->fip_op), fiph->fip_subcode);

	rlen = ntohs(fiph->fip_dl_len) * 4;
	fnic_fcoe_reset_vlans(fnic);
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	desc = (struct fip_desc *)(fiph + 1);
	while (rlen > 0) {
		dlen = desc->fip_dlen * FIP_BPW;
		switch (desc->fip_dtype) {
		case FIP_DT_VLAN:
			vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan);
			shost_printk(KERN_INFO, fnic->lport->host,
				  "process_vlan_resp: FIP VLAN %d\n", vid);
			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
			if (!vlan) {
				/* retry from timer */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				goto out;
			}
			vlan->vid = vid & 0x0fff;
			vlan->state = FIP_VLAN_AVAIL;
			list_add_tail(&vlan->list, &fnic->vlans);
			break;
		}
		desc = (struct fip_desc *)((char *)desc + dlen);
		rlen -= dlen;
	}

	/* any VLAN descriptors present? */
	if (list_empty(&fnic->vlans)) {
		/* retry from timer */
		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
		FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
			  "No VLAN descriptors in FIP VLAN response\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		goto out;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count++;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(fip);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
out:
	return;
}

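/*
 * fnic_fcoe_start_fcf_disc() - program the first discovered VLAN into
 * the adapter and start FCF discovery by reporting link-up to libfcoe.
 */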
static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	u64 sol_time;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	fnic->set_vlan(fnic, vlan->vid);
	vlan->state = FIP_VLAN_SENT; /* sent now */
	vlan->sol_count = 1;
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);

	/* start the solicitation */
	fcoe_ctlr_link_up(&fnic->ctlr);

	sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
	mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
}

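/*
 * fnic_fcoe_vlan_check() - check that the currently selected VLAN is
 * usable; a VLAN still in SENT state is marked USED once traffic is
 * seen on it. Returns 0 on success, -EINVAL otherwise.
 */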
static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
{
	unsigned long flags;
	struct fcoe_vlan *fvlan;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return -EINVAL;
	}

	fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	if (fvlan->state == FIP_VLAN_USED) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}

	if (fvlan->state == FIP_VLAN_SENT) {
		fvlan->state = FIP_VLAN_USED;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
	return -EINVAL;
}

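/*
 * fnic_event_enq() - allocate an event, append it to the fnic event
 * list under fnic_lock, and schedule the event worker to process it.
 */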
static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
{
	struct fnic_event *fevt;
	unsigned long flags;

	fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC);
	if (!fevt)
		return;

	fevt->fnic = fnic;
	fevt->event = ev;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	list_add_tail(&fevt->list, &fnic->evlist);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	schedule_work(&fnic->event_work);
}

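/*
 * fnic_fcoe_handle_fip_frame() - handle the fnic-specific FIP operations
 * (VLAN notification, FCF advertisement check, clear-virtual-link).
 * Returns 0 if the frame was consumed here, negative on bad input, and
 * 1 if the frame should also be passed on to libfcoe.
 */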
static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
{
	struct fip_header *fiph;
	int ret = 1;
	u16 op;
	u8 sub;

	if (!skb || !(skb->data))
		return -1;

	if (skb_linearize(skb))
		goto drop;

	fiph = (struct fip_header *)skb->data;
	op = ntohs(fiph->fip_op);
	sub = fiph->fip_subcode;

	if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER)
		goto drop;

	if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len)
		goto drop;

	if (op == FIP_OP_DISC && sub == FIP_SC_ADV) {
		if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
			goto drop;
		/* pass it on to fcoe */
		ret = 1;
	} else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) {
		/* set the vlan as used */
		fnic_fcoe_process_vlan_resp(fnic, skb);
		ret = 0;
	} else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) {
		/* received CVL request, restart vlan disc */
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		/* pass it on to fcoe */
		ret = 1;
	}
drop:
	return ret;
}

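/*
 * fnic_handle_fip_frame() - worker that drains the FIP frame queue,
 * runs each frame through the fnic-specific FIP handling, restarts VLAN
 * discovery on a FLOGI reject, and hands the rest to libfcoe.
 */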
void fnic_handle_fip_frame(struct work_struct *work)
{
	struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	unsigned long flags;
	struct sk_buff *skb;
	struct ethhdr *eh;

	while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(skb);
			return;
		}
		/*
		 * If we're in a transitional state, just re-queue and return.
		 * The queue will be serviced when we get to a stable state.
		 */
		if (fnic->state != FNIC_IN_FC_MODE &&
		    fnic->state != FNIC_IN_ETH_MODE) {
			skb_queue_head(&fnic->fip_frame_queue, skb);
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		eh = (struct ethhdr *)skb->data;
		if (eh->h_proto == htons(ETH_P_FIP)) {
			skb_pull(skb, sizeof(*eh));
			if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
				dev_kfree_skb(skb);
				continue;
			}
			/*
			 * If there is a FLOGI reject, clear all FCFs and
			 * restart FCF discovery from scratch.
			 */
			if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
				atomic64_inc(
					&fnic_stats->vlan_stats.flogi_rejects);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "Trigger a Link down - VLAN Disc\n");
				fcoe_ctlr_link_down(&fnic->ctlr);
				/* start FCoE VLAN discovery */
				fnic_fcoe_send_vlan_req(fnic);
				dev_kfree_skb(skb);
				continue;
			}
			fcoe_ctlr_recv(&fnic->ctlr, skb);
			continue;
		}
	}
}

/**
 * fnic_import_rq_eth_pkt() - handle a received FCoE or FIP frame.
 * @fnic:	fnic instance.
 * @skb:	Ethernet Frame.
 */
static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
{
	struct fc_frame *fp;
	struct ethhdr *eh;
	struct fcoe_hdr *fcoe_hdr;
	struct fcoe_crc_eof *ft;

	/*
	 * Undo VLAN encapsulation if present.
	 */
	eh = (struct ethhdr *)skb->data;
	if (eh->h_proto == htons(ETH_P_8021Q)) {
		memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2);
		eh = skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}
	if (eh->h_proto == htons(ETH_P_FIP)) {
		if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
			printk(KERN_ERR "Dropped FIP frame, as firmware "
					"uses non-FIP mode; enable FIP "
					"using UCSM\n");
			goto drop;
		}
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
		skb_queue_tail(&fnic->fip_frame_queue, skb);
		queue_work(fnic_fip_queue, &fnic->fip_frame_work);
		return 1;		/* let caller know packet was used */
	}
	if (eh->h_proto != htons(ETH_P_FCOE))
		goto drop;
	skb_set_network_header(skb, sizeof(*eh));
	skb_pull(skb, sizeof(*eh));

	fcoe_hdr = (struct fcoe_hdr *)skb->data;
	if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER)
		goto drop;

	fp = (struct fc_frame *)skb;
	fc_frame_init(fp);
	fr_sof(fp) = fcoe_hdr->fcoe_sof;
	skb_pull(skb, sizeof(struct fcoe_hdr));
	skb_reset_transport_header(skb);

	ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft));
	fr_eof(fp) = ft->fcoe_eof;
	skb_trim(skb, skb->len - sizeof(*ft));
	return 0;
drop:
	dev_kfree_skb_irq(skb);
	return -1;
}

/**
 * fnic_update_mac_locked() - set data MAC address and filters.
 * @fnic:	fnic instance.
 * @new:	newly-assigned FCoE MAC address.
 *
 * Called with the fnic lock held.
 */
void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
{
	u8 *ctl = fnic->ctlr.ctl_src_addr;
	u8 *data = fnic->data_src_addr;

	if (is_zero_ether_addr(new))
		new = ctl;
	if (ether_addr_equal(data, new))
		return;
	FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
	if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
		vnic_dev_del_addr(fnic->vdev, data);
	memcpy(data, new, ETH_ALEN);
	if (!ether_addr_equal(new, ctl))
		vnic_dev_add_addr(fnic->vdev, new);
}

/**
 * fnic_update_mac() - set data MAC address and filters.
 * @lport:	local port.
 * @new:	newly-assigned FCoE MAC address.
 */
void fnic_update_mac(struct fc_lport *lport, u8 *new)
{
	struct fnic *fnic = lport_priv(lport);

	spin_lock_irq(&fnic->fnic_lock);
	fnic_update_mac_locked(fnic, new);
	spin_unlock_irq(&fnic->fnic_lock);
}

/**
 * fnic_set_port_id() - set the port_ID after successful FLOGI.
 * @lport:	local port.
 * @port_id:	assigned FC_ID.
 * @fp:		received frame containing the FLOGI accept or NULL.
 *
 * This is called from libfc when a new FC_ID has been assigned.
 * This causes us to reset the firmware to FC_MODE and set up the new MAC
 * address and FC_ID.
 *
 * It is also called with FC_ID 0 when we're logged off.
 *
 * If the FC_ID is due to point-to-point, fp may be NULL.
 */
void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lport);
	u8 *mac;
	int ret;

	FNIC_FCS_DBG(KERN_DEBUG, lport->host, "set port_id %x fp %p\n",
		     port_id, fp);

	/*
	 * If we're clearing the FC_ID, change to use the ctl_src_addr.
	 * Set Ethernet mode to send FLOGI.
	 */
	if (!port_id) {
		fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
		fnic_set_eth_mode(fnic);
		return;
	}

	if (fp) {
		mac = fr_cb(fp)->granted_mac;
		if (is_zero_ether_addr(mac)) {
			/* non-FIP - FLOGI already accepted - ignore return */
			fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
		}
		fnic_update_mac(lport, mac);
	}

	/* Change state to reflect transition to FC mode */
	spin_lock_irq(&fnic->fnic_lock);
	if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
	else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		spin_unlock_irq(&fnic->fnic_lock);
		return;
	}
	spin_unlock_irq(&fnic->fnic_lock);

	/*
	 * Send FLOGI registration to firmware to set up FC mode.
	 * The new address will be set up when registration completes.
	 */
	ret = fnic_flogi_reg_handler(fnic, port_id);

	if (ret < 0) {
		spin_lock_irq(&fnic->fnic_lock);
		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irq(&fnic->fnic_lock);
	}
}

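/*
 * fnic_rq_cmpl_frame_recv() - per-descriptor receive handler: unmaps the
 * buffer, decodes the FCP or enet completion descriptor, validates the
 * frame (FCS/CRC/encapsulation), and queues good frames for the frame
 * worker to pass up to libFC.
 */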
static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
				    *cq_desc, struct vnic_rq_buf *buf,
				    int skipped __attribute__((unused)),
				    void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	struct fc_frame *fp;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u8 type, color, eop, sop, ingress_port, vlan_stripped;
	u8 fcoe = 0, fcoe_sof, fcoe_eof;
	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
	u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc;
	u8 fcs_ok = 1, packet_error = 0;
	u16 q_number, completed_index, bytes_written = 0, vlan, checksum;
	u32 rss_hash;
	u16 exchange_id, tmpl;
	u8 sof = 0;
	u8 eof = 0;
	u32 fcp_bytes_written = 0;
	unsigned long flags;

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);
	skb = buf->os_buf;
	fp = (struct fc_frame *)skb;
	buf->os_buf = NULL;

	cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index);
	if (type == CQ_DESC_TYPE_RQ_FCP) {
		cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc,
				   &type, &color, &q_number, &completed_index,
				   &eop, &sop, &fcoe_fc_crc_ok, &exchange_id,
				   &tmpl, &fcp_bytes_written, &sof, &eof,
				   &ingress_port, &packet_error,
				   &fcoe_enc_error, &fcs_ok, &vlan_stripped,
				   &vlan);
		skb_trim(skb, fcp_bytes_written);
		fr_sof(fp) = sof;
		fr_eof(fp) = eof;

	} else if (type == CQ_DESC_TYPE_RQ_ENET) {
		cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
				    &type, &color, &q_number, &completed_index,
				    &ingress_port, &fcoe, &eop, &sop,
				    &rss_type, &csum_not_calc, &rss_hash,
				    &bytes_written, &packet_error,
				    &vlan_stripped, &vlan, &checksum,
				    &fcoe_sof, &fcoe_fc_crc_ok,
				    &fcoe_enc_error, &fcoe_eof,
				    &tcp_udp_csum_ok, &udp, &tcp,
				    &ipv4_csum_ok, &ipv6, &ipv4,
				    &ipv4_fragment, &fcs_ok);
		skb_trim(skb, bytes_written);
		if (!fcs_ok) {
			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "fcs error. dropping packet.\n");
			goto drop;
		}
		if (fnic_import_rq_eth_pkt(fnic, skb))
			return;

	} else {
		/* wrong CQ type */
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic rq_cmpl wrong cq type x%x\n", type);
		goto drop;
	}

	if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) {
		atomic64_inc(&fnic_stats->misc_stats.frame_errors);
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "fnic rq_cmpl fcoe x%x fcsok x%x"
			     " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err"
			     " x%x\n",
			     fcoe, fcs_ok, packet_error,
			     fcoe_fc_crc_ok, fcoe_enc_error);
		goto drop;
	}

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		goto drop;
	}
	fr_dev(fp) = fnic->lport;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
					(char *)skb->data, skb->len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	skb_queue_tail(&fnic->frame_queue, skb);
	queue_work(fnic_event_queue, &fnic->frame_work);

	return;
drop:
	dev_kfree_skb_irq(skb);
}

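/*
 * fnic_rq_cmpl_handler_cont() - per-completion callback that services
 * one RQ descriptor through fnic_rq_cmpl_frame_recv().
 */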
static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);

	vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
			VNIC_RQ_RETURN_DESC, fnic_rq_cmpl_frame_recv,
			NULL);
	return 0;
}

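/*
 * fnic_rq_cmpl_handler() - service up to rq_work_to_do receive
 * completions on every RQ and replenish the consumed buffers.
 */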
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame can't alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}

/*
 * This function is called once at init time to allocate and fill RQ
 * buffers. Subsequently, it is called in the interrupt context after RQ
 * buffer processing to replenish the buffers in the RQ.
 */
int fnic_alloc_rq_frame(struct vnic_rq *rq)
{
	struct fnic *fnic = vnic_dev_priv(rq->vdev);
	struct sk_buff *skb;
	u16 len;
	dma_addr_t pa;
	int r;

	len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM;
	skb = dev_alloc_skb(len);
	if (!skb) {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unable to allocate RQ sk_buff\n");
		return -ENOMEM;
	}
	skb_reset_mac_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		r = -ENOMEM;
		printk(KERN_ERR "DMA mapping failed with error %d\n", r);
		goto free_skb;
	}

	fnic_queue_rq_desc(rq, skb, pa, len);
	return 0;

free_skb:
	kfree_skb(skb);
	return r;
}

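/*
 * fnic_free_rq_buf() - unmap and free a receive buffer; used when the
 * RQ is cleaned up.
 */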
void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(rq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_FROM_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

/**
 * fnic_eth_send() - Send Ethernet frame.
 * @fip:	fcoe_ctlr instance.
 * @skb:	Ethernet Frame, FIP, without VLAN encapsulation.
 */
void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
	struct fnic *fnic = fnic_from_ctlr(fip);
	struct vnic_wq *wq = &fnic->wq[0];
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	unsigned long flags;

	if (!fnic->vlan_hw_insert) {
		eth_hdr = (struct ethhdr *)skb_mac_header(skb);
		vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr));
		memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN);
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	} else {
		if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
			FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
			printk(KERN_ERR "fnic ctlr frame trace error!!!");
		}
	}

	pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
			DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		printk(KERN_ERR "DMA mapping failed\n");
		goto free_skb;
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);
	if (!vnic_wq_desc_avail(wq))
		goto irq_restore;

	fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
			       0 /* hw inserts cos value */,
			       fnic->vlan_id, 1);
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	return;

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
	dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
	kfree_skb(skb);
}

/*
 * Send FC frame.
 */
static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
	    fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
		return 0;

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (fnic->ctlr.map_dest)
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
	else
		memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
	memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
	if (dma_mapping_error(&fnic->pdev->dev, pa)) {
		ret = -ENOMEM;
		printk(KERN_ERR "DMA map failed with error %d\n", ret);
		goto free_skb_on_err;
	}

	if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
				(char *)eth_hdr, tot_len)) != 0) {
		printk(KERN_ERR "fnic ctlr frame trace error!!!");
	}

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
		ret = -1;
		goto irq_restore;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   0 /* hw inserts cos value */,
			   fnic->vlan_id, 1, 1, 1);

irq_restore:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

free_skb_on_err:
	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}

/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	unsigned long flags;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		return -1;
	}

	/*
	 * Queue frame if in a transitional state.
	 * This occurs while registering the Port_ID / MAC address after FLOGI.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
		skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return 0;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return fnic_send_frame(fnic, fp);
}

/**
 * fnic_flush_tx() - send queued frames.
 * @fnic: fnic device
 *
 * Send frames that were waiting to go out in FC or Ethernet mode.
 * Whenever changing modes we purge queued frames, so these frames should
 * be queued for the stable mode that we're in, either FC or Ethernet.
 *
 * Called without fnic_lock held.
 */
void fnic_flush_tx(struct fnic *fnic)
{
	struct sk_buff *skb;
	struct fc_frame *fp;

	while ((skb = skb_dequeue(&fnic->tx_queue))) {
		fp = (struct fc_frame *)skb;
		fnic_send_frame(fnic, fp);
	}
}

/**
 * fnic_set_eth_mode() - put fnic into ethernet mode.
 * @fnic: fnic device
 *
 * Called without fnic lock held.
 */
static void fnic_set_eth_mode(struct fnic *fnic)
{
	unsigned long flags;
	enum fnic_state old_state;
	int ret;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret)
			fnic->state = old_state;
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
	case FNIC_IN_ETH_MODE:
		break;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

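/*
 * fnic_wq_complete_frame_send() - transmit-completion callback: unmaps
 * the DMA buffer and frees the sent frame.
 */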
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}

static int fnic_wq_cmpl_handler_cont(struct vnic_dev *vdev,
				     struct cq_desc *cq_desc, u8 type,
				     u16 q_number, u16 completed_index,
				     void *opaque)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
	vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
			fnic_wq_complete_frame_send, NULL);
	spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);

	return 0;
}

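/*
 * fnic_wq_cmpl_handler() - service up to work_to_do transmit
 * completions on every raw WQ; returns the amount of work done.
 */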
int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
						 work_to_do,
						 fnic_wq_cmpl_handler_cont,
						 NULL);
	}

	return wq_work_done;
}

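/*
 * fnic_free_wq_buf() - unmap and free a transmit buffer; used when the
 * WQ is cleaned up.
 */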
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}

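/*
 * fnic_fcoe_reset_vlans() - discard the entire list of discovered VLANs
 * ahead of a fresh FIP VLAN discovery cycle.
 */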
void fnic_fcoe_reset_vlans(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fcoe_vlan *next;

	/*
	 * Indicating a link down to libfcoe frees all the FCFs. That may
	 * not be required here, since we already do it before sending a
	 * VLAN discovery request.
	 */
	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (!list_empty(&fnic->vlans)) {
		list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
			list_del(&vlan->list);
			kfree(vlan);
		}
	}
	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
}

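/*
 * fnic_handle_fip_timer() - FIP retry timer: re-solicits on the current
 * VLAN, moves on to the next candidate VLAN after FCOE_CTLR_MAX_SOL
 * unanswered solicitations, and restarts VLAN discovery when the
 * candidate list is exhausted.
 */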
void fnic_handle_fip_timer(struct fnic *fnic)
{
	unsigned long flags;
	struct fcoe_vlan *vlan;
	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
	u64 sol_time;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	if (fnic->stop_rx_link_events) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
		return;

	spin_lock_irqsave(&fnic->vlans_lock, flags);
	if (list_empty(&fnic->vlans)) {
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* no vlans available, try again */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		return;
	}

	vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
	shost_printk(KERN_DEBUG, fnic->lport->host,
		  "fip_timer: vlan %d state %d sol_count %d\n",
		  vlan->vid, vlan->state, vlan->sol_count);
	switch (vlan->state) {
	case FIP_VLAN_USED:
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			  "FIP VLAN is selected for FC transaction\n");
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		break;
	case FIP_VLAN_FAILED:
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		/* if all vlans are in failed state, restart vlan disc */
		if (printk_ratelimit())
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				  "Start VLAN Discovery\n");
		fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
		break;
	case FIP_VLAN_SENT:
		if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) {
			/*
			 * no response on this vlan, remove it from the list.
			 * Try the next vlan
			 */
			shost_printk(KERN_INFO, fnic->lport->host,
				  "Dequeue this VLAN ID %d from list\n",
				  vlan->vid);
			list_del(&vlan->list);
			kfree(vlan);
			vlan = NULL;
			if (list_empty(&fnic->vlans)) {
				/* we exhausted all vlans, restart vlan disc */
				spin_unlock_irqrestore(&fnic->vlans_lock,
							flags);
				shost_printk(KERN_INFO, fnic->lport->host,
					  "fip_timer: vlan list empty, "
					  "trigger vlan disc\n");
				fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
				return;
			}
			/* check the next vlan */
			vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
							list);
			fnic->set_vlan(fnic, vlan->vid);
			vlan->state = FIP_VLAN_SENT; /* sent now */
		}
		/* bump the solicitation count while still holding the lock */
		vlan->sol_count++;
		spin_unlock_irqrestore(&fnic->vlans_lock, flags);
		atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count);
		sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY);
		mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
		break;
	}
}