Lines matching refs: fnic (Linux fnic FCoE driver, fnic_fcs.c)

21 #include "fnic.h"
30 static void fnic_set_eth_mode(struct fnic *);
31 static void fnic_fcoe_send_vlan_req(struct fnic *fnic);
32 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic);
33 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *);
34 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag);
35 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb);
39 struct fnic *fnic = container_of(work, struct fnic, link_work);
45 spin_lock_irqsave(&fnic->fnic_lock, flags);
47 fnic->link_events = 1; /* less work to just set it every time */
49 if (fnic->stop_rx_link_events) {
50 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
54 old_link_down_cnt = fnic->link_down_cnt;
55 old_link_status = fnic->link_status;
57 &fnic->fnic_stats.misc_stats.current_port_speed);
59 fnic->link_status = vnic_dev_link_status(fnic->vdev);
60 fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
62 new_port_speed = vnic_dev_port_speed(fnic->vdev);
63 atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed,
66 FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host,
70 switch (vnic_dev_port_speed(fnic->vdev)) {
72 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
73 fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
76 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT;
77 fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT;
80 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
81 fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
85 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
86 fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
89 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
90 fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
93 fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
94 fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
98 if (old_link_status == fnic->link_status) {
99 if (!fnic->link_status) {
101 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
102 fnic_fc_trace_set_data(fnic->lport->host->host_no,
106 if (old_link_down_cnt != fnic->link_down_cnt) {
108 fnic->lport->host_stats.link_failure_count++;
109 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
111 fnic->lport->host->host_no,
116 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
118 fcoe_ctlr_link_down(&fnic->ctlr);
119 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
122 fnic->lport->host->host_no,
128 fnic_fcoe_send_vlan_req(fnic);
131 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
133 fcoe_ctlr_link_up(&fnic->ctlr);
136 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
138 fnic->lport->host->host_no, FNIC_FC_LE,
143 } else if (fnic->link_status) {
145 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
146 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
149 fnic->lport->host->host_no,
152 fnic_fcoe_send_vlan_req(fnic);
155 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
156 fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
158 fcoe_ctlr_link_up(&fnic->ctlr);
161 fnic->lport->host_stats.link_failure_count++;
162 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
163 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
165 fnic->lport->host->host_no, FNIC_FC_LE,
168 if (fnic->config.flags & VFCF_FIP_CAPABLE) {
169 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
171 del_timer_sync(&fnic->fip_timer);
173 fcoe_ctlr_link_down(&fnic->ctlr);
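
The fragments above (source lines 39-173) are fnic_handle_link(), the deferred work handler that reconciles the vNIC link state with the FC transport: it recovers the fnic from the work_struct, samples the hardware under fnic_lock, and propagates up/down transitions to libfcoe (or kicks off FIP VLAN discovery when VFCF_FIP_CAPABLE is set). A minimal sketch of that pattern, using a simplified stand-in struct rather than the real struct fnic:

    /* Sketch of the work-handler pattern shown by fnic_handle_link():
     * recover the owning device with container_of(), sample state under
     * the spinlock, then notify libfcoe of the transition. demo_fnic is
     * a simplified illustration, not the driver's struct fnic. */
    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    struct demo_fnic {
        spinlock_t lock;
        int stop_rx_link_events;
        int link_status;                /* last state reported to the FC layer */
        struct work_struct link_work;
    };

    static int demo_read_link_status(struct demo_fnic *f)
    {
        return 1;                       /* stand-in for vnic_dev_link_status() */
    }

    static void demo_handle_link(struct work_struct *work)
    {
        struct demo_fnic *f = container_of(work, struct demo_fnic, link_work);
        unsigned long flags;
        int old, new;

        spin_lock_irqsave(&f->lock, flags);
        if (f->stop_rx_link_events) {   /* device is being torn down */
            spin_unlock_irqrestore(&f->lock, flags);
            return;
        }
        old = f->link_status;
        new = demo_read_link_status(f);
        f->link_status = new;
        spin_unlock_irqrestore(&f->lock, flags);

        if (old != new) {
            /* the real driver calls fcoe_ctlr_link_up()/fcoe_ctlr_link_down()
             * here, or starts FIP VLAN discovery when VFCF_FIP_CAPABLE is set */
        }
    }
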
183 struct fnic *fnic = container_of(work, struct fnic, frame_work);
184 struct fc_lport *lp = fnic->lport;
189 while ((skb = skb_dequeue(&fnic->frame_queue))) {
191 spin_lock_irqsave(&fnic->fnic_lock, flags);
192 if (fnic->stop_rx_link_events) {
193 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
203 if (fnic->state != FNIC_IN_FC_MODE &&
204 fnic->state != FNIC_IN_ETH_MODE) {
205 skb_queue_head(&fnic->frame_queue, skb);
206 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
209 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
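
The fnic_handle_frame() fragments (source lines 183-209) drain fnic->frame_queue from a work item; when the device is stopping the frame is dropped, and when the hardware is mid-transition the frame is pushed back onto the queue for a later pass. A sketch of that drain-or-requeue loop with a trimmed-down context struct (the real driver hands each accepted frame to libfc via fc_exch_recv()):

    /* Sketch of the frame_queue drain loop in fnic_handle_frame().
     * Field names mirror the listing; the struct is simplified. */
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct demo_ctx {
        spinlock_t lock;
        int stop_rx_link_events;
        int ready;                      /* stands in for the FC/ETH mode check */
        struct sk_buff_head frame_queue;
    };

    static void demo_handle_frames(struct demo_ctx *c)
    {
        struct sk_buff *skb;
        unsigned long flags;

        while ((skb = skb_dequeue(&c->frame_queue))) {
            spin_lock_irqsave(&c->lock, flags);
            if (c->stop_rx_link_events) {
                spin_unlock_irqrestore(&c->lock, flags);
                dev_kfree_skb(skb);
                return;
            }
            if (!c->ready) {
                /* not in FC/ETH mode yet: put the frame back, retry later */
                skb_queue_head(&c->frame_queue, skb);
                spin_unlock_irqrestore(&c->lock, flags);
                return;
            }
            spin_unlock_irqrestore(&c->lock, flags);
            /* the real driver passes the frame to libfc here */
        }
    }
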
215 void fnic_fcoe_evlist_free(struct fnic *fnic)
221 spin_lock_irqsave(&fnic->fnic_lock, flags);
222 if (list_empty(&fnic->evlist)) {
223 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
227 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
231 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
236 struct fnic *fnic = container_of(work, struct fnic, event_work);
241 spin_lock_irqsave(&fnic->fnic_lock, flags);
242 if (list_empty(&fnic->evlist)) {
243 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
247 list_for_each_entry_safe(fevt, next, &fnic->evlist, list) {
248 if (fnic->stop_rx_link_events) {
251 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
258 if (fnic->state != FNIC_IN_FC_MODE &&
259 fnic->state != FNIC_IN_ETH_MODE) {
260 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
267 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
268 fnic_fcoe_send_vlan_req(fnic);
269 spin_lock_irqsave(&fnic->fnic_lock, flags);
272 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
274 fnic_fcoe_start_fcf_disc(fnic);
277 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
283 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
359 static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
361 struct fcoe_ctlr *fip = &fnic->ctlr;
362 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
368 fnic_fcoe_reset_vlans(fnic);
369 fnic->set_vlan(fnic, 0);
372 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
409 mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov));
412 static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb)
414 struct fcoe_ctlr *fip = &fnic->ctlr;
417 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
425 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
430 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
435 fnic_fcoe_reset_vlans(fnic);
436 spin_lock_irqsave(&fnic->vlans_lock, flags);
443 shost_printk(KERN_INFO, fnic->lport->host,
448 spin_unlock_irqrestore(&fnic->vlans_lock,
454 list_add_tail(&vlan->list, &fnic->vlans);
462 if (list_empty(&fnic->vlans)) {
465 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
467 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
471 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
472 fnic->set_vlan(fnic, vlan->vid);
475 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
481 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
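
fnic_fcoe_process_vlan_resp() (source lines 412-481) parses a FIP VLAN notification, rebuilds the fnic->vlans list under vlans_lock, programs the first discovered VLAN through fnic->set_vlan(), and re-arms fip_timer. A compact sketch of the list handling, where demo_vlan is a hypothetical stand-in for struct fcoe_vlan and the allocation is hoisted outside the lock for simplicity:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    struct demo_vlan {
        struct list_head list;
        u16 vid;
    };

    /* Append a discovered VLAN and select the first one on the list,
     * mirroring the shape of fnic_fcoe_process_vlan_resp(). Returns the
     * selected VLAN id or a negative errno. */
    static int demo_add_and_select_vlan(struct list_head *vlans,
                                        spinlock_t *vlans_lock, u16 vid)
    {
        struct demo_vlan *vlan;
        unsigned long flags;

        vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
        if (!vlan)
            return -ENOMEM;
        vlan->vid = vid & 0x0fff;       /* VLAN id is the low 12 bits */

        spin_lock_irqsave(vlans_lock, flags);
        list_add_tail(&vlan->list, vlans);
        /* start discovery on the first VLAN in the list, as the driver does */
        vlan = list_first_entry(vlans, struct demo_vlan, list);
        spin_unlock_irqrestore(vlans_lock, flags);

        /* the real driver then calls fnic->set_vlan(fnic, vlan->vid) and
         * mod_timer(&fnic->fip_timer, round_jiffies(sol_time)) */
        return vlan->vid;
    }
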
486 static void fnic_fcoe_start_fcf_disc(struct fnic *fnic)
492 spin_lock_irqsave(&fnic->vlans_lock, flags);
493 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
494 fnic->set_vlan(fnic, vlan->vid);
497 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
500 fcoe_ctlr_link_up(&fnic->ctlr);
503 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
506 static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag)
511 spin_lock_irqsave(&fnic->vlans_lock, flags);
512 if (list_empty(&fnic->vlans)) {
513 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
517 fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
519 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
525 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
528 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
532 static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev)
541 fevt->fnic = fnic;
544 spin_lock_irqsave(&fnic->fnic_lock, flags);
545 list_add_tail(&fevt->list, &fnic->evlist);
546 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
548 schedule_work(&fnic->event_work);
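
fnic_event_enq() (source lines 532-548) is the producer side of the event machinery consumed by fnic_handle_event() above: allocate an event, append it to fnic->evlist under fnic_lock, then schedule event_work. A minimal sketch of that enqueue-and-kick pattern (GFP_ATOMIC is assumed because callers may hold spinlocks; names are illustrative):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>
    #include <linux/errno.h>

    struct demo_event {
        struct list_head list;
        int code;
    };

    /* Append an event to the list under the lock and kick the worker. */
    static int demo_event_enq(struct list_head *evlist, spinlock_t *lock,
                              struct work_struct *event_work, int code)
    {
        struct demo_event *ev;
        unsigned long flags;

        ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
        if (!ev)
            return -ENOMEM;
        ev->code = code;

        spin_lock_irqsave(lock, flags);
        list_add_tail(&ev->list, evlist);
        spin_unlock_irqrestore(lock, flags);

        schedule_work(event_work);
        return 0;
    }
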
551 static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb)
575 if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags)))
581 fnic_fcoe_process_vlan_resp(fnic, skb);
585 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
595 struct fnic *fnic = container_of(work, struct fnic, fip_frame_work);
596 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
601 while ((skb = skb_dequeue(&fnic->fip_frame_queue))) {
602 spin_lock_irqsave(&fnic->fnic_lock, flags);
603 if (fnic->stop_rx_link_events) {
604 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
612 if (fnic->state != FNIC_IN_FC_MODE &&
613 fnic->state != FNIC_IN_ETH_MODE) {
614 skb_queue_head(&fnic->fip_frame_queue, skb);
615 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
618 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
622 if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) {
630 if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) {
633 shost_printk(KERN_INFO, fnic->lport->host,
635 fcoe_ctlr_link_down(&fnic->ctlr);
637 fnic_fcoe_send_vlan_req(fnic);
641 fcoe_ctlr_recv(&fnic->ctlr, skb);
649 * @fnic: fnic instance.
652 static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
669 if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) {
675 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
677 printk(KERN_ERR "fnic ctlr frame trace error!!!");
679 skb_queue_tail(&fnic->fip_frame_queue, skb);
680 queue_work(fnic_fip_queue, &fnic->fip_frame_work);
709 * @fnic: fnic instance.
712 * Called with the fnic lock held.
714 void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
716 u8 *ctl = fnic->ctlr.ctl_src_addr;
717 u8 *data = fnic->data_src_addr;
723 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
725 vnic_dev_del_addr(fnic->vdev, data);
728 vnic_dev_add_addr(fnic->vdev, new);
738 struct fnic *fnic = lport_priv(lport);
740 spin_lock_irq(&fnic->fnic_lock);
741 fnic_update_mac_locked(fnic, new);
742 spin_unlock_irq(&fnic->fnic_lock);
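
fnic_update_mac_locked() (source lines 709-728) swaps the data-path source MAC programmed into the vNIC, and fnic_update_mac() is the thin wrapper that takes fnic_lock around it, the usual "_locked helper plus locking wrapper" split. A sketch of that split with hypothetical names and a trimmed device struct (the real driver also calls vnic_dev_del_addr()/vnic_dev_add_addr()):

    #include <linux/spinlock.h>
    #include <linux/etherdevice.h>

    struct demo_dev {
        spinlock_t lock;
        u8 data_src_addr[ETH_ALEN];
    };

    /* Caller must hold d->lock; mirrors fnic_update_mac_locked(). */
    static void demo_update_mac_locked(struct demo_dev *d, const u8 *new_mac)
    {
        if (ether_addr_equal(d->data_src_addr, new_mac))
            return;                     /* nothing to do */
        /* the real driver removes the old vNIC address and adds the new one */
        ether_addr_copy(d->data_src_addr, new_mac);
    }

    /* Locking wrapper; mirrors fnic_update_mac(). */
    static void demo_update_mac(struct demo_dev *d, const u8 *new_mac)
    {
        spin_lock_irq(&d->lock);
        demo_update_mac_locked(d, new_mac);
        spin_unlock_irq(&d->lock);
    }
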
761 struct fnic *fnic = lport_priv(lport);
773 fnic_update_mac(lport, fnic->ctlr.ctl_src_addr);
774 fnic_set_eth_mode(fnic);
782 fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp);
788 spin_lock_irq(&fnic->fnic_lock);
789 if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE)
790 fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;
792 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
793 "Unexpected fnic state %s while"
795 fnic_state_to_str(fnic->state));
796 spin_unlock_irq(&fnic->fnic_lock);
799 spin_unlock_irq(&fnic->fnic_lock);
805 ret = fnic_flogi_reg_handler(fnic, port_id);
808 spin_lock_irq(&fnic->fnic_lock);
809 if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
810 fnic->state = FNIC_IN_ETH_MODE;
811 spin_unlock_irq(&fnic->fnic_lock);
820 struct fnic *fnic = vnic_dev_priv(rq->vdev);
823 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
838 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
872 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
876 if (fnic_import_rq_eth_pkt(fnic, skb))
881 shost_printk(KERN_ERR, fnic->lport->host,
882 "fnic rq_cmpl wrong cq type x%x\n", type);
888 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
889 "fnic rq_cmpl fcoe x%x fcsok x%x"
897 spin_lock_irqsave(&fnic->fnic_lock, flags);
898 if (fnic->stop_rx_link_events) {
899 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
902 fr_dev(fp) = fnic->lport;
903 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
904 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
906 printk(KERN_ERR "fnic ctlr frame trace error!!!");
909 skb_queue_tail(&fnic->frame_queue, skb);
910 queue_work(fnic_event_queue, &fnic->frame_work);
922 struct fnic *fnic = vnic_dev_priv(vdev);
924 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index,
930 int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
936 for (i = 0; i < fnic->rq_count; i++) {
937 cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
941 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
943 shost_printk(KERN_ERR, fnic->lport->host,
960 struct fnic *fnic = vnic_dev_priv(rq->vdev);
969 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
977 pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
978 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
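
fnic_alloc_rq_frame() (source lines 960-978) allocates a receive buffer and maps it for DMA, bailing out if the mapping fails; fnic_free_rq_buf() below undoes the mapping. A hedged sketch of the map-with-error-check step; the buffer length, device pointer, and helper name are placeholders, not the driver's own:

    #include <linux/skbuff.h>
    #include <linux/dma-mapping.h>

    /* Allocate and DMA-map one receive buffer; returns its bus address
     * through *pa, or NULL on failure. */
    static struct sk_buff *demo_alloc_rx_frame(struct device *dev,
                                               unsigned int len, dma_addr_t *pa)
    {
        struct sk_buff *skb;

        skb = dev_alloc_skb(len);
        if (!skb)
            return NULL;

        skb_put(skb, len);              /* expose the full buffer to the NIC */
        *pa = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, *pa)) {
            kfree_skb(skb);             /* never post an unmapped buffer */
            return NULL;
        }
        return skb;
    }
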
995 struct fnic *fnic = vnic_dev_priv(rq->vdev);
997 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1011 struct fnic *fnic = fnic_from_ctlr(fip);
1012 struct vnic_wq *wq = &fnic->wq[0];
1018 if (!fnic->vlan_hw_insert) {
1024 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
1025 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
1027 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1030 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
1032 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1036 pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
1038 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
1043 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1049 fnic->vlan_id, 1);
1050 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1054 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1055 dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
1063 static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
1065 struct vnic_wq *wq = &fnic->wq[0];
1080 fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb))
1083 if (!fnic->vlan_hw_insert) {
1089 vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
1098 if (fnic->ctlr.map_dest)
1101 memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN);
1102 memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
1112 pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
1113 if (dma_mapping_error(&fnic->pdev->dev, pa)) {
1119 if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
1121 printk(KERN_ERR "fnic ctlr frame trace error!!!");
1124 spin_lock_irqsave(&fnic->wq_lock[0], flags);
1127 dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
1134 fnic->vlan_id, 1, 1, 1);
1137 spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
1152 struct fnic *fnic = lport_priv(lp);
1155 if (fnic->in_remove) {
1164 spin_lock_irqsave(&fnic->fnic_lock, flags);
1165 if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) {
1166 skb_queue_tail(&fnic->tx_queue, fp_skb(fp));
1167 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1170 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1172 return fnic_send_frame(fnic, fp);
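
fnic_send() (source lines 1152-1172) is the libfc transmit hook: while the interface is mid-reset it parks the frame on fnic->tx_queue (drained later by fnic_flush_tx(), next group of fragments), otherwise it hands the frame straight to fnic_send_frame(). A sketch of that queue-or-send decision with a simplified context:

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct demo_port {
        spinlock_t lock;
        int ready;                      /* stands in for the FC/ETH mode check */
        struct sk_buff_head tx_queue;
    };

    /* Park the frame while the firmware is transitioning, send it now
     * otherwise; mirrors the decision in fnic_send(). */
    static int demo_send(struct demo_port *p, struct sk_buff *skb)
    {
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        if (!p->ready) {
            skb_queue_tail(&p->tx_queue, skb);  /* flushed later */
            spin_unlock_irqrestore(&p->lock, flags);
            return 0;
        }
        spin_unlock_irqrestore(&p->lock, flags);

        /* the real driver calls fnic_send_frame(fnic, fp) here */
        return 0;
    }
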
1177 * @fnic: fnic device
1185 void fnic_flush_tx(struct fnic *fnic)
1190 while ((skb = skb_dequeue(&fnic->tx_queue))) {
1192 fnic_send_frame(fnic, fp);
1197 * fnic_set_eth_mode() - put fnic into ethernet mode.
1198 * @fnic: fnic device
1200 * Called without fnic lock held.
1202 static void fnic_set_eth_mode(struct fnic *fnic)
1208 spin_lock_irqsave(&fnic->fnic_lock, flags);
1210 old_state = fnic->state;
1215 fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
1216 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1218 ret = fnic_fw_reset_handler(fnic);
1220 spin_lock_irqsave(&fnic->fnic_lock, flags);
1221 if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
1224 fnic->state = old_state;
1231 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
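
fnic_set_eth_mode() (source lines 1197-1231) moves the device toward Ethernet/FIP mode by recording a transitional state, issuing a firmware reset outside the lock, and rolling the state back if the reset could not be issued (lines 1221-1224 show the state re-check after the reset). A sketch of that optimistic-transition-with-rollback pattern using an assumed state enum:

    #include <linux/spinlock.h>

    enum demo_state {
        DEMO_IN_FC_MODE,
        DEMO_IN_FC_TRANS_ETH_MODE,
        DEMO_IN_ETH_MODE,
    };

    struct demo_hba {
        spinlock_t lock;
        enum demo_state state;
    };

    static int demo_fw_reset(struct demo_hba *h)
    {
        return 0;                       /* stand-in for fnic_fw_reset_handler() */
    }

    /* Record the transitional state, drop the lock to talk to firmware,
     * restore the old state if the reset could not be issued. */
    static void demo_set_eth_mode(struct demo_hba *h)
    {
        enum demo_state old_state;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&h->lock, flags);
        old_state = h->state;
        h->state = DEMO_IN_FC_TRANS_ETH_MODE;
        spin_unlock_irqrestore(&h->lock, flags);

        ret = demo_fw_reset(h);

        spin_lock_irqsave(&h->lock, flags);
        if (ret && h->state == DEMO_IN_FC_TRANS_ETH_MODE)
            h->state = old_state;       /* reset never reached firmware */
        spin_unlock_irqrestore(&h->lock, flags);
    }
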
1240 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1242 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1253 struct fnic *fnic = vnic_dev_priv(vdev);
1256 spin_lock_irqsave(&fnic->wq_lock[q_number], flags);
1257 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
1259 spin_unlock_irqrestore(&fnic->wq_lock[q_number], flags);
1264 int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do)
1269 for (i = 0; i < fnic->raw_wq_count; i++) {
1270 wq_work_done += vnic_cq_service(&fnic->cq[fnic->rq_count+i],
1283 struct fnic *fnic = vnic_dev_priv(wq->vdev);
1285 dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
1292 void fnic_fcoe_reset_vlans(struct fnic *fnic)
1303 spin_lock_irqsave(&fnic->vlans_lock, flags);
1304 if (!list_empty(&fnic->vlans)) {
1305 list_for_each_entry_safe(vlan, next, &fnic->vlans, list) {
1310 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1313 void fnic_handle_fip_timer(struct fnic *fnic)
1317 struct fnic_stats *fnic_stats = &fnic->fnic_stats;
1320 spin_lock_irqsave(&fnic->fnic_lock, flags);
1321 if (fnic->stop_rx_link_events) {
1322 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1325 spin_unlock_irqrestore(&fnic->fnic_lock, flags);
1327 if (fnic->ctlr.mode == FIP_MODE_NON_FIP)
1330 spin_lock_irqsave(&fnic->vlans_lock, flags);
1331 if (list_empty(&fnic->vlans)) {
1332 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1336 shost_printk(KERN_DEBUG, fnic->lport->host,
1338 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1342 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list);
1343 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1348 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
1350 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1353 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1357 shost_printk(KERN_DEBUG, fnic->lport->host,
1359 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1367 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
1373 if (list_empty(&fnic->vlans)) {
1375 spin_unlock_irqrestore(&fnic->vlans_lock,
1377 FNIC_FCS_DBG(KERN_INFO, fnic->lport->host,
1380 fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC);
1384 vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan,
1386 fnic->set_vlan(fnic, vlan->vid);
1389 spin_unlock_irqrestore(&fnic->vlans_lock, flags);
1394 mod_timer(&fnic->fip_timer, round_jiffies(sol_time));
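
The final group (source lines 1313-1394) is fnic_handle_fip_timer(): under vlans_lock it checks the current VLAN, queues a fresh FNIC_EVT_START_VLAN_DISC event when the list is empty or retries are exhausted, otherwise reprograms a VLAN via fnic->set_vlan(), and re-arms fip_timer. A small sketch of the re-arm step; DEMO_SOL_TOV is an assumed timeout, not a value taken from the driver:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    /* Re-arm a FIP solicitation timer the way the listing shows:
     * mod_timer() with a round_jiffies() deadline. */
    #define DEMO_SOL_TOV 2000           /* assumed timeout in milliseconds */

    static void demo_rearm_fip_timer(struct timer_list *fip_timer)
    {
        unsigned long sol_time = jiffies + msecs_to_jiffies(DEMO_SOL_TOV);

        mod_timer(fip_timer, round_jiffies(sol_time));
    }
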