Lines matching refs: qedf

23 #include "qedf.h"
72 "by the qedf driver. Default is 0 (use OS default).");
122 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
126 vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
127 qedf->vlan_id = vlan_id_tmp;
128 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
130 vlan_id_tmp, qedf->prio);
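
The fragment above folds the 802.1p priority into the VLAN tag before storing it. Below is a minimal userspace sketch of that bit math, assuming the kernel's VLAN_PRIO_SHIFT of 13 and the driver's usual FCoE priority of 3 (QEDF_DEFAULT_PRIO); VLAN id 1002 is just an example value:

#include <stdio.h>

#define VLAN_PRIO_SHIFT 13	/* matches include/linux/if_vlan.h */

int main(void)
{
	unsigned short vlan_id = 1002;	/* example FCoE VLAN */
	unsigned short prio = 3;	/* QEDF_DEFAULT_PRIO is 3 */
	unsigned short tci = vlan_id | (prio << VLAN_PRIO_SHIFT);

	printf("tci=0x%04x -> vlan=%u prio=%u\n",
	       tci, tci & 0xfff, tci >> VLAN_PRIO_SHIFT);
	return 0;
}
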
134 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
137 while (qedf->fipvlan_retries--) {
139 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
140 QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
144 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
145 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
149 if (qedf->vlan_id > 0) {
150 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
152 qedf->vlan_id);
153 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
154 fcoe_ctlr_link_up(&qedf->ctlr);
158 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
159 "Retry %d.\n", qedf->fipvlan_retries);
160 init_completion(&qedf->fipvlan_compl);
161 qedf_fcoe_send_vlan_req(qedf);
162 wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
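
qedf_initiate_fipvlan_req() bounds FIP VLAN discovery: send a request, sleep on a completion for at most one second, and retry until fipvlan_retries runs out. Here is a hedged userspace analogue of that send, timed-wait, retry loop, with a pthread condition variable standing in for the kernel completion; the fabric thread, its 2-second response delay, and VLAN 1002 are all invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t compl_cv = PTHREAD_COND_INITIALIZER;
static int vlan_id;			/* 0 = not yet discovered */

static void *fabric(void *arg)		/* stands in for the FIP responder */
{
	(void)arg;
	sleep(2);			/* pretend the answer arrives after 2s */
	pthread_mutex_lock(&lock);
	vlan_id = 1002;
	pthread_cond_signal(&compl_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int retries = 60;		/* qedf's default retry budget */

	pthread_create(&t, NULL, fabric, NULL);
	pthread_mutex_lock(&lock);
	while (retries-- && !vlan_id) {
		struct timespec ts;

		/* "send" the FIP VLAN request here, then wait up to 1s */
		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 1;
		pthread_cond_timedwait(&compl_cv, &lock, &ts);
		printf("retry, %d attempts left\n", retries);
	}
	pthread_mutex_unlock(&lock);
	printf("vlan_id=%d\n", vlan_id);
	pthread_join(t, NULL);
	return 0;
}
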
170 struct qedf_ctx *qedf =
174 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
175 atomic_read(&qedf->link_state));
177 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
178 rc = qedf_initiate_fipvlan_req(qedf);
182 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
183 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
185 qedf->vlan_id = 0;
194 QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
197 qedf_set_vlan_id(qedf, qedf_fallback_vlan);
203 eth_zero_addr(qedf->data_src_addr);
204 fcoe_ctlr_link_up(&qedf->ctlr);
205 } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
211 atomic_set(&qedf->link_down_tmo_valid, 0);
212 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
214 fcoe_ctlr_link_down(&qedf->ctlr);
215 if (qedf_wait_for_upload(qedf) == false)
216 QEDF_ERR(&qedf->dbg_ctx,
219 qedf->fipvlan_retries = qedf_fipvlan_retries;
226 static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
247 ether_addr_copy(qedf->data_src_addr, granted_mac);
249 } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
250 hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
251 qedf->data_src_addr[0] = fc_map[0];
252 qedf->data_src_addr[1] = fc_map[1];
253 qedf->data_src_addr[2] = fc_map[2];
254 qedf->data_src_addr[3] = fh->fh_d_id[0];
255 qedf->data_src_addr[4] = fh->fh_d_id[1];
256 qedf->data_src_addr[5] = fh->fh_d_id[2];
259 fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
263 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
264 "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
272 struct qedf_ctx *qedf = lport_priv(lport);
274 if (!qedf) {
275 QEDF_ERR(NULL, "qedf is NULL.\n");
284 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
291 qedf->flogi_failed++;
294 qedf_set_data_src_addr(qedf, fp);
295 qedf->flogi_pending = 0;
299 complete(&qedf->flogi_compl);
313 struct qedf_ctx *qedf = lport_priv(lport);
320 qedf->flogi_cnt++;
321 if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
322 schedule_delayed_work(&qedf->stag_work, 2);
325 qedf->flogi_pending++;
333 int qedf_send_flogi(struct qedf_ctx *qedf)
338 lport = qedf->lport;
341 QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
347 QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
351 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
356 init_completion(&qedf->flogi_compl);
368 struct qedf_ctx *qedf =
370 struct fc_lport *lport = qedf->lport;
379 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
386 qedf->ctlr.state = FIP_ST_LINK_WAIT;
387 fcoe_ctlr_link_down(&qedf->ctlr);
393 fcoe_ctlr_link_up(&qedf->ctlr);
396 qedf->fipvlan_retries = qedf_fipvlan_retries;
397 rc = qedf_initiate_fipvlan_req(qedf);
400 qedf_set_vlan_id(qedf, qedf_fallback_vlan);
407 if (qedf->ctlr.sel_fcf) {
408 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
417 QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
422 rval = qedf_send_flogi(qedf);
427 i = wait_for_completion_timeout(&qedf->flogi_compl,
428 qedf->lport->r_a_tov);
430 QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
448 static void qedf_update_link_speed(struct qedf_ctx *qedf,
452 struct fc_lport *lport = qedf->lport;
547 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
551 qed_ops->common->get_link(qedf->cdev, &link);
553 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
554 QEDF_ERR(&qedf->dbg_ctx,
560 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
561 qedf_update_link_speed(qedf, &link);
563 QEDF_ERR(&qedf->dbg_ctx,
567 QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
573 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
579 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
580 QEDF_ERR(&qedf->dbg_ctx,
586 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
587 QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
591 QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d GB/s).\n",
595 cancel_delayed_work(&qedf->link_update);
597 atomic_set(&qedf->link_state, QEDF_LINK_UP);
598 qedf_update_link_speed(qedf, link);
600 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
602 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
604 if (atomic_read(&qedf->link_down_tmo_valid) > 0)
605 queue_delayed_work(qedf->link_update_wq,
606 &qedf->link_recovery, 0);
608 queue_delayed_work(qedf->link_update_wq,
609 &qedf->link_update, 0);
610 atomic_set(&qedf->link_down_tmo_valid, 0);
614 QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
616 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
617 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
623 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
625 atomic_set(&qedf->link_down_tmo_valid, 1);
627 qedf->vlan_id = 0;
628 qedf_update_link_speed(qedf, link);
629 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
637 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
640 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
646 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
647 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
652 atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
663 qedf->prio = qedf_default_prio;
665 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
668 qedf->prio = QEDF_DEFAULT_PRIO;
670 qedf->prio = tmp_prio;
672 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
674 if (atomic_read(&qedf->link_down_tmo_valid) > 0)
675 queue_delayed_work(qedf->link_update_wq,
676 &qedf->link_recovery, 0);
678 queue_delayed_work(qedf->link_update_wq,
679 &qedf->link_update, 0);
680 atomic_set(&qedf->link_down_tmo_valid, 0);
688 struct qedf_ctx *qedf;
690 qedf = (struct qedf_ctx *)cookie;
691 return qedf->flogi_failed;
720 struct qedf_ctx *qedf;
732 qedf = (struct qedf_ctx *)lport_priv(lport);
738 QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
746 QEDF_ERR(&qedf->dbg_ctx,
760 QEDF_ERR(&qedf->dbg_ctx,
769 QEDF_ERR(&qedf->dbg_ctx,
782 QEDF_ERR(&qedf->dbg_ctx,
790 QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
793 QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
800 QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
804 QEDF_ERR(&qedf->dbg_ctx,
809 if (qedf->stop_io_on_error) {
810 qedf_stop_all_io(qedf);
818 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
824 qedf_scsi_done(qedf, io_req, DID_ERROR);
845 QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
848 QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
875 bool qedf_wait_for_upload(struct qedf_ctx *qedf)
881 if (atomic_read(&qedf->num_offloads))
882 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
884 atomic_read(&qedf->num_offloads));
891 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
895 QEDF_ERR(&qedf->dbg_ctx,
899 QEDF_ERR(&qedf->dbg_ctx,
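
qedf_wait_for_upload() is a drain-wait: it waits for the count of offloaded sessions to reach zero or a deadline to expire, then walks the remaining fcports to report stragglers. A loose userspace sketch of the drain idea only; the poll interval, the deadline, and the stand-in decrement that simulates uploads completing are all illustrative, not the driver's actual values:

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int num_offloads = 3;	/* sessions still offloaded */

static int wait_for_upload(int max_polls)
{
	while (max_polls--) {
		if (atomic_load(&num_offloads) == 0)
			return 1;	/* everything uploaded */
		printf("waiting, num_offloads=%d\n",
		       atomic_load(&num_offloads));
		usleep(100 * 1000);	/* 100 ms poll, illustrative */
		atomic_fetch_sub(&num_offloads, 1); /* stand-in: one upload completed */
	}
	return 0;	/* gave up; the caller logs the stragglers */
}

int main(void)
{
	printf("drained=%d\n", wait_for_upload(120));
	return 0;
}
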
911 struct qedf_ctx *qedf;
919 qedf = lport_priv(lport);
921 qedf->flogi_pending = 0;
923 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
924 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
926 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
929 if (qedf_wait_for_upload(qedf) == false) {
930 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
931 WARN_ON(atomic_read(&qedf->num_offloads));
935 qed_ops->common->get_link(qedf->cdev, &if_link);
938 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
943 flush_delayed_work(&qedf->link_update);
946 atomic_set(&qedf->link_state, QEDF_LINK_UP);
947 qedf->vlan_id = 0;
948 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
950 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
958 struct qedf_ctx *qedf;
961 qedf = lport_priv(lport);
963 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
964 test_bit(QEDF_UNLOADING, &qedf->flags))
967 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...\n");
1013 static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
1019 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
1055 * qedf_xmit - qedf FCoE frame transmit function
1060 struct qedf_ctx *qedf;
1077 qedf = (struct qedf_ctx *)lport_priv(lport);
1105 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
1117 if (!qedf->ctlr.sel_fcf) {
1122 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
1123 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
1128 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1129 QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
1135 if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
1140 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
1197 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
1202 if (qedf->ctlr.map_dest)
1206 ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
1209 ether_addr_copy(eh->h_source, qedf->data_src_addr);
1228 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
1235 rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
1237 QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
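
qedf_xmit() wraps each FC frame for Ethernet: pick source/destination MACs, optionally insert the 802.1Q tag, and emit ethertype 0x8906 followed by the FCoE header, the FC frame, and the CRC/EOF trailer. A sketch of the resulting layout using packed userspace structs shaped after include/scsi/fc/fc_fcoe.h; this shows the sizes only, not the driver's actual skb plumbing:

#include <stdio.h>
#include <stdint.h>

struct eth_hdr   { uint8_t dst[6], src[6]; uint16_t proto; } __attribute__((packed));
struct fcoe_hdr  { uint8_t ver; uint8_t resvd[12]; uint8_t sof; } __attribute__((packed));
struct fc_hdr    { uint8_t bytes[24]; } __attribute__((packed));
struct fcoe_trlr { uint32_t crc32; uint8_t eof; uint8_t resvd[3]; } __attribute__((packed));

int main(void)
{
	printf("eth=%zu fcoe=%zu fc=%zu trailer=%zu\n",
	       sizeof(struct eth_hdr), sizeof(struct fcoe_hdr),
	       sizeof(struct fc_hdr), sizeof(struct fcoe_trlr));
	/* A 2112-byte max FC payload needs 14 + 14 + 24 + 2112 + 8 = 2172
	 * bytes on the wire (untagged), which is why FCoE links want
	 * "baby jumbo" MTUs of at least 2158. */
	return 0;
}
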
1245 static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1259 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1262 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
1267 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
1271 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
1292 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
1298 static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1301 dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
1304 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
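
qedf_alloc_sq() sizes the per-session send queue in whole pages and then allocates a PBL with one pointer-sized entry per page, which is what qedf_free_sq() tears down above. A sketch of that arithmetic; the queue depth and WQE size below are assumptions for illustration, not the driver's actual constants:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int num_entries = 128;	/* hypothetical SQ depth */
	unsigned int wqe_size = 32;	/* hypothetical WQE size */
	unsigned int sq_mem_size = ALIGN_UP(num_entries * wqe_size, PAGE_SIZE);
	unsigned int sq_pbl_size = sq_mem_size / PAGE_SIZE * sizeof(void *);

	printf("sq_mem_size=%u bytes, pbl entries=%u, pbl size=%u bytes\n",
	       sq_mem_size, sq_mem_size / PAGE_SIZE, sq_pbl_size);
	return 0;
}
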
1308 static int qedf_offload_connection(struct qedf_ctx *qedf,
1316 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
1318 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
1321 QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
1327 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
1341 ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
1343 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
1346 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
1351 conn_info.vlan_tag = qedf->vlan_id <<
1354 qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
1359 port_id = fc_host_port_id(qedf->lport->host);
1377 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
1387 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
1389 QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
1393 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
1401 qed_ops->release_conn(qedf->cdev, fcport->handle);
1407 static void qedf_upload_connection(struct qedf_ctx *qedf,
1417 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
1420 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
1423 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
1424 qed_ops->release_conn(qedf->cdev, fcport->handle);
1426 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
1430 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
1435 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
1442 qedf_upload_connection(qedf, fcport);
1443 qedf_free_sq(qedf, fcport);
1445 fcport->qedf = NULL;
1451 * initiated target login. qedf can proceed with initiating the session
1458 struct qedf_ctx *qedf = lport_priv(lport);
1466 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
1472 QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
1478 fcport->qedf = qedf;
1480 if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
1481 QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
1492 QEDF_WARN(&(qedf->dbg_ctx), "Session already "
1505 QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
1512 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1517 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1527 rval = qedf_alloc_sq(qedf, fcport);
1529 qedf_cleanup_fcport(qedf, fcport);
1538 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1545 rval = qedf_offload_connection(qedf, fcport);
1547 qedf_cleanup_fcport(qedf, fcport);
1552 spin_lock_irqsave(&qedf->hba_lock, flags);
1553 list_add_rcu(&fcport->peers, &qedf->fcports);
1554 spin_unlock_irqrestore(&qedf->hba_lock, flags);
1561 atomic_inc(&qedf->num_offloads);
1572 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1577 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1583 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1602 qedf_cleanup_fcport(qedf, fcport);
1607 spin_lock_irqsave(&qedf->hba_lock, flags);
1609 spin_unlock_irqrestore(&qedf->hba_lock, flags);
1613 atomic_dec(&qedf->num_offloads);
1646 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
1648 fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
1650 qedf->ctlr.send = qedf_fip_send;
1651 qedf->ctlr.get_src_addr = qedf_get_src_mac;
1652 ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
1655 static void qedf_setup_fdmi(struct qedf_ctx *qedf)
1657 struct fc_lport *lport = qedf->lport;
1674 pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
1678 pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
1692 if (qedf->pdev->device == QL45xxx) {
1701 if (qedf->pdev->device == QL41xxx) {
1711 FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
1723 static int qedf_lport_setup(struct qedf_ctx *qedf)
1725 struct fc_lport *lport = qedf->lport;
1740 fc_set_wwnn(lport, qedf->wwnn);
1741 fc_set_wwpn(lport, qedf->wwpn);
1743 if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
1744 QEDF_ERR(&qedf->dbg_ctx,
1767 if (qedf->pdev->device == QL45xxx)
1771 if (qedf->pdev->device == QL41xxx)
1775 qedf_setup_fdmi(qedf);
1855 /* Set qedf data specific to this vport */
1961 struct qedf_ctx *qedf = lport_priv(vn_port);
1963 if (!qedf) {
1964 QEDF_ERR(NULL, "qedf is NULL.\n");
1969 set_bit(QEDF_UNLOADING, &qedf->flags);
2020 static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
2022 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
2024 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2027 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2060 struct qedf_ctx *qedf = lport_priv(lport);
2071 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
2076 mutex_lock(&qedf->stats_mutex);
2079 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
2105 qedf_stats->fcp_input_requests += qedf->input_requests;
2106 qedf_stats->fcp_output_requests += qedf->output_requests;
2107 qedf_stats->fcp_control_requests += qedf->control_requests;
2108 qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
2109 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
2111 mutex_unlock(&qedf->stats_mutex);
2189 struct qedf_ctx *qedf = fp->qedf;
2196 que = qedf->global_queues[fp->sb_id];
2217 struct qedf_ctx *qedf = fp->qedf;
2235 que = qedf->global_queues[fp->sb_id];
2258 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2260 qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2269 io_req = &qedf->cmd_mgr->cmds[xid];
2285 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2287 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2298 io_work->qedf = fp->qedf;
2360 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2362 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
2366 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
2372 if (qedf->int_info.msix_cnt) {
2373 for (i = 0; i < qedf->int_info.used_cnt; i++) {
2374 vector_idx = i * qedf->dev_info.common.num_hwfns +
2375 qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2376 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2379 vector = qedf->int_info.msix[vector_idx].vector;
2383 free_irq(vector, &qedf->fp_array[i]);
2386 qed_ops->common->simd_handler_clean(qedf->cdev,
2389 qedf->int_info.used_cnt = 0;
2390 qed_ops->common->set_fp_int(qedf->cdev, 0);
2393 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
2400 for (i = 0; i < qedf->num_queues; i++) {
2401 vector_idx = i * qedf->dev_info.common.num_hwfns +
2402 qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2403 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2406 vector = qedf->int_info.msix[vector_idx].vector;
2407 rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
2408 &qedf->fp_array[i]);
2411 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
2412 qedf_sync_free_irqs(qedf);
2416 qedf->int_info.used_cnt++;
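
On multi-engine (two hw-function) adapters the MSI-X vectors of the engines are interleaved, so both qedf_request_msix_irq() and qedf_sync_free_irqs() compute vector_idx = i * num_hwfns + affinitized-hwfn-index. A tiny sketch of that mapping with invented values:

#include <stdio.h>

int main(void)
{
	int num_hwfns = 2;	/* e.g. a 100G CMT device with two engines */
	int affin_hwfn = 1;	/* FCoE traffic pinned to engine 1 */

	for (int i = 0; i < 4; i++)
		printf("queue %d -> msix vector %d\n",
		       i, i * num_hwfns + affin_hwfn);
	return 0;
}
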
2424 static int qedf_setup_int(struct qedf_ctx *qedf)
2431 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
2435 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
2439 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
2440 "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
2443 if (qedf->int_info.msix_cnt)
2444 return qedf_request_msix_irq(qedf);
2446 qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
2448 qedf->int_info.used_cnt = 1;
2450 QEDF_ERR(&qedf->dbg_ctx,
2456 static void qedf_recv_frame(struct qedf_ctx *qedf,
2471 lport = qedf->lport;
2538 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2544 if (qedf->ctlr.state) {
2545 if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
2546 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2548 mac, qedf->ctlr.dest_addr);
2562 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2573 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2586 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2590 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2596 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2610 struct qedf_ctx *qedf = skb_work->qedf;
2614 if (!qedf) {
2615 QEDF_ERR(NULL, "qedf is NULL\n");
2634 qedf_fip_recv(qedf, skb);
2638 qedf_recv_frame(qedf, skb);
2653 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2656 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
2657 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2665 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2673 skb_work->qedf = qedf;
2674 queue_work(qedf->ll2_recv_wq, &skb_work->work);
2700 fc_exch_recv(io_work->qedf->lport, io_work->fp);
2702 qedf_process_cqe(io_work->qedf, &io_work->cqe);
2707 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2714 sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
2718 QEDF_ERR(&qedf->dbg_ctx,
2724 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2728 QEDF_ERR(&qedf->dbg_ctx,
2737 static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2740 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2744 static void qedf_destroy_sb(struct qedf_ctx *qedf)
2749 for (id = 0; id < qedf->num_queues; id++) {
2750 fp = &(qedf->fp_array[id]);
2753 qedf_free_sb(qedf, fp->sb_info);
2756 kfree(qedf->fp_array);
2759 static int qedf_prepare_sb(struct qedf_ctx *qedf)
2765 qedf->fp_array =
2766 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2769 if (!qedf->fp_array) {
2770 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2775 for (id = 0; id < qedf->num_queues; id++) {
2776 fp = &(qedf->fp_array[id]);
2780 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2784 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2786 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2791 fp->qedf = qedf;
2793 qedf->global_queues[id]->cq_mem_size /
2800 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2813 io_req = &qedf->cmd_mgr->cmds[xid];
2817 QEDF_ERR(&qedf->dbg_ctx,
2825 QEDF_ERR(&qedf->dbg_ctx,
2836 QEDF_ERR(&qedf->dbg_ctx,
2850 qedf_scsi_completion(qedf, cqe, io_req);
2853 qedf_process_els_compl(qedf, cqe, io_req);
2856 qedf_process_tmf_compl(qedf, cqe, io_req);
2859 qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2865 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2867 qedf_process_error_detect(qedf, cqe, io_req);
2871 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2873 qedf_process_cleanup_compl(qedf, cqe, io_req);
2877 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2879 qedf_process_abts_compl(qedf, cqe, io_req);
2883 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2888 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2893 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2895 qedf_process_warning_compl(qedf, cqe, io_req);
2899 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2903 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2909 static void qedf_free_bdq(struct qedf_ctx *qedf)
2913 if (qedf->bdq_pbl_list)
2914 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2915 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2917 if (qedf->bdq_pbl)
2918 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2919 qedf->bdq_pbl, qedf->bdq_pbl_dma);
2922 if (qedf->bdq[i].buf_addr) {
2923 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2924 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2929 static void qedf_free_global_queues(struct qedf_ctx *qedf)
2932 struct global_queue **gl = qedf->global_queues;
2934 for (i = 0; i < qedf->num_queues; i++) {
2939 dma_free_coherent(&qedf->pdev->dev,
2942 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2948 qedf_free_bdq(qedf);
2951 static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2960 qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2961 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2962 if (!qedf->bdq[i].buf_addr) {
2963 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2970 qedf->bdq_pbl_mem_size =
2972 qedf->bdq_pbl_mem_size =
2973 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2975 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2976 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2977 if (!qedf->bdq_pbl) {
2978 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2982 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2984 qedf->bdq_pbl, &qedf->bdq_pbl_dma);
2990 pbl = (struct scsi_bd *)qedf->bdq_pbl;
2992 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
2993 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
3001 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
3003 &qedf->bdq_pbl_list_dma,
3005 if (!qedf->bdq_pbl_list) {
3006 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
3014 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
3016 list = (u64 *)qedf->bdq_pbl_list;
3017 page = qedf->bdq_pbl_list_dma;
3018 for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
3019 *list = qedf->bdq_pbl_dma;
3027 static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
3041 if (!qedf->num_queues) {
3042 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
3050 if (!qedf->p_cpuq) {
3051 QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
3055 qedf->global_queues = kzalloc((sizeof(struct global_queue *)
3056 * qedf->num_queues), GFP_KERNEL);
3057 if (!qedf->global_queues) {
3058 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
3062 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3063 "qedf->global_queues=%p.\n", qedf->global_queues);
3066 status = qedf_alloc_bdq(qedf);
3068 QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
3073 for (i = 0; i < qedf->num_queues; i++) {
3074 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
3076 if (!qedf->global_queues[i]) {
3077 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
3083 qedf->global_queues[i]->cq_mem_size =
3085 qedf->global_queues[i]->cq_mem_size =
3086 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
3088 qedf->global_queues[i]->cq_pbl_size =
3089 (qedf->global_queues[i]->cq_mem_size /
3091 qedf->global_queues[i]->cq_pbl_size =
3092 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
3094 qedf->global_queues[i]->cq =
3095 dma_alloc_coherent(&qedf->pdev->dev,
3096 qedf->global_queues[i]->cq_mem_size,
3097 &qedf->global_queues[i]->cq_dma,
3100 if (!qedf->global_queues[i]->cq) {
3101 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
3106 qedf->global_queues[i]->cq_pbl =
3107 dma_alloc_coherent(&qedf->pdev->dev,
3108 qedf->global_queues[i]->cq_pbl_size,
3109 &qedf->global_queues[i]->cq_pbl_dma,
3112 if (!qedf->global_queues[i]->cq_pbl) {
3113 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
3119 num_pages = qedf->global_queues[i]->cq_mem_size /
3121 page = qedf->global_queues[i]->cq_dma;
3122 pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
3132 qedf->global_queues[i]->cq_cons_idx = 0;
3135 list = (u32 *)qedf->p_cpuq;
3143 for (i = 0; i < qedf->num_queues; i++) {
3144 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
3146 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
3157 qedf_free_global_queues(qedf);
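
Each completion queue is allocated as one coherent region and then described to the firmware page by page: the PBL fill loop records every page's DMA address as a {lo32, hi32} pair via U64_LO()/U64_HI(), and the per-queue PBL base addresses in turn populate the p_cpuq parameter block. A userspace stand-in for the page-chopping loop, with the address and page count invented:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ull
#define U64_LO(x) ((uint32_t)((x) & 0xffffffff))
#define U64_HI(x) ((uint32_t)((x) >> 32))

int main(void)
{
	uint64_t cq_dma = 0x1234500000ull;	/* pretend DMA address */
	uint64_t cq_mem_size = 3 * PAGE_SIZE;	/* pretend 3-page CQ */
	uint32_t pbl[16];
	uint32_t *p = pbl;

	for (uint64_t page = cq_dma; page < cq_dma + cq_mem_size; page += PAGE_SIZE) {
		*p++ = U64_LO(page);	/* low dword first, as in the driver */
		*p++ = U64_HI(page);
	}
	for (int i = 0; i < 6; i += 2)
		printf("entry %d: lo=0x%08x hi=0x%08x\n", i / 2, pbl[i], pbl[i + 1]);
	return 0;
}
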
3161 static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
3176 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
3178 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
3179 qedf->num_queues);
3181 qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
3182 qedf->num_queues * sizeof(struct qedf_glbl_q_params),
3183 &qedf->hw_p_cpuq, GFP_KERNEL);
3185 if (!qedf->p_cpuq) {
3186 QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
3190 rval = qedf_alloc_global_queues(qedf);
3192 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
3207 memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
3210 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
3211 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
3212 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
3213 (u64)qedf->hw_p_cpuq;
3214 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
3216 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
3218 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
3219 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
3222 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
3224 qedf->pf_params.fcoe_pf_params.mtu = 9000;
3225 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
3226 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
3229 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
3230 qedf->bdq_pbl_list_dma;
3231 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
3232 qedf->bdq_pbl_list_num_entries;
3233 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
3235 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3237 qedf->bdq_pbl_list,
3238 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
3239 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
3241 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3243 qedf->pf_params.fcoe_pf_params.cq_num_entries);
3249 static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
3253 if (qedf->p_cpuq) {
3254 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
3255 dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
3256 qedf->hw_p_cpuq);
3259 qedf_free_global_queues(qedf);
3261 kfree(qedf->global_queues);
3288 struct qedf_ctx *qedf = NULL;
3321 qedf = lport_priv(lport);
3322 set_bit(QEDF_PROBING, &qedf->flags);
3323 qedf->lport = lport;
3324 qedf->ctlr.lp = lport;
3325 qedf->pdev = pdev;
3326 qedf->dbg_ctx.pdev = pdev;
3327 qedf->dbg_ctx.host_no = lport->host->host_no;
3328 spin_lock_init(&qedf->hba_lock);
3329 INIT_LIST_HEAD(&qedf->fcports);
3330 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
3331 atomic_set(&qedf->num_offloads, 0);
3332 qedf->stop_io_on_error = false;
3333 pci_set_drvdata(pdev, qedf);
3334 init_completion(&qedf->fipvlan_compl);
3335 mutex_init(&qedf->stats_mutex);
3336 mutex_init(&qedf->flush_mutex);
3337 qedf->flogi_pending = 0;
3339 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
3340 "QLogic FastLinQ FCoE Module qedf %s, "
3346 qedf = pci_get_drvdata(pdev);
3347 set_bit(QEDF_PROBING, &qedf->flags);
3348 lport = qedf->lport;
3351 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
3356 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
3358 if (qedf->io_mempool == NULL) {
3359 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
3362 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
3363 qedf->io_mempool);
3366 qedf->lport->host->host_no);
3367 qedf->link_update_wq = create_workqueue(host_buf);
3368 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
3369 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
3370 INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
3371 INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
3372 qedf->fipvlan_retries = qedf_fipvlan_retries;
3379 qedf->prio = qedf_default_prio;
3381 qedf->prio = QEDF_DEFAULT_PRIO;
3392 qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
3393 if (!qedf->cdev) {
3395 QEDF_ERR(&qedf->dbg_ctx,
3400 QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
3405 /* Learn information crucial for qedf to progress */
3406 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3408 QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
3412 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
3414 qedf->dev_info.common.num_hwfns,
3415 qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
3425 rc = qedf_set_fcoe_pf_param(qedf);
3427 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
3430 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3432 /* Learn information crucial for qedf to progress */
3433 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3435 QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
3440 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
3441 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
3442 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3443 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
3444 qedf->bdq_secondary_prod);
3446 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
3448 rc = qedf_prepare_sb(qedf);
3451 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3461 strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
3462 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
3464 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3472 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3475 rc = qedf_setup_int(qedf);
3477 QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
3481 rc = qed_ops->start(qedf->cdev, &qedf->tasks);
3483 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
3486 task_start = qedf_get_task_mem(&qedf->tasks, 0);
3487 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
3488 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
3490 qedf->tasks.size);
3497 qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
3498 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3500 qedf->bdq_prod_idx);
3501 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
3502 readw(qedf->bdq_primary_prod);
3503 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
3504 readw(qedf->bdq_secondary_prod);
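
Seeding the BDQ producer index above uses the usual doorbell idiom: writew() posts the index to each producer register, and the immediate readw() forces the posted PCI write out to the device before the driver proceeds. A userspace stand-in, where a volatile variable plays the register and the index value is illustrative:

#include <stdio.h>
#include <stdint.h>

static volatile uint16_t fake_prod_reg;	/* stands in for an MMIO register */

static void writew_(uint16_t v, volatile uint16_t *reg) { *reg = v; }
static uint16_t readw_(volatile uint16_t *reg) { return *reg; }

int main(void)
{
	uint16_t bdq_prod_idx = 128;	/* QEDF_BDQ_SIZE in the driver; value illustrative */

	writew_(bdq_prod_idx, &fake_prod_reg);
	(void)readw_(&fake_prod_reg);	/* read-back flushes the posted write */
	printf("producer index now %u\n", fake_prod_reg);
	return 0;
}
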
3506 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3511 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
3512 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3513 qedf->mac);
3522 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3523 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3525 qedf->wwnn = qedf->dev_info.wwnn;
3526 qedf->wwpn = qedf->dev_info.wwpn;
3528 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3530 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
3531 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
3533 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
3534 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3537 qed_ops->common->set_name(qedf->cdev, host_buf);
3540 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3541 if (!qedf->cmd_mgr) {
3542 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3554 QEDF_WARN(&qedf->dbg_ctx,
3562 ether_addr_copy(params.ll2_mac_address, qedf->mac);
3566 qedf->ll2_recv_wq =
3568 if (!qedf->ll2_recv_wq) {
3569 QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
3575 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3580 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3581 rc = qed_ops->ll2->start(qedf->cdev, &params);
3583 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3586 set_bit(QEDF_LL2_STARTED, &qedf->flags);
3589 qedf->vlan_id = 0;
3597 qedf_fcoe_ctlr_setup(qedf);
3600 rc = qedf_lport_setup(qedf);
3602 QEDF_ERR(&(qedf->dbg_ctx),
3608 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
3609 qedf->timer_work_queue =
3611 if (!qedf->timer_work_queue) {
3612 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3621 qedf->lport->host->host_no);
3622 qedf->dpc_wq = create_workqueue(host_buf);
3624 INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
3631 qedf->grcdump_size =
3632 qed_ops->common->dbg_all_data_size(qedf->cdev);
3633 if (qedf->grcdump_size) {
3634 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3635 qedf->grcdump_size);
3637 QEDF_ERR(&(qedf->dbg_ctx),
3639 qedf->grcdump = NULL;
3642 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3644 qedf->grcdump, qedf->grcdump_size);
3646 qedf_create_sysfs_ctx_attr(qedf);
3649 spin_lock_init(&qedf->io_trace_lock);
3650 qedf->io_trace_idx = 0;
3653 init_completion(&qedf->flogi_compl);
3655 status = qed_ops->common->update_drv_state(qedf->cdev, true);
3657 QEDF_ERR(&(qedf->dbg_ctx),
3662 status = qed_ops->common->set_link(qedf->cdev, &link_params);
3664 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3668 fcoe_ctlr_link_up(&qedf->ctlr);
3672 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3674 clear_bit(QEDF_PROBING, &qedf->flags);
3680 if (qedf->ll2_recv_wq)
3681 destroy_workqueue(qedf->ll2_recv_wq);
3682 fc_remove_host(qedf->lport->host);
3683 scsi_remove_host(qedf->lport->host);
3685 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3688 qedf_cmd_mgr_free(qedf->cmd_mgr);
3690 qed_ops->stop(qedf->cdev);
3692 qedf_free_fcoe_pf_param(qedf);
3693 qedf_sync_free_irqs(qedf);
3695 qed_ops->common->slowpath_stop(qedf->cdev);
3697 qed_ops->common->remove(qedf->cdev);
3711 struct qedf_ctx *qedf;
3719 qedf = pci_get_drvdata(pdev);
3725 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3726 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3731 set_bit(QEDF_UNLOADING, &qedf->flags);
3735 fcoe_ctlr_link_down(&qedf->ctlr);
3737 fc_fabric_logoff(qedf->lport);
3739 if (qedf_wait_for_upload(qedf) == false)
3740 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
3743 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3747 cancel_delayed_work_sync(&qedf->link_update);
3748 destroy_workqueue(qedf->link_update_wq);
3749 qedf->link_update_wq = NULL;
3751 if (qedf->timer_work_queue)
3752 destroy_workqueue(qedf->timer_work_queue);
3755 clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3756 qed_ops->ll2->stop(qedf->cdev);
3757 if (qedf->ll2_recv_wq)
3758 destroy_workqueue(qedf->ll2_recv_wq);
3761 qedf_sync_free_irqs(qedf);
3762 qedf_destroy_sb(qedf);
3769 qedf_free_grc_dump_buf(&qedf->grcdump);
3770 qedf_remove_sysfs_ctx_attr(qedf);
3773 fcoe_ctlr_destroy(&qedf->ctlr);
3774 fc_lport_destroy(qedf->lport);
3775 fc_remove_host(qedf->lport->host);
3776 scsi_remove_host(qedf->lport->host);
3779 qedf_cmd_mgr_free(qedf->cmd_mgr);
3782 fc_exch_mgr_free(qedf->lport);
3783 fc_lport_free_stats(qedf->lport);
3786 qedf_wait_for_vport_destroy(qedf);
3793 qed_ops->stop(qedf->cdev);
3796 if (qedf->dpc_wq) {
3798 destroy_workqueue(qedf->dpc_wq);
3799 qedf->dpc_wq = NULL;
3804 qedf_free_fcoe_pf_param(qedf);
3806 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3810 rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3812 QEDF_ERR(&(qedf->dbg_ctx),
3815 qed_ops->common->slowpath_stop(qedf->cdev);
3816 qed_ops->common->remove(qedf->cdev);
3818 mempool_destroy(qedf->io_mempool);
3822 scsi_host_put(qedf->lport->host);
3836 struct qedf_ctx *qedf =
3839 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3840 qedf_capture_grc_dump(qedf);
3845 struct qedf_ctx *qedf = dev;
3847 QEDF_ERR(&(qedf->dbg_ctx),
3851 if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
3852 QEDF_ERR(&(qedf->dbg_ctx),
3859 schedule_delayed_work(&qedf->board_disable_work, 0);
3866 qed_ops->common->attn_clr_enable(qedf->cdev, true);
3870 qed_ops->common->attn_clr_enable(qedf->cdev, true);
3873 qed_ops->common->recovery_process(qedf->cdev);
3886 struct qedf_ctx *qedf = dev;
3893 if (!qedf) {
3894 QEDF_ERR(NULL, "qedf is null.\n");
3898 if (test_bit(QEDF_PROBING, &qedf->flags)) {
3899 QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
3903 lport = qedf->lport;
3926 if (qedf->ctlr.sel_fcf) {
3928 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3932 /* For qedf we're either link down or fabric attach */
3965 fcoe->tx_lun_rst = qedf->lun_resets;
3968 fcoe->abort_task_sets = qedf->packet_aborts;
3971 fcoe->scsi_busy = qedf->busy;
3974 fcoe->scsi_tsk_full = qedf->task_set_fulls;
3980 struct qedf_ctx *qedf =
3983 if (!qedf) {
3984 QEDF_ERR(NULL, "qedf is NULL");
3987 QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
3988 qedf_ctx_soft_reset(qedf->lport);
3998 struct qedf_ctx *qedf;
4005 qedf = pci_get_drvdata(pdev);
4007 QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
4017 struct qedf_ctx *qedf = dev;
4019 QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
4020 schedule_delayed_work(&qedf->recovery_work, 0);
4025 struct qedf_ctx *qedf =
4028 if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
4035 qed_ops->common->recovery_prolog(qedf->cdev);
4037 QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
4038 __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
4044 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
4045 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
4046 __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
4047 clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
4048 QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
4054 struct qedf_ctx *qedf;
4061 qedf = (struct qedf_ctx *)dev;
4064 ether_addr_copy(data->mac[0], qedf->mac);
4111 qedf_dbg_init("qedf");