Lines Matching refs:qedf
23 #include "qedf.h"
72 "by the qedf driver. Default is 0 (use OS default).");
122 void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id)
126 vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT);
127 qedf->vlan_id = vlan_id_tmp;
128 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
130 vlan_id_tmp, qedf->prio);
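Lines 126-127 above compose the working tag by OR-ing the FIP-discovered VLAN ID with the DCBx priority shifted into the 802.1Q PCP bits. A minimal sketch of that layout, using the standard <linux/if_vlan.h> definitions (VLAN_PRIO_SHIFT is 13, VLAN_VID_MASK is 0x0fff); compose_vlan_tci is a hypothetical helper name:

#include <linux/if_vlan.h>

/* 16-bit 802.1Q TCI: priority (PCP) in the top 3 bits, VLAN ID in
 * the low 12 bits; the same shape qedf_set_vlan_id() stores. */
static u16 compose_vlan_tci(u16 vlan_id, u8 prio)
{
	return (vlan_id & VLAN_VID_MASK) | (prio << VLAN_PRIO_SHIFT);
}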
134 static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf)
137 while (qedf->fipvlan_retries--) {
139 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
140 QEDF_ERR(&qedf->dbg_ctx, "Link not up.\n");
144 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
145 QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n");
149 if (qedf->vlan_id > 0) {
150 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
152 qedf->vlan_id);
153 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
154 fcoe_ctlr_link_up(&qedf->ctlr);
158 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
159 "Retry %d.\n", qedf->fipvlan_retries);
160 init_completion(&qedf->fipvlan_compl);
161 qedf_fcoe_send_vlan_req(qedf);
162 wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
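Lines 137-162 form a bounded retry loop for FIP VLAN discovery. A condensed sketch of the pattern, with the helper calls taken as given from the fragments above and locking/logging elided:

/* Bounded FIP VLAN discovery: each pass re-arms the completion,
 * sends a FIP VLAN request, then waits up to 1s for the response
 * handler to set qedf->vlan_id and complete() us. */
while (qedf->fipvlan_retries--) {
	if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN)
		return false;	/* link dropped mid-discovery */
	if (test_bit(QEDF_UNLOADING, &qedf->flags))
		return false;	/* driver is tearing down */
	if (qedf->vlan_id > 0)
		return true;	/* a response already arrived */

	init_completion(&qedf->fipvlan_compl);
	qedf_fcoe_send_vlan_req(qedf);
	wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ);
}
return false;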
170 struct qedf_ctx *qedf =
174 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Entered. link_state=%d.\n",
175 atomic_read(&qedf->link_state));
177 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
178 rc = qedf_initiate_fipvlan_req(qedf);
182 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
183 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
185 qedf->vlan_id = 0;
194 QEDF_WARN(&(qedf->dbg_ctx), "Did not receive FIP VLAN "
197 qedf_set_vlan_id(qedf, qedf_fallback_vlan);
203 eth_zero_addr(qedf->data_src_addr);
204 fcoe_ctlr_link_up(&qedf->ctlr);
205 } else if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
211 atomic_set(&qedf->link_down_tmo_valid, 0);
212 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
214 fcoe_ctlr_link_down(&qedf->ctlr);
215 if (!qedf_wait_for_upload(qedf))
216 QEDF_ERR(&qedf->dbg_ctx,
219 qedf->fipvlan_retries = qedf_fipvlan_retries;
226 static void qedf_set_data_src_addr(struct qedf_ctx *qedf, struct fc_frame *fp)
247 ether_addr_copy(qedf->data_src_addr, granted_mac);
249 } else if (qedf->ctlr.sel_fcf->fc_map != 0) {
250 hton24(fc_map, qedf->ctlr.sel_fcf->fc_map);
251 qedf->data_src_addr[0] = fc_map[0];
252 qedf->data_src_addr[1] = fc_map[1];
253 qedf->data_src_addr[2] = fc_map[2];
254 qedf->data_src_addr[3] = fh->fh_d_id[0];
255 qedf->data_src_addr[4] = fh->fh_d_id[1];
256 qedf->data_src_addr[5] = fh->fh_d_id[2];
259 fc_fcoe_set_mac(qedf->data_src_addr, fh->fh_d_id);
263 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
264 "QEDF data_src_mac=%pM method=%d.\n", qedf->data_src_addr, method);
272 struct qedf_ctx *qedf = lport_priv(lport);
274 if (!qedf) {
275 QEDF_ERR(NULL, "qedf is NULL.\n");
284 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
291 qedf->flogi_failed++;
294 qedf_set_data_src_addr(qedf, fp);
295 qedf->flogi_pending = 0;
299 complete(&qedf->flogi_compl);
313 struct qedf_ctx *qedf = lport_priv(lport);
320 qedf->flogi_cnt++;
321 if (qedf->flogi_pending >= QEDF_FLOGI_RETRY_CNT) {
322 schedule_delayed_work(&qedf->stag_work, 2);
325 qedf->flogi_pending++;
333 int qedf_send_flogi(struct qedf_ctx *qedf)
338 lport = qedf->lport;
341 QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n");
347 QEDF_ERR(&(qedf->dbg_ctx), "fc_frame_alloc failed.\n");
351 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
356 init_completion(&qedf->flogi_compl);
368 struct qedf_ctx *qedf =
370 struct fc_lport *lport = qedf->lport;
379 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
386 qedf->ctlr.state = FIP_ST_LINK_WAIT;
387 fcoe_ctlr_link_down(&qedf->ctlr);
393 fcoe_ctlr_link_up(&qedf->ctlr);
396 qedf->fipvlan_retries = qedf_fipvlan_retries;
397 rc = qedf_initiate_fipvlan_req(qedf);
400 qedf_set_vlan_id(qedf, qedf_fallback_vlan);
407 if (qedf->ctlr.sel_fcf) {
408 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
417 QEDF_ERR(&(qedf->dbg_ctx), "Exhausted retries waiting for "
422 rval = qedf_send_flogi(qedf);
427 i = wait_for_completion_timeout(&qedf->flogi_compl,
428 qedf->lport->r_a_tov);
430 QEDF_ERR(&(qedf->dbg_ctx), "FLOGI timed out.\n");
448 static void qedf_update_link_speed(struct qedf_ctx *qedf,
452 struct fc_lport *lport = qedf->lport;
547 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
551 qed_ops->common->get_link(qedf->cdev, &link);
553 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
554 QEDF_ERR(&qedf->dbg_ctx,
560 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP)
561 qedf_update_link_speed(qedf, &link);
563 QEDF_ERR(&qedf->dbg_ctx,
567 QEDF_ERR(&qedf->dbg_ctx, "link_up is not set.\n");
573 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
579 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
580 QEDF_ERR(&qedf->dbg_ctx,
586 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) {
587 QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC,
591 QEDF_ERR(&(qedf->dbg_ctx), "LINK UP (%d Gb/s).\n",
595 cancel_delayed_work(&qedf->link_update);
597 atomic_set(&qedf->link_state, QEDF_LINK_UP);
598 qedf_update_link_speed(qedf, link);
600 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE ||
602 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
604 if (atomic_read(&qedf->link_down_tmo_valid) > 0)
605 queue_delayed_work(qedf->link_update_wq,
606 &qedf->link_recovery, 0);
608 queue_delayed_work(qedf->link_update_wq,
609 &qedf->link_update, 0);
610 atomic_set(&qedf->link_down_tmo_valid, 0);
614 QEDF_ERR(&(qedf->dbg_ctx), "LINK DOWN.\n");
616 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
617 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
623 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
625 atomic_set(&qedf->link_down_tmo_valid, 1);
627 qedf->vlan_id = 0;
628 qedf_update_link_speed(qedf, link);
629 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
637 struct qedf_ctx *qedf = (struct qedf_ctx *)dev;
640 QEDF_ERR(&(qedf->dbg_ctx), "DCBx event valid=%d enabled=%d fcoe "
646 if (atomic_read(&qedf->dcbx) == QEDF_DCBX_DONE) {
647 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
652 atomic_set(&qedf->dcbx, QEDF_DCBX_DONE);
663 qedf->prio = qedf_default_prio;
665 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
668 qedf->prio = QEDF_DEFAULT_PRIO;
670 qedf->prio = tmp_prio;
672 if (atomic_read(&qedf->link_state) == QEDF_LINK_UP &&
674 if (atomic_read(&qedf->link_down_tmo_valid) > 0)
675 queue_delayed_work(qedf->link_update_wq,
676 &qedf->link_recovery, 0);
678 queue_delayed_work(qedf->link_update_wq,
679 &qedf->link_update, 0);
680 atomic_set(&qedf->link_down_tmo_valid, 0);
688 struct qedf_ctx *qedf;
690 qedf = (struct qedf_ctx *)cookie;
691 return qedf->flogi_failed;
720 struct qedf_ctx *qedf;
732 qedf = (struct qedf_ctx *)lport_priv(lport);
738 QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
746 QEDF_ERR(&qedf->dbg_ctx,
760 QEDF_ERR(&qedf->dbg_ctx,
769 QEDF_ERR(&qedf->dbg_ctx,
782 QEDF_ERR(&qedf->dbg_ctx,
790 QEDF_ERR(&qedf->dbg_ctx, "ABTS succeeded\n");
793 QEDF_ERR(&qedf->dbg_ctx, "ABTS failed\n");
800 QEDF_ERR(&qedf->dbg_ctx, "link not ready.\n");
804 QEDF_ERR(&qedf->dbg_ctx,
809 if (qedf->stop_io_on_error) {
810 qedf_stop_all_io(qedf);
818 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
824 qedf_scsi_done(qedf, io_req, DID_ERROR);
845 QEDF_ERR(&(qedf->dbg_ctx), "ABTS succeeded, xid=0x%x.\n",
848 QEDF_ERR(&(qedf->dbg_ctx), "ABTS failed, xid=0x%x.\n",
875 bool qedf_wait_for_upload(struct qedf_ctx *qedf)
881 if (atomic_read(&qedf->num_offloads))
882 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
884 atomic_read(&qedf->num_offloads));
891 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
895 QEDF_ERR(&qedf->dbg_ctx,
899 QEDF_ERR(&qedf->dbg_ctx,
911 struct qedf_ctx *qedf;
919 qedf = lport_priv(lport);
921 qedf->flogi_pending = 0;
923 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
924 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
926 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
929 if (!qedf_wait_for_upload(qedf)) {
930 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
931 WARN_ON(atomic_read(&qedf->num_offloads));
935 qed_ops->common->get_link(qedf->cdev, &if_link);
938 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
943 flush_delayed_work(&qedf->link_update);
946 atomic_set(&qedf->link_state, QEDF_LINK_UP);
947 qedf->vlan_id = 0;
948 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
950 queue_delayed_work(qedf->link_update_wq, &qedf->link_update,
958 struct qedf_ctx *qedf;
961 qedf = lport_priv(lport);
963 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN ||
964 test_bit(QEDF_UNLOADING, &qedf->flags))
967 QEDF_ERR(&(qedf->dbg_ctx), "HOST RESET Issued...\n");
1014 static struct qedf_rport *qedf_fcport_lookup(struct qedf_ctx *qedf, u32 port_id)
1020 list_for_each_entry_rcu(fcport, &qedf->fcports, peers) {
1056 * qedf_xmit - qedf FCoE frame transmit function
1061 struct qedf_ctx *qedf;
1077 qedf = (struct qedf_ctx *)lport_priv(lport);
1105 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
1117 if (!qedf->ctlr.sel_fcf) {
1122 if (!test_bit(QEDF_LL2_STARTED, &qedf->flags)) {
1123 QEDF_WARN(&(qedf->dbg_ctx), "LL2 not started\n");
1128 if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
1129 QEDF_WARN(&(qedf->dbg_ctx), "qedf link down\n");
1135 if (fcoe_ctlr_els_send(&qedf->ctlr, lport, skb))
1140 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
1197 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
1202 if (qedf->ctlr.map_dest)
1206 ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
1209 ether_addr_copy(eh->h_source, qedf->data_src_addr);
1226 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame send: "
1233 rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
1235 QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
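The tail of qedf_xmit() (lines 1197-1235) finishes the frame for the wire: a VLAN tag queued for hardware insertion, the Ethernet header addressed to the FCF and sourced from the FPMA, then a hand-off to the qed light-L2 transmit op. A condensed sketch; skb construction and the map_dest branch are elided:

/* Finish an outgoing FCoE frame: hardware-offloaded VLAN tag,
 * destination = selected FCF, source = our FPMA, then LL2 xmit. */
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), qedf->vlan_id);
ether_addr_copy(eh->h_dest, qedf->ctlr.dest_addr);
ether_addr_copy(eh->h_source, qedf->data_src_addr);

rc = qed_ops->ll2->start_xmit(qedf->cdev, skb, 0);
if (rc) {
	QEDF_ERR(&qedf->dbg_ctx, "start_xmit failed rc = %d.\n", rc);
	kfree_skb(skb);
}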
1243 static int qedf_alloc_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1257 fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1260 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue.\n");
1265 fcport->sq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
1269 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate send queue PBL.\n");
1290 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
1296 static void qedf_free_sq(struct qedf_ctx *qedf, struct qedf_rport *fcport)
1299 dma_free_coherent(&qedf->pdev->dev, fcport->sq_pbl_size,
1302 dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
1306 static int qedf_offload_connection(struct qedf_ctx *qedf,
1314 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offloading connection "
1316 rval = qed_ops->acquire_conn(qedf->cdev, &fcport->handle,
1319 QEDF_WARN(&(qedf->dbg_ctx), "Could not acquire connection "
1325 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "portid=%06x "
1339 ether_addr_copy(conn_info.src_mac, qedf->data_src_addr);
1341 ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
1344 conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
1349 conn_info.vlan_tag = qedf->vlan_id <<
1352 qedf->prio << FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT;
1357 port_id = fc_host_port_id(qedf->lport->host);
1375 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN,
1385 rval = qed_ops->offload_conn(qedf->cdev, fcport->handle, &conn_info);
1387 QEDF_WARN(&(qedf->dbg_ctx), "Could not offload connection "
1391 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Offload "
1399 qed_ops->release_conn(qedf->cdev, fcport->handle);
1405 static void qedf_upload_connection(struct qedf_ctx *qedf,
1415 term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
1420 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
1423 qed_ops->destroy_conn(qedf->cdev, fcport->handle, term_params_dma);
1424 qed_ops->release_conn(qedf->cdev, fcport->handle);
1426 dma_free_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE, term_params,
1430 static void qedf_cleanup_fcport(struct qedf_ctx *qedf,
1435 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Cleaning up portid=%06x.\n",
1442 qedf_upload_connection(qedf, fcport);
1443 qedf_free_sq(qedf, fcport);
1445 fcport->qedf = NULL;
1451 * initiated target login. qedf can proceed with initiating the session
1458 struct qedf_ctx *qedf = lport_priv(lport);
1466 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "event = %d, "
1472 QEDF_WARN(&(qedf->dbg_ctx), "rport is NULL.\n");
1478 fcport->qedf = qedf;
1480 if (atomic_read(&qedf->num_offloads) >= QEDF_MAX_SESSIONS) {
1481 QEDF_ERR(&(qedf->dbg_ctx), "Not offloading "
1492 QEDF_WARN(&(qedf->dbg_ctx), "Session already "
1505 QEDF_WARN(&(qedf->dbg_ctx), "rport struct does not "
1512 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1517 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1527 rval = qedf_alloc_sq(qedf, fcport);
1529 qedf_cleanup_fcport(qedf, fcport);
1538 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1545 rval = qedf_offload_connection(qedf, fcport);
1547 qedf_cleanup_fcport(qedf, fcport);
1552 spin_lock_irqsave(&qedf->hba_lock, flags);
1553 list_add_rcu(&fcport->peers, &qedf->fcports);
1554 spin_unlock_irqrestore(&qedf->hba_lock, flags);
1561 atomic_inc(&qedf->num_offloads);
1572 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1577 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1583 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
1602 qedf_cleanup_fcport(qedf, fcport);
1607 spin_lock_irqsave(&qedf->hba_lock, flags);
1609 spin_unlock_irqrestore(&qedf->hba_lock, flags);
1613 atomic_dec(&qedf->num_offloads);
1646 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
1648 fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
1650 qedf->ctlr.send = qedf_fip_send;
1651 qedf->ctlr.get_src_addr = qedf_get_src_mac;
1652 ether_addr_copy(qedf->ctlr.ctl_src_addr, qedf->mac);
1655 static void qedf_setup_fdmi(struct qedf_ctx *qedf)
1657 struct fc_lport *lport = qedf->lport;
1674 pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
1678 pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
1692 if (qedf->pdev->device == QL45xxx) {
1701 if (qedf->pdev->device == QL41xxx) {
1711 FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
1726 static int qedf_lport_setup(struct qedf_ctx *qedf)
1728 struct fc_lport *lport = qedf->lport;
1743 fc_set_wwnn(lport, qedf->wwnn);
1744 fc_set_wwpn(lport, qedf->wwpn);
1746 if (fcoe_libfc_config(lport, &qedf->ctlr, &qedf_lport_template, 0)) {
1747 QEDF_ERR(&qedf->dbg_ctx,
1770 if (qedf->pdev->device == QL45xxx)
1774 if (qedf->pdev->device == QL41xxx)
1778 qedf_setup_fdmi(qedf);
1858 /* Set qedf data specific to this vport */
1965 struct qedf_ctx *qedf = lport_priv(vn_port);
1967 if (!qedf) {
1968 QEDF_ERR(NULL, "qedf is NULL.\n");
1973 set_bit(QEDF_UNLOADING, &qedf->flags);
2024 static void qedf_wait_for_vport_destroy(struct qedf_ctx *qedf)
2026 struct fc_host_attrs *fc_host = shost_to_fc_host(qedf->lport->host);
2028 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2031 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_NPIV,
2064 struct qedf_ctx *qedf = lport_priv(lport);
2075 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate memory for "
2080 mutex_lock(&qedf->stats_mutex);
2083 qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
2109 qedf_stats->fcp_input_requests += qedf->input_requests;
2110 qedf_stats->fcp_output_requests += qedf->output_requests;
2111 qedf_stats->fcp_control_requests += qedf->control_requests;
2112 qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
2113 qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
2115 mutex_unlock(&qedf->stats_mutex);
2193 struct qedf_ctx *qedf = fp->qedf;
2200 que = qedf->global_queues[fp->sb_id];
2221 struct qedf_ctx *qedf = fp->qedf;
2238 que = qedf->global_queues[fp->sb_id];
2260 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2262 qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
2271 io_req = &qedf->cmd_mgr->cmds[xid];
2287 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2289 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2300 io_work->qedf = fp->qedf;
2362 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2364 QEDF_WARN(&(qedf->dbg_ctx), "qedf=%p.\n", qedf);
2368 static void qedf_sync_free_irqs(struct qedf_ctx *qedf)
2374 if (qedf->int_info.msix_cnt) {
2375 for (i = 0; i < qedf->int_info.used_cnt; i++) {
2376 vector_idx = i * qedf->dev_info.common.num_hwfns +
2377 qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2378 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2381 vector = qedf->int_info.msix[vector_idx].vector;
2385 free_irq(vector, &qedf->fp_array[i]);
2388 qed_ops->common->simd_handler_clean(qedf->cdev,
2391 qedf->int_info.used_cnt = 0;
2392 qed_ops->common->set_fp_int(qedf->cdev, 0);
2395 static int qedf_request_msix_irq(struct qedf_ctx *qedf)
2402 for (i = 0; i < qedf->num_queues; i++) {
2403 vector_idx = i * qedf->dev_info.common.num_hwfns +
2404 qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
2405 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
2408 vector = qedf->int_info.msix[vector_idx].vector;
2409 rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
2410 &qedf->fp_array[i]);
2413 QEDF_WARN(&(qedf->dbg_ctx), "request_irq failed.\n");
2414 qedf_sync_free_irqs(qedf);
2418 qedf->int_info.used_cnt++;
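qedf_request_msix_irq() (lines 2395-2418) requests one vector per fastpath queue. The MSI-X table is laid out per hardware function, so each queue's slot is scaled by the hwfn count and offset by the affinitized hwfn index, and any failure unwinds the vectors already granted. Sketch of the loop, assembled from the fragments above:

/* One IRQ per fastpath queue; vector_idx accounts for multi-hwfn
 * devices by scaling with num_hwfns and offsetting by the hwfn
 * this PCI function is affinitized to. */
for (i = 0; i < qedf->num_queues; i++) {
	vector_idx = i * qedf->dev_info.common.num_hwfns +
	    qed_ops->common->get_affin_hwfn_idx(qedf->cdev);
	vector = qedf->int_info.msix[vector_idx].vector;

	rc = request_irq(vector, qedf_msix_handler, 0, "qedf",
	    &qedf->fp_array[i]);
	if (rc) {
		qedf_sync_free_irqs(qedf);	/* free what was granted */
		return rc;
	}
	qedf->int_info.used_cnt++;
}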
2426 static int qedf_setup_int(struct qedf_ctx *qedf)
2433 rc = qed_ops->common->set_fp_int(qedf->cdev, num_online_cpus());
2437 rc = qed_ops->common->get_fp_int(qedf->cdev, &qedf->int_info);
2441 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of msix_cnt = "
2442 "0x%x num of cpus = 0x%x\n", qedf->int_info.msix_cnt,
2445 if (qedf->int_info.msix_cnt)
2446 return qedf_request_msix_irq(qedf);
2448 qed_ops->common->simd_handler_config(qedf->cdev, &qedf,
2450 qedf->int_info.used_cnt = 1;
2452 QEDF_ERR(&qedf->dbg_ctx,
2458 static void qedf_recv_frame(struct qedf_ctx *qedf,
2473 lport = qedf->lport;
2540 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2546 if (qedf->ctlr.state) {
2547 if (!ether_addr_equal(mac, qedf->ctlr.dest_addr)) {
2548 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2550 mac, qedf->ctlr.dest_addr);
2564 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2575 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2588 fcport = qedf_fcport_lookup(qedf, ntoh24(fh->fh_d_id));
2592 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
2598 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, "FCoE frame receive: "
2612 struct qedf_ctx *qedf = skb_work->qedf;
2616 if (!qedf) {
2617 QEDF_ERR(NULL, "qedf is NULL\n");
2636 qedf_fip_recv(qedf, skb);
2640 qedf_recv_frame(qedf, skb);
2655 struct qedf_ctx *qedf = (struct qedf_ctx *)cookie;
2658 if (atomic_read(&qedf->link_state) == QEDF_LINK_DOWN) {
2659 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2,
2667 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate skb_work so "
2675 skb_work->qedf = qedf;
2676 queue_work(qedf->ll2_recv_wq, &skb_work->work);
2702 fc_exch_recv(io_work->qedf->lport, io_work->fp);
2704 qedf_process_cqe(io_work->qedf, &io_work->cqe);
2709 static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
2716 sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
2720 QEDF_ERR(&qedf->dbg_ctx,
2726 ret = qed_ops->common->sb_init(qedf->cdev, sb_info, sb_virt, sb_phys,
2730 QEDF_ERR(&qedf->dbg_ctx,
2739 static void qedf_free_sb(struct qedf_ctx *qedf, struct qed_sb_info *sb_info)
2742 dma_free_coherent(&qedf->pdev->dev, sizeof(*sb_info->sb_virt),
2746 static void qedf_destroy_sb(struct qedf_ctx *qedf)
2751 for (id = 0; id < qedf->num_queues; id++) {
2752 fp = &(qedf->fp_array[id]);
2755 qedf_free_sb(qedf, fp->sb_info);
2758 kfree(qedf->fp_array);
2761 static int qedf_prepare_sb(struct qedf_ctx *qedf)
2767 qedf->fp_array =
2768 kcalloc(qedf->num_queues, sizeof(struct qedf_fastpath),
2771 if (!qedf->fp_array) {
2772 QEDF_ERR(&(qedf->dbg_ctx), "fastpath array allocation "
2777 for (id = 0; id < qedf->num_queues; id++) {
2778 fp = &(qedf->fp_array[id]);
2782 QEDF_ERR(&(qedf->dbg_ctx), "SB info struct "
2786 ret = qedf_alloc_and_init_sb(qedf, fp->sb_info, id);
2788 QEDF_ERR(&(qedf->dbg_ctx), "SB allocation and "
2793 fp->qedf = qedf;
2795 qedf->global_queues[id]->cq_mem_size /
2802 void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
2815 io_req = &qedf->cmd_mgr->cmds[xid];
2819 QEDF_ERR(&qedf->dbg_ctx,
2827 QEDF_ERR(&qedf->dbg_ctx,
2838 QEDF_ERR(&qedf->dbg_ctx,
2852 qedf_scsi_completion(qedf, cqe, io_req);
2855 qedf_process_els_compl(qedf, cqe, io_req);
2858 qedf_process_tmf_compl(qedf, cqe, io_req);
2861 qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
2867 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2869 qedf_process_error_detect(qedf, cqe, io_req);
2873 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2875 qedf_process_cleanup_compl(qedf, cqe, io_req);
2879 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2881 qedf_process_abts_compl(qedf, cqe, io_req);
2885 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2890 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2895 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2897 qedf_process_warning_compl(qedf, cqe, io_req);
2901 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
2905 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
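qedf_process_cqe() (lines 2802-2905) is a single fan-out point: the XID carried in the CQE resolves the in-flight command, and the completion type selects a handler. Sketch of the shape; the CQE_TYPE_* labels are placeholders, only the handler calls come from the fragments above:

/* Resolve the command this CQE completes, then dispatch by type.
 * Case labels are illustrative placeholders. */
io_req = &qedf->cmd_mgr->cmds[xid];

switch (comp_type) {
case CQE_TYPE_IO:	/* normal SCSI completion */
	qedf_scsi_completion(qedf, cqe, io_req);
	break;
case CQE_TYPE_ERROR:	/* protocol error detected */
	qedf_process_error_detect(qedf, cqe, io_req);
	break;
case CQE_TYPE_CLEANUP:	/* exchange cleanup finished */
	qedf_process_cleanup_compl(qedf, cqe, io_req);
	break;
case CQE_TYPE_ABTS:	/* ABTS (abort) completed */
	qedf_process_abts_compl(qedf, cqe, io_req);
	break;
case CQE_TYPE_WARNING:	/* firmware warning event */
	qedf_process_warning_compl(qedf, cqe, io_req);
	break;
}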
2911 static void qedf_free_bdq(struct qedf_ctx *qedf)
2915 if (qedf->bdq_pbl_list)
2916 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2917 qedf->bdq_pbl_list, qedf->bdq_pbl_list_dma);
2919 if (qedf->bdq_pbl)
2920 dma_free_coherent(&qedf->pdev->dev, qedf->bdq_pbl_mem_size,
2921 qedf->bdq_pbl, qedf->bdq_pbl_dma);
2924 if (qedf->bdq[i].buf_addr) {
2925 dma_free_coherent(&qedf->pdev->dev, QEDF_BDQ_BUF_SIZE,
2926 qedf->bdq[i].buf_addr, qedf->bdq[i].buf_dma);
2931 static void qedf_free_global_queues(struct qedf_ctx *qedf)
2934 struct global_queue **gl = qedf->global_queues;
2936 for (i = 0; i < qedf->num_queues; i++) {
2941 dma_free_coherent(&qedf->pdev->dev,
2944 dma_free_coherent(&qedf->pdev->dev, gl[i]->cq_pbl_size,
2950 qedf_free_bdq(qedf);
2953 static int qedf_alloc_bdq(struct qedf_ctx *qedf)
2961 qedf->bdq[i].buf_addr = dma_alloc_coherent(&qedf->pdev->dev,
2962 QEDF_BDQ_BUF_SIZE, &qedf->bdq[i].buf_dma, GFP_KERNEL);
2963 if (!qedf->bdq[i].buf_addr) {
2964 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ "
2971 qedf->bdq_pbl_mem_size =
2973 qedf->bdq_pbl_mem_size =
2974 ALIGN(qedf->bdq_pbl_mem_size, QEDF_PAGE_SIZE);
2976 qedf->bdq_pbl = dma_alloc_coherent(&qedf->pdev->dev,
2977 qedf->bdq_pbl_mem_size, &qedf->bdq_pbl_dma, GFP_KERNEL);
2978 if (!qedf->bdq_pbl) {
2979 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate BDQ PBL.\n");
2983 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
2985 qedf->bdq_pbl, &qedf->bdq_pbl_dma);
2991 pbl = (struct scsi_bd *)qedf->bdq_pbl;
2993 pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
2994 pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
3002 qedf->bdq_pbl_list = dma_alloc_coherent(&qedf->pdev->dev,
3004 &qedf->bdq_pbl_list_dma,
3006 if (!qedf->bdq_pbl_list) {
3007 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate list of PBL pages.\n");
3015 qedf->bdq_pbl_list_num_entries = qedf->bdq_pbl_mem_size /
3017 list = (u64 *)qedf->bdq_pbl_list;
3018 for (i = 0; i < qedf->bdq_pbl_list_num_entries; i++) {
3019 *list = qedf->bdq_pbl_dma;
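qedf_alloc_bdq() (lines 2953-3019) builds a two-level page buffer list: one scsi_bd entry per receive buffer, its DMA address split into little-endian hi/lo words, plus a second-level page listing the PBL pages themselves. Sketch of the first-level fill, taken almost verbatim from the fragments:

/* One PBL entry per BDQ buffer; the 64-bit DMA address is stored
 * as two little-endian 32-bit halves for the firmware. */
pbl = (struct scsi_bd *)qedf->bdq_pbl;
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
	pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
	pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
	pbl++;
}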
3026 static int qedf_alloc_global_queues(struct qedf_ctx *qedf)
3040 if (!qedf->num_queues) {
3041 QEDF_ERR(&(qedf->dbg_ctx), "No MSI-X vectors available!\n");
3049 if (!qedf->p_cpuq) {
3050 QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n");
3054 qedf->global_queues = kzalloc((sizeof(struct global_queue *)
3055 * qedf->num_queues), GFP_KERNEL);
3056 if (!qedf->global_queues) {
3057 QEDF_ERR(&(qedf->dbg_ctx), "Unable to allocate global "
3061 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3062 "qedf->global_queues=%p.\n", qedf->global_queues);
3065 status = qedf_alloc_bdq(qedf);
3067 QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n");
3072 for (i = 0; i < qedf->num_queues; i++) {
3073 qedf->global_queues[i] = kzalloc(sizeof(struct global_queue),
3075 if (!qedf->global_queues[i]) {
3076 QEDF_WARN(&(qedf->dbg_ctx), "Unable to allocate "
3082 qedf->global_queues[i]->cq_mem_size =
3084 qedf->global_queues[i]->cq_mem_size =
3085 ALIGN(qedf->global_queues[i]->cq_mem_size, QEDF_PAGE_SIZE);
3087 qedf->global_queues[i]->cq_pbl_size =
3088 (qedf->global_queues[i]->cq_mem_size /
3090 qedf->global_queues[i]->cq_pbl_size =
3091 ALIGN(qedf->global_queues[i]->cq_pbl_size, QEDF_PAGE_SIZE);
3093 qedf->global_queues[i]->cq =
3094 dma_alloc_coherent(&qedf->pdev->dev,
3095 qedf->global_queues[i]->cq_mem_size,
3096 &qedf->global_queues[i]->cq_dma,
3099 if (!qedf->global_queues[i]->cq) {
3100 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq.\n");
3105 qedf->global_queues[i]->cq_pbl =
3106 dma_alloc_coherent(&qedf->pdev->dev,
3107 qedf->global_queues[i]->cq_pbl_size,
3108 &qedf->global_queues[i]->cq_pbl_dma,
3111 if (!qedf->global_queues[i]->cq_pbl) {
3112 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate cq PBL.\n");
3118 num_pages = qedf->global_queues[i]->cq_mem_size /
3120 page = qedf->global_queues[i]->cq_dma;
3121 pbl = (u32 *)qedf->global_queues[i]->cq_pbl;
3131 qedf->global_queues[i]->cq_cons_idx = 0;
3134 list = (u32 *)qedf->p_cpuq;
3142 for (i = 0; i < qedf->num_queues; i++) {
3143 *list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
3145 *list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
3156 qedf_free_global_queues(qedf);
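At the end of qedf_alloc_global_queues() (lines 3134-3145), the consolidated per-queue parameter block handed to the firmware is populated: each CQ's PBL base is written as lo/hi 32-bit words. Sketch, with any other per-queue fields elided:

/* Record each CQ's PBL base in the parameter block the firmware
 * consumes via glbl_q_params_addr (set up below at line 3211). */
list = (u32 *)qedf->p_cpuq;
for (i = 0; i < qedf->num_queues; i++) {
	*list = U64_LO(qedf->global_queues[i]->cq_pbl_dma);
	list++;
	*list = U64_HI(qedf->global_queues[i]->cq_pbl_dma);
	list++;
}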
3160 static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
3175 qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
3177 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
3178 qedf->num_queues);
3180 qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
3181 qedf->num_queues * sizeof(struct qedf_glbl_q_params),
3182 &qedf->hw_p_cpuq, GFP_KERNEL);
3184 if (!qedf->p_cpuq) {
3185 QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
3189 rval = qedf_alloc_global_queues(qedf);
3191 QEDF_ERR(&(qedf->dbg_ctx), "Global queue allocation "
3206 memset(&(qedf->pf_params), 0, sizeof(qedf->pf_params));
3209 qedf->pf_params.fcoe_pf_params.num_cons = QEDF_MAX_SESSIONS;
3210 qedf->pf_params.fcoe_pf_params.num_tasks = FCOE_PARAMS_NUM_TASKS;
3211 qedf->pf_params.fcoe_pf_params.glbl_q_params_addr =
3212 (u64)qedf->hw_p_cpuq;
3213 qedf->pf_params.fcoe_pf_params.sq_num_pbl_pages = sq_num_pbl_pages;
3215 qedf->pf_params.fcoe_pf_params.rq_buffer_log_size = 0;
3217 qedf->pf_params.fcoe_pf_params.cq_num_entries = cq_num_entries;
3218 qedf->pf_params.fcoe_pf_params.num_cqs = qedf->num_queues;
3221 qedf->pf_params.fcoe_pf_params.log_page_size = ilog2(QEDF_PAGE_SIZE);
3223 qedf->pf_params.fcoe_pf_params.mtu = 9000;
3224 qedf->pf_params.fcoe_pf_params.gl_rq_pi = QEDF_FCOE_PARAMS_GL_RQ_PI;
3225 qedf->pf_params.fcoe_pf_params.gl_cmd_pi = QEDF_FCOE_PARAMS_GL_CMD_PI;
3228 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0] =
3229 qedf->bdq_pbl_list_dma;
3230 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0] =
3231 qedf->bdq_pbl_list_num_entries;
3232 qedf->pf_params.fcoe_pf_params.rq_buffer_size = QEDF_BDQ_BUF_SIZE;
3234 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3236 qedf->bdq_pbl_list,
3237 qedf->pf_params.fcoe_pf_params.bdq_pbl_base_addr[0],
3238 qedf->pf_params.fcoe_pf_params.bdq_pbl_num_entries[0]);
3240 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3242 qedf->pf_params.fcoe_pf_params.cq_num_entries);
3248 static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
3252 if (qedf->p_cpuq) {
3253 size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
3254 dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
3255 qedf->hw_p_cpuq);
3258 qedf_free_global_queues(qedf);
3260 kfree(qedf->global_queues);
3287 struct qedf_ctx *qedf = NULL;
3320 qedf = lport_priv(lport);
3321 set_bit(QEDF_PROBING, &qedf->flags);
3322 qedf->lport = lport;
3323 qedf->ctlr.lp = lport;
3324 qedf->pdev = pdev;
3325 qedf->dbg_ctx.pdev = pdev;
3326 qedf->dbg_ctx.host_no = lport->host->host_no;
3327 spin_lock_init(&qedf->hba_lock);
3328 INIT_LIST_HEAD(&qedf->fcports);
3329 qedf->curr_conn_id = QEDF_MAX_SESSIONS - 1;
3330 atomic_set(&qedf->num_offloads, 0);
3331 qedf->stop_io_on_error = false;
3332 pci_set_drvdata(pdev, qedf);
3333 init_completion(&qedf->fipvlan_compl);
3334 mutex_init(&qedf->stats_mutex);
3335 mutex_init(&qedf->flush_mutex);
3336 qedf->flogi_pending = 0;
3338 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
3339 "QLogic FastLinQ FCoE Module qedf %s, "
3345 qedf = pci_get_drvdata(pdev);
3346 set_bit(QEDF_PROBING, &qedf->flags);
3347 lport = qedf->lport;
3350 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe started.\n");
3355 qedf->io_mempool = mempool_create_slab_pool(QEDF_IO_WORK_MIN,
3357 if (!qedf->io_mempool) {
3358 QEDF_ERR(&(qedf->dbg_ctx), "qedf->io_mempool is NULL.\n");
3361 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO, "qedf->io_mempool=%p.\n",
3362 qedf->io_mempool);
3365 qedf->lport->host->host_no);
3366 qedf->link_update_wq = create_workqueue(host_buf);
3367 INIT_DELAYED_WORK(&qedf->link_update, qedf_handle_link_update);
3368 INIT_DELAYED_WORK(&qedf->link_recovery, qedf_link_recovery);
3369 INIT_DELAYED_WORK(&qedf->grcdump_work, qedf_wq_grcdump);
3370 INIT_DELAYED_WORK(&qedf->stag_work, qedf_stag_change_work);
3371 qedf->fipvlan_retries = qedf_fipvlan_retries;
3378 qedf->prio = qedf_default_prio;
3380 qedf->prio = QEDF_DEFAULT_PRIO;
3391 qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
3392 if (!qedf->cdev) {
3394 QEDF_ERR(&qedf->dbg_ctx,
3399 QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
3404 /* Learn information crucial for qedf to progress */
3405 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3407 QEDF_ERR(&(qedf->dbg_ctx), "Failed to fill dev info.\n");
3411 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC,
3413 qedf->dev_info.common.num_hwfns,
3414 qed_ops->common->get_affin_hwfn_idx(qedf->cdev));
3424 rc = qedf_set_fcoe_pf_param(qedf);
3426 QEDF_ERR(&(qedf->dbg_ctx), "Cannot set fcoe pf param.\n");
3429 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3431 /* Learn information crucial for qedf to progress */
3432 rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
3434 QEDF_ERR(&qedf->dbg_ctx, "Failed to fill dev info.\n");
3439 qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
3440 if (IS_ERR(qedf->devlink)) {
3441 QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
3442 rc = PTR_ERR(qedf->devlink);
3443 qedf->devlink = NULL;
3449 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
3450 qedf->bdq_secondary_prod = qedf->dev_info.secondary_bdq_rq_addr;
3451 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3452 "BDQ primary_prod=%p secondary_prod=%p.\n", qedf->bdq_primary_prod,
3453 qedf->bdq_secondary_prod);
3455 qed_ops->register_ops(qedf->cdev, &qedf_cb_ops, qedf);
3457 rc = qedf_prepare_sb(qedf);
3460 QEDF_ERR(&(qedf->dbg_ctx), "Cannot prepare status blocks.\n");
3470 strncpy(slowpath_params.name, "qedf", QED_DRV_VER_STR_SIZE);
3471 rc = qed_ops->common->slowpath_start(qedf->cdev, &slowpath_params);
3473 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start slowpath.\n");
3481 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
3484 rc = qedf_setup_int(qedf);
3486 QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n");
3490 rc = qed_ops->start(qedf->cdev, &qedf->tasks);
3492 QEDF_ERR(&(qedf->dbg_ctx), "Cannot start FCoE function.\n");
3495 task_start = qedf_get_task_mem(&qedf->tasks, 0);
3496 task_end = qedf_get_task_mem(&qedf->tasks, MAX_TID_BLOCKS_FCOE - 1);
3497 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Task context start=%p, "
3499 qedf->tasks.size);
3506 qedf->bdq_prod_idx = QEDF_BDQ_SIZE;
3507 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3509 qedf->bdq_prod_idx);
3510 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
3511 readw(qedf->bdq_primary_prod);
3512 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
3513 readw(qedf->bdq_secondary_prod);
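The BDQ producer initialization at lines 3506-3513 pairs each writew() with a readw() of the same register: PCI memory writes are posted, and the read back forces them to reach the device before the driver continues. The same four lines, annotated:

/* Prime both BDQ producer doorbells. The readw() after each
 * writew() flushes the posted PCI write so the device observes
 * the producer index before we move on. */
writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
readw(qedf->bdq_primary_prod);		/* flush posted write */
writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
readw(qedf->bdq_secondary_prod);	/* flush posted write */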
3515 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3520 ether_addr_copy(qedf->mac, qedf->dev_info.common.hw_mac);
3521 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "MAC address is %pM.\n",
3522 qedf->mac);
3531 if (qedf->dev_info.wwnn != 0 && qedf->dev_info.wwpn != 0) {
3532 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3534 qedf->wwnn = qedf->dev_info.wwnn;
3535 qedf->wwpn = qedf->dev_info.wwpn;
3537 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3539 qedf->wwnn = fcoe_wwn_from_mac(qedf->mac, 1, 0);
3540 qedf->wwpn = fcoe_wwn_from_mac(qedf->mac, 2, 0);
3542 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "WWNN=%016llx "
3543 "WWPN=%016llx.\n", qedf->wwnn, qedf->wwpn);
3546 qed_ops->common->set_name(qedf->cdev, host_buf);
3549 qedf->cmd_mgr = qedf_cmd_mgr_alloc(qedf);
3550 if (!qedf->cmd_mgr) {
3551 QEDF_ERR(&(qedf->dbg_ctx), "Failed to allocate cmd mgr.\n");
3564 QEDF_WARN(&qedf->dbg_ctx,
3572 ether_addr_copy(params.ll2_mac_address, qedf->mac);
3576 qedf->ll2_recv_wq =
3578 if (!qedf->ll2_recv_wq) {
3579 QEDF_ERR(&(qedf->dbg_ctx), "Failed to create LL2 workqueue.\n");
3585 qedf_dbg_host_init(&(qedf->dbg_ctx), qedf_debugfs_ops,
3590 qed_ops->ll2->register_cb_ops(qedf->cdev, &qedf_ll2_cb_ops, qedf);
3591 rc = qed_ops->ll2->start(qedf->cdev, ¶ms);
3593 QEDF_ERR(&(qedf->dbg_ctx), "Could not start Light L2.\n");
3596 set_bit(QEDF_LL2_STARTED, &qedf->flags);
3599 qedf->vlan_id = 0;
3607 qedf_fcoe_ctlr_setup(qedf);
3610 rc = qedf_lport_setup(qedf);
3612 QEDF_ERR(&(qedf->dbg_ctx),
3618 sprintf(host_buf, "qedf_%u_timer", qedf->lport->host->host_no);
3619 qedf->timer_work_queue =
3621 if (!qedf->timer_work_queue) {
3622 QEDF_ERR(&(qedf->dbg_ctx), "Failed to start timer "
3631 qedf->lport->host->host_no);
3632 qedf->dpc_wq = create_workqueue(host_buf);
3634 INIT_DELAYED_WORK(&qedf->recovery_work, qedf_recovery_handler);
3641 qedf->grcdump_size =
3642 qed_ops->common->dbg_all_data_size(qedf->cdev);
3643 if (qedf->grcdump_size) {
3644 rc = qedf_alloc_grc_dump_buf(&qedf->grcdump,
3645 qedf->grcdump_size);
3647 QEDF_ERR(&(qedf->dbg_ctx),
3649 qedf->grcdump = NULL;
3652 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
3654 qedf->grcdump, qedf->grcdump_size);
3656 qedf_create_sysfs_ctx_attr(qedf);
3659 spin_lock_init(&qedf->io_trace_lock);
3660 qedf->io_trace_idx = 0;
3663 init_completion(&qedf->flogi_compl);
3665 status = qed_ops->common->update_drv_state(qedf->cdev, true);
3667 QEDF_ERR(&(qedf->dbg_ctx),
3672 status = qed_ops->common->set_link(qedf->cdev, &link_params);
3674 QEDF_WARN(&(qedf->dbg_ctx), "set_link failed.\n");
3678 fcoe_ctlr_link_up(&qedf->ctlr);
3682 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, "Probe done.\n");
3684 clear_bit(QEDF_PROBING, &qedf->flags);
3690 if (qedf->ll2_recv_wq)
3691 destroy_workqueue(qedf->ll2_recv_wq);
3692 fc_remove_host(qedf->lport->host);
3693 scsi_remove_host(qedf->lport->host);
3695 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3698 qedf_cmd_mgr_free(qedf->cmd_mgr);
3700 qed_ops->stop(qedf->cdev);
3702 qedf_free_fcoe_pf_param(qedf);
3703 qedf_sync_free_irqs(qedf);
3705 qed_ops->common->slowpath_stop(qedf->cdev);
3707 qed_ops->common->remove(qedf->cdev);
3721 struct qedf_ctx *qedf;
3729 qedf = pci_get_drvdata(pdev);
3735 if (test_bit(QEDF_UNLOADING, &qedf->flags)) {
3736 QEDF_ERR(&qedf->dbg_ctx, "Already removing PCI function.\n");
3741 set_bit(QEDF_UNLOADING, &qedf->flags);
3745 fcoe_ctlr_link_down(&qedf->ctlr);
3747 fc_fabric_logoff(qedf->lport);
3749 if (!qedf_wait_for_upload(qedf))
3750 QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
3753 qedf_dbg_host_exit(&(qedf->dbg_ctx));
3757 cancel_delayed_work_sync(&qedf->link_update);
3758 destroy_workqueue(qedf->link_update_wq);
3759 qedf->link_update_wq = NULL;
3761 if (qedf->timer_work_queue)
3762 destroy_workqueue(qedf->timer_work_queue);
3765 clear_bit(QEDF_LL2_STARTED, &qedf->flags);
3766 qed_ops->ll2->stop(qedf->cdev);
3767 if (qedf->ll2_recv_wq)
3768 destroy_workqueue(qedf->ll2_recv_wq);
3771 qedf_sync_free_irqs(qedf);
3772 qedf_destroy_sb(qedf);
3779 qedf_free_grc_dump_buf(&qedf->grcdump);
3780 qedf_remove_sysfs_ctx_attr(qedf);
3783 fcoe_ctlr_destroy(&qedf->ctlr);
3784 fc_lport_destroy(qedf->lport);
3785 fc_remove_host(qedf->lport->host);
3786 scsi_remove_host(qedf->lport->host);
3789 qedf_cmd_mgr_free(qedf->cmd_mgr);
3792 fc_exch_mgr_free(qedf->lport);
3793 fc_lport_free_stats(qedf->lport);
3796 qedf_wait_for_vport_destroy(qedf);
3803 qed_ops->stop(qedf->cdev);
3806 if (qedf->dpc_wq) {
3808 destroy_workqueue(qedf->dpc_wq);
3809 qedf->dpc_wq = NULL;
3814 qedf_free_fcoe_pf_param(qedf);
3816 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
3820 rc = qed_ops->common->update_drv_state(qedf->cdev, false);
3822 QEDF_ERR(&(qedf->dbg_ctx),
3825 if (mode != QEDF_MODE_RECOVERY && qedf->devlink) {
3826 qed_ops->common->devlink_unregister(qedf->devlink);
3827 qedf->devlink = NULL;
3830 qed_ops->common->slowpath_stop(qedf->cdev);
3831 qed_ops->common->remove(qedf->cdev);
3833 mempool_destroy(qedf->io_mempool);
3837 scsi_host_put(qedf->lport->host);
3851 struct qedf_ctx *qedf =
3854 QEDF_ERR(&(qedf->dbg_ctx), "Collecting GRC dump.\n");
3855 qedf_capture_grc_dump(qedf);
3860 struct qedf_ctx *qedf = dev;
3862 QEDF_ERR(&(qedf->dbg_ctx),
3866 if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
3867 QEDF_ERR(&(qedf->dbg_ctx),
3874 schedule_delayed_work(&qedf->board_disable_work, 0);
3881 qed_ops->common->attn_clr_enable(qedf->cdev, true);
3885 qed_ops->common->attn_clr_enable(qedf->cdev, true);
3887 if (qedf_enable_recovery && qedf->devlink)
3888 qed_ops->common->report_fatal_error(qedf->devlink,
3902 struct qedf_ctx *qedf = dev;
3909 if (!qedf) {
3910 QEDF_ERR(NULL, "qedf is null.\n");
3914 if (test_bit(QEDF_PROBING, &qedf->flags)) {
3915 QEDF_ERR(&qedf->dbg_ctx, "Function is still probing.\n");
3919 lport = qedf->lport;
3942 if (qedf->ctlr.sel_fcf) {
3944 u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
3948 /* For qedf we're either link down or fabric attach */
3981 fcoe->tx_lun_rst = qedf->lun_resets;
3984 fcoe->abort_task_sets = qedf->packet_aborts;
3987 fcoe->scsi_busy = qedf->busy;
3990 fcoe->scsi_tsk_full = qedf->task_set_fulls;
3996 struct qedf_ctx *qedf =
4000 dev_name(&qedf->pdev->dev), __func__, __LINE__,
4001 qedf->dbg_ctx.host_no);
4002 qedf_ctx_soft_reset(qedf->lport);
4012 struct qedf_ctx *qedf;
4019 qedf = pci_get_drvdata(pdev);
4021 QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__);
4031 struct qedf_ctx *qedf = dev;
4033 QEDF_ERR(&qedf->dbg_ctx, "Recovery handler scheduled.\n");
4034 schedule_delayed_work(&qedf->recovery_work, 0);
4039 struct qedf_ctx *qedf =
4042 if (test_and_set_bit(QEDF_IN_RECOVERY, &qedf->flags))
4049 qed_ops->common->recovery_prolog(qedf->cdev);
4051 QEDF_ERR(&qedf->dbg_ctx, "Recovery work start.\n");
4052 __qedf_remove(qedf->pdev, QEDF_MODE_RECOVERY);
4058 atomic_set(&qedf->link_state, QEDF_LINK_DOWN);
4059 atomic_set(&qedf->dcbx, QEDF_DCBX_PENDING);
4060 __qedf_probe(qedf->pdev, QEDF_MODE_RECOVERY);
4061 clear_bit(QEDF_IN_RECOVERY, &qedf->flags);
4062 QEDF_ERR(&qedf->dbg_ctx, "Recovery work complete.\n");
4068 struct qedf_ctx *qedf;
4075 qedf = (struct qedf_ctx *)dev;
4078 ether_addr_copy(data->mac[0], qedf->mac);
4125 qedf_dbg_init("qedf");