Lines Matching refs:qedf
8 #include "qedf.h"
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
23 struct qedf_ctx *qedf;
37 qedf = fcport->qedf;
41 if (qedf == NULL) {
43 "qedf is NULL for ABTS xid=0x%x.\n",
48 QEDF_ERR(&(qedf->dbg_ctx), "ABTS timeout, xid=0x%x.\n",
71 if (!qedf) {
73 "qedf is NULL for ELS xid=0x%x.\n",
86 QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
98 QEDF_ERR(&(qedf->dbg_ctx), "Sequence cleanup timeout, "
102 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
105 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
114 struct qedf_ctx *qedf = cmgr->qedf;
126 QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n");
134 dma_free_coherent(&qedf->pdev->dev, bd_tbl_sz,
157 dma_free_coherent(&qedf->pdev->dev,
177 struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf)
189 if (!qedf->num_queues) {
190 QEDF_ERR(&(qedf->dbg_ctx), "num_queues is not set.\n");
195 QEDF_WARN(&(qedf->dbg_ctx), "Invalid min_xid 0x%x and "
200 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "min xid 0x%x, max xid "
207 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc cmd mgr.\n");
211 cmgr->qedf = qedf;
228 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
232 QEDF_ERR(&qedf->dbg_ctx,
241 QEDF_ERR(&(qedf->dbg_ctx),
254 QEDF_ERR(&(qedf->dbg_ctx),
266 QEDF_WARN(&(qedf->dbg_ctx), "Failed to alloc io_bdt_pool.\n");
274 QEDF_WARN(&(qedf->dbg_ctx),
282 bdt_info->bd_tbl = dma_alloc_coherent(&qedf->pdev->dev,
286 QEDF_WARN(&(qedf->dbg_ctx),
292 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
305 struct qedf_ctx *qedf = fcport->qedf;
306 struct qedf_cmd_mgr *cmd_mgr = qedf->cmd_mgr;
317 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
326 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
334 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
358 QEDF_ERR(&qedf->dbg_ctx,
387 QEDF_ERR(&(qedf->dbg_ctx), "bd_tbl is NULL, xid=%x.\n", xid);
404 qedf->alloc_failures++;
411 struct qedf_ctx *qedf = io_req->fcport->qedf;
416 dma_free_coherent(&qedf->pdev->dev, sz,
421 dma_free_coherent(&qedf->pdev->dev, sz,
426 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
431 dma_free_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
446 QEDF_WARN(&fcport->qedf->dbg_ctx,
460 QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n");
481 struct qedf_ctx *qedf = lport_priv(lport);
491 sg_count = dma_map_sg(&qedf->pdev->dev, scsi_sglist(sc),
526 QEDF_ERR(&(qedf->dbg_ctx), "byte_count = %d != "
601 struct qedf_ctx *qedf = fcport->qedf;
602 uint16_t cq_idx = smp_processor_id() % qedf->num_queues;
676 qedf->slow_sge_ios++;
678 qedf->fast_sge_ios++;
686 struct qedf_ctx *qedf = io_req->fcport->qedf;
692 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
696 qedf->control_requests++;
808 struct qedf_ctx *qedf = fcport->qedf;
814 spin_lock_irqsave(&qedf->io_trace_lock, flags);
816 io_log = &qedf->io_trace_buf[qedf->io_trace_idx];
845 qedf->io_trace_idx++;
846 if (qedf->io_trace_idx == QEDF_IO_TRACE_SIZE)
847 qedf->io_trace_idx = 0;
849 spin_unlock_irqrestore(&qedf->io_trace_lock, flags);
857 struct qedf_ctx *qedf = lport_priv(lport);
873 qedf->input_requests++;
876 qedf->output_requests++;
879 qedf->control_requests++;
886 QEDF_ERR(&(qedf->dbg_ctx), "BD list creation failed.\n");
895 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
911 task_ctx = qedf_get_task_mem(&qedf->tasks, xid);
913 QEDF_WARN(&(qedf->dbg_ctx), "task_ctx is NULL, xid=%d.\n",
939 struct qedf_ctx *qedf = lport_priv(lport);
951 QEDF_ERR(&qedf->dbg_ctx,
959 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
960 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
961 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
963 qedf->flags);
969 if (!qedf->pdev->msix_enabled) {
970 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
980 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
989 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
990 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n");
996 atomic_read(&qedf->link_state) != QEDF_LINK_UP) {
997 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n");
1044 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1060 struct qedf_ctx *qedf = io_req->fcport->qedf;
1092 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1099 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1113 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1118 dma_unmap_sg(&qedf->pdev->dev, scsi_sglist(sc),
1124 void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1144 QEDF_ERR(&qedf->dbg_ctx,
1154 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1159 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1165 QEDF_ERR(&qedf->dbg_ctx,
1171 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd->request is NULL, "
1177 QEDF_WARN(&(qedf->dbg_ctx), "request->q is NULL so request "
1191 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1199 qedf_unmap_sg_list(qedf, io_req);
1203 QEDF_ERR(&(qedf->dbg_ctx),
1214 QEDF_ERR(&qedf->dbg_ctx,
1241 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1245 qedf->lport->host->host_no, sc_cmd->device->id,
1271 qedf->task_set_fulls++;
1273 qedf->busy++;
1285 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1290 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1303 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1304 "combination of scope = %d and qualifier = %d is not handled in qedf.\n",
1310 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "fcp_status=%d.\n",
1332 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1339 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1344 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1359 QEDF_WARN(&(qedf->dbg_ctx), "sc_cmd is NULL!\n");
1364 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd=%p is not valid.", sc_cmd);
1369 QEDF_WARN(&(qedf->dbg_ctx), "SCp.ptr is NULL, returned in "
1375 QEDF_ERR(&qedf->dbg_ctx, "Device for sc_cmd %p is NULL.\n",
1381 QEDF_ERR(&qedf->dbg_ctx,
1387 QEDF_ERR(&qedf->dbg_ctx,
1394 QEDF_ERR(&qedf->dbg_ctx,
1401 QEDF_ERR(&qedf->dbg_ctx,
1407 qedf_unmap_sg_list(qedf, io_req);
1411 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
1414 qedf->lport->host->host_no, sc_cmd->device->id,
1448 void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1457 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1463 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1465 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1469 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1490 QEDF_ERR(&(qedf->dbg_ctx), "REC timer expired.\n");
1514 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1518 void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1534 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1539 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1541 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1545 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1555 QEDF_ERR(&qedf->dbg_ctx,
1561 if (qedf->stop_io_on_error) {
1562 qedf_stop_all_io(qedf);
1569 QEDF_ERR(&(qedf->dbg_ctx), "Failed to queue ABTS.\n");
1572 static void qedf_flush_els_req(struct qedf_ctx *qedf,
1575 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1606 struct qedf_ctx *qedf;
1625 qedf = fcport->qedf;
1627 if (!qedf) {
1628 QEDF_ERR(NULL, "qedf is NULL.\n");
1636 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1649 cmd_mgr = qedf->cmd_mgr;
1651 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1655 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Locking flush mutex.\n");
1657 mutex_lock(&qedf->flush_mutex);
1678 QEDF_ERR(&qedf->dbg_ctx,
1698 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1708 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1724 QEDF_ERR(&(qedf->dbg_ctx),
1731 qedf_flush_els_req(qedf, io_req);
1744 QEDF_ERR(&(qedf->dbg_ctx),
1752 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1756 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1763 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1782 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1802 QEDF_ERR(&(qedf->dbg_ctx), "Could not get kref for "
1807 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
1819 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1826 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
1832 QEDF_ERR(&qedf->dbg_ctx,
1844 QEDF_ERR(&qedf->dbg_ctx,
1863 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Unlocking flush mutex.\n");
1864 mutex_unlock(&qedf->flush_mutex);
1876 struct qedf_ctx *qedf;
1891 qedf = fcport->qedf;
1895 QEDF_ERR(&qedf->dbg_ctx, "stale rport\n");
1900 lport = qedf->lport;
1903 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
1908 if (atomic_read(&qedf->link_down_tmo_valid) > 0) {
1909 QEDF_ERR(&(qedf->dbg_ctx), "link_down_tmo active.\n");
1916 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
1922 QEDF_ERR(&qedf->dbg_ctx, "fcport is uploading.\n");
1931 QEDF_ERR(&qedf->dbg_ctx,
1946 qedf->control_requests++;
1947 qedf->packet_aborts++;
1953 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
1957 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1977 void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
1984 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "Entered with xid = "
1994 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2006 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2013 QEDF_ERR(&qedf->dbg_ctx,
2019 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2024 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2033 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2034 msecs_to_jiffies(qedf->lport->r_a_tov));
2039 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM,
2044 QEDF_ERR(&(qedf->dbg_ctx), "Unknown ABTS response\n");
2052 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2056 qedf_scsi_done(qedf, io_req, DID_ERROR);
2070 struct qedf_ctx *qedf = io_req->fcport->qedf;
2074 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_MP_REQ, "Entered.\n");
2085 mp_req->req_buf = dma_alloc_coherent(&qedf->pdev->dev, QEDF_PAGE_SIZE,
2088 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req buffer\n");
2093 mp_req->resp_buf = dma_alloc_coherent(&qedf->pdev->dev,
2096 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc TM resp "
2104 mp_req->mp_req_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2107 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP req bd\n");
2112 mp_req->mp_resp_bd = dma_alloc_coherent(&qedf->pdev->dev, sz,
2115 QEDF_ERR(&(qedf->dbg_ctx), "Unable to alloc MP resp bd\n");
2145 static void qedf_drain_request(struct qedf_ctx *qedf)
2147 if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) {
2148 QEDF_ERR(&(qedf->dbg_ctx), "MCP drain already active.\n");
2153 set_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2156 qed_ops->common->drain(qedf->cdev);
2162 clear_bit(QEDF_DRAIN_ACTIVE, &qedf->flags);
2173 struct qedf_ctx *qedf;
2193 qedf = fcport->qedf;
2194 if (!qedf) {
2195 QEDF_ERR(NULL, "qedf is NULL.\n");
2205 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2215 QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
2222 QEDF_ERR(&qedf->dbg_ctx,
2231 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
2262 QEDF_ERR(&(qedf->dbg_ctx), "Cleanup command timeout, "
2266 QEDF_ERR(&(qedf->dbg_ctx), "Issuing MCP drain request.\n");
2267 qedf_drain_request(qedf);
2283 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM,
2287 qedf_scsi_done(qedf, io_req, DID_ERROR);
2298 void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2301 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "Entered xid = 0x%x\n",
2315 struct qedf_ctx *qedf = fcport->qedf;
2316 struct fc_lport *lport = qedf->lport;
2326 QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n");
2332 QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
2339 QEDF_ERR(&(qedf->dbg_ctx), "Failed TMF");
2345 qedf->lun_resets++;
2347 qedf->target_resets++;
2368 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2372 task = qedf_get_task_mem(&qedf->tasks, xid);
2393 QEDF_ERR(&(qedf->dbg_ctx), "wait for tm_cmpl timeout!\n");
2409 QEDF_ERR(&qedf->dbg_ctx,
2424 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command failed...\n");
2427 QEDF_ERR(&(qedf->dbg_ctx), "task mgmt command success...\n");
2438 struct qedf_ctx *qedf;
2485 qedf = fcport->qedf;
2487 if (!qedf) {
2488 QEDF_ERR(NULL, "qedf is NULL.\n");
2494 QEDF_ERR(&qedf->dbg_ctx, "Connection is getting uploaded.\n");
2499 if (test_bit(QEDF_UNLOADING, &qedf->flags) ||
2500 test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) {
2506 QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
2513 QEDF_ERR(&qedf->dbg_ctx, "fcport %p is uploading.\n",
2516 QEDF_ERR(&qedf->dbg_ctx,
2530 void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
2544 void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
2558 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2564 qedf->bdq_prod_idx, pktlen);
2568 QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
2573 bdq_addr = qedf->bdq[bdq_idx].buf_addr;
2575 QEDF_ERR(&(qedf->dbg_ctx), "bdq_addr is NULL, dropping "
2581 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
2589 fp = fc_frame_alloc(qedf->lport, payload_len);
2591 QEDF_ERR(&(qedf->dbg_ctx), "Could not allocate fp.\n");
2599 QEDF_WARN(&qedf->dbg_ctx,
2607 fr_dev(fp) = qedf->lport;
2616 io_work = mempool_alloc(qedf->io_mempool, GFP_ATOMIC);
2618 QEDF_WARN(&(qedf->dbg_ctx), "Could not allocate "
2630 io_work->qedf = qedf;
2635 spin_lock_irqsave(&qedf->hba_lock, flags);
2638 qedf->bdq_prod_idx++;
2641 if (qedf->bdq_prod_idx == 0xffff)
2642 qedf->bdq_prod_idx = 0;
2644 writew(qedf->bdq_prod_idx, qedf->bdq_primary_prod);
2645 readw(qedf->bdq_primary_prod);
2646 writew(qedf->bdq_prod_idx, qedf->bdq_secondary_prod);
2647 readw(qedf->bdq_secondary_prod);
2649 spin_unlock_irqrestore(&qedf->hba_lock, flags);
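
The listing above repeatedly shows the same defensive control flow: resolve the qedf context from the fcport, bail out with an error log when it is NULL, otherwise log the timeout for the specific request type (ABTS, ELS, or sequence cleanup), and, on the arming side, schedule delayed work via qedf_cmd_timer_set() (file lines 11-14). Below is a minimal, self-contained C sketch of that pattern only; the struct layouts, enum values, and the arm_io_timer()/handle_timeout() helpers are simplified stand-ins for illustration, not the driver's actual definitions (the real code uses queue_delayed_work() on qedf->timer_work_queue and the QEDF_ERR/QEDF_INFO macros with &qedf->dbg_ctx).

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the driver's qedf_ctx / qedf_ioreq types. */
struct qedf_ctx_sketch { const char *name; };

enum ioreq_kind_sketch { IOREQ_ABTS, IOREQ_ELS, IOREQ_SEQ_CLEANUP };

struct qedf_ioreq_sketch {
	unsigned int xid;                 /* exchange id, as in the xid=0x%x logs */
	enum ioreq_kind_sketch kind;
	struct qedf_ctx_sketch *qedf;     /* back-pointer the timeout handler checks */
};

/* Mirrors the shape of qedf_cmd_timer_set(): record a deadline for the request.
 * In the driver this is queue_delayed_work() on qedf->timer_work_queue. */
static void arm_io_timer(struct qedf_ctx_sketch *qedf,
			 struct qedf_ioreq_sketch *io_req,
			 unsigned int timeout_ms)
{
	printf("[%s] arming %u ms timer for xid=0x%x\n",
	       qedf->name, timeout_ms, io_req->xid);
}

/* Mirrors the timeout handler's structure: NULL-check the context first
 * (the connection may already be gone when the timer fires), then branch
 * on the request type, as in the ABTS / ELS / sequence-cleanup cases. */
static void handle_timeout(struct qedf_ioreq_sketch *io_req)
{
	struct qedf_ctx_sketch *qedf = io_req->qedf;

	if (qedf == NULL) {
		fprintf(stderr, "qedf is NULL for xid=0x%x\n", io_req->xid);
		return;
	}

	switch (io_req->kind) {
	case IOREQ_ABTS:
		fprintf(stderr, "[%s] ABTS timeout, xid=0x%x\n",
			qedf->name, io_req->xid);
		break;
	case IOREQ_ELS:
		fprintf(stderr, "[%s] ELS timeout, xid=0x%x\n",
			qedf->name, io_req->xid);
		break;
	case IOREQ_SEQ_CLEANUP:
		fprintf(stderr, "[%s] sequence cleanup timeout, xid=0x%x\n",
			qedf->name, io_req->xid);
		break;
	}
}

int main(void)
{
	struct qedf_ctx_sketch ctx = { .name = "qedf0" };
	struct qedf_ioreq_sketch req = { .xid = 0x12, .kind = IOREQ_ABTS, .qedf = &ctx };

	arm_io_timer(&ctx, &req, 10000);
	handle_timeout(&req);
	return 0;
}

Building and running the sketch prints the armed-timer message followed by the ABTS-timeout message; it is only meant to make the branch structure of the timeout path easier to follow while scanning the reference list.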