Lines Matching defs:io_req

11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
21 struct qedf_ioreq *io_req =
26 if (io_req == NULL) {
27 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
31 fcport = io_req->fcport;
32 if (io_req->fcport == NULL) {
39 switch (io_req->cmd_type) {
44 io_req->xid);
49 io_req->xid);
51 qedf_initiate_cleanup(io_req, true);
52 complete(&io_req->abts_done);
59 kref_put(&io_req->refcount, qedf_release_cmd);
62 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
74 io_req->xid);
78 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
80 kref_get(&io_req->refcount);
87 io_req->xid);
88 qedf_initiate_cleanup(io_req, true);
89 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
91 if (io_req->cb_func && io_req->cb_arg) {
92 io_req->cb_func(io_req->cb_arg);
93 io_req->cb_arg = NULL;
95 kref_put(&io_req->refcount, qedf_release_cmd);
99 "xid=0x%x.\n", io_req->xid);
100 qedf_initiate_cleanup(io_req, true);
101 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
102 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
106 "Hit default case, xid=0x%x.\n", io_req->xid);
120 struct qedf_ioreq *io_req;
152 io_req = &cmgr->cmds[i];
153 kfree(io_req->sgl_task_params);
154 kfree(io_req->task_params);
156 if (io_req->sense_buffer)
158 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
159 io_req->sense_buffer_dma);
160 cancel_delayed_work_sync(&io_req->rrq_work);
169 struct qedf_ioreq *io_req =
172 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
173 qedf_send_rrq(io_req);
181 struct qedf_ioreq *io_req;
220 io_req = &cmgr->cmds[i];
221 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
223 io_req->xid = xid++;
225 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
228 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
229 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
231 if (!io_req->sense_buffer) {
238 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
240 if (!io_req->task_params) {
251 io_req->sgl_task_params = kzalloc(
253 if (!io_req->sgl_task_params) {
307 struct qedf_ioreq *io_req = NULL;
342 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
348 if (!io_req->alloc)
357 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
359 "io_req found to be dirty ox_id = 0x%x.\n",
360 io_req->xid);
363 io_req->flags = 0;
364 io_req->alloc = 1;
369 xid = io_req->xid;
372 io_req->cmd_mgr = cmd_mgr;
373 io_req->fcport = fcport;
376 io_req->sc_cmd = NULL;
377 io_req->lun = -1;
379 /* Hold the io_req against deletion */
380 kref_init(&io_req->refcount); /* ID: 001 */
381 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
383 /* Bind io_bdt for this io_req */
384 /* Have a static link between io_req and io_bdt_pool */
385 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
388 kref_put(&io_req->refcount, qedf_release_cmd);
391 bd_tbl->io_req = io_req;
392 io_req->cmd_type = cmd_type;
393 io_req->tm_flags = 0;
396 io_req->rx_buf_off = 0;
397 io_req->tx_buf_off = 0;
398 io_req->rx_id = 0xffff; /* No OX_ID */
400 return io_req;
408 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
410 struct qedf_mp_req *mp_req = &(io_req->mp_req);
411 struct qedf_ctx *qedf = io_req->fcport->qedf;
439 struct qedf_ioreq *io_req =
441 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
442 struct qedf_rport *fcport = io_req->fcport;
445 if (io_req->cmd_type == QEDF_SCSI_CMD) {
447 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
448 io_req, io_req->xid);
449 WARN_ON(io_req->sc_cmd);
452 if (io_req->cmd_type == QEDF_ELS ||
453 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
454 qedf_free_mp_resc(io_req);
458 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
465 io_req->task_retry_identifier++;
466 io_req->fcport = NULL;
468 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
469 io_req->cpu = 0;
471 io_req->fcport = NULL;
472 io_req->alloc = 0;
476 static int qedf_map_sg(struct qedf_ioreq *io_req)
478 struct scsi_cmnd *sc = io_req->sc_cmd;
482 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
495 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
497 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
498 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
509 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
511 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
522 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
523 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
528 scsi_bufflen(sc), io_req->xid);
533 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
535 struct scsi_cmnd *sc = io_req->sc_cmd;
536 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
540 bd_count = qedf_map_sg(io_req);
548 io_req->bd_tbl->bd_valid = bd_count;
553 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
556 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
567 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
568 fcp_cmnd->fc_flags = io_req->io_req_flags;
572 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
584 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
588 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
592 struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
596 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
597 struct io_bdt *bd_tbl = io_req->bd_tbl;
609 io_req->task = task_ctx;
611 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
612 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
615 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
620 tx_io_size = io_req->data_xfer_len;
623 rx_io_size = io_req->data_xfer_len;
628 io_req->task_params->context = task_ctx;
629 io_req->task_params->sqe = sqe;
630 io_req->task_params->task_type = task_type;
631 io_req->task_params->tx_io_size = tx_io_size;
632 io_req->task_params->rx_io_size = rx_io_size;
633 io_req->task_params->conn_cid = fcport->fw_cid;
634 io_req->task_params->itid = io_req->xid;
635 io_req->task_params->cq_rss_number = cq_idx;
636 io_req->task_params->is_tape_device = fcport->dev_type;
639 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
641 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
642 io_req->sgl_task_params->sgl_phys_addr.lo =
644 io_req->sgl_task_params->sgl_phys_addr.hi =
646 io_req->sgl_task_params->num_sges = bd_count;
647 io_req->sgl_task_params->total_buffer_size =
648 scsi_bufflen(io_req->sc_cmd);
649 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
650 io_req->sgl_task_params->small_mid_sge = 1;
652 io_req->sgl_task_params->small_mid_sge = 0;
656 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
657 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
660 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
669 init_initiator_rw_fcoe_task(io_req->task_params,
670 io_req->sgl_task_params,
672 io_req->task_retry_identifier, fcp_cmnd);
675 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
681 void qedf_init_mp_task(struct qedf_ioreq *io_req,
684 struct qedf_mp_req *mp_req = &(io_req->mp_req);
685 struct qedf_rport *fcport = io_req->fcport;
686 struct qedf_ctx *qedf = io_req->fcport->qedf;
694 io_req->cmd_type);
703 /* Setup the task from io_req for easy reference */
704 io_req->task = task_ctx;
707 io_req->task_params->context = task_ctx;
708 io_req->task_params->sqe = sqe;
709 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
710 io_req->task_params->tx_io_size = io_req->data_xfer_len;
712 io_req->task_params->rx_io_size = PAGE_SIZE;
713 io_req->task_params->conn_cid = fcport->fw_cid;
714 io_req->task_params->itid = io_req->xid;
716 io_req->task_params->cq_rss_number = 0;
717 io_req->task_params->is_tape_device = fcport->dev_type;
721 fc_hdr->fh_ox_id = io_req->xid;
739 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
756 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
805 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
810 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
818 io_log->task_id = io_req->xid;
830 io_log->refcount = kref_read(&io_req->refcount);
834 io_log->req_cpu = io_req->cpu;
838 io_log->req_cpu = io_req->cpu;
839 io_log->int_cpu = io_req->int_cpu;
843 io_log->sge_type = io_req->sge_type;
852 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
854 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
863 /* Initialize rest of io_req fields */
864 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
865 sc_cmd->SCp.ptr = (char *)io_req;
866 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
869 io_req->cpu = smp_processor_id();
872 io_req->io_req_flags = QEDF_READ;
875 io_req->io_req_flags = QEDF_WRITE;
878 io_req->io_req_flags = 0;
882 xid = io_req->xid;
885 if (qedf_build_bd_list_from_sg(io_req)) {
887 /* Release cmd will release io_req, but sc_cmd is assigned */
888 io_req->sc_cmd = NULL;
889 kref_put(&io_req->refcount, qedf_release_cmd);
896 /* Release cmd will release io_req, but sc_cmd is assigned */
897 io_req->sc_cmd = NULL;
898 kref_put(&io_req->refcount, qedf_release_cmd);
903 io_req->lun = (int)sc_cmd->device->lun;
915 /* Release cmd will release io_req, but sc_cmd is assigned */
916 io_req->sc_cmd = NULL;
917 kref_put(&io_req->refcount, qedf_release_cmd);
921 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
927 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
929 if (qedf_io_tracing && io_req->sc_cmd)
930 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
943 struct qedf_ioreq *io_req;
1032 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1033 if (!io_req) {
1039 io_req->sc_cmd = sc_cmd;
1043 if (qedf_post_io_req(fcport, io_req)) {
1044 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1056 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1059 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1060 struct qedf_ctx *qedf = io_req->fcport->qedf;
1066 io_req->fcp_status = FC_GOOD;
1067 io_req->fcp_resid = 0;
1070 io_req->fcp_resid = fcp_rsp->fcp_resid;
1072 io_req->scsi_comp_flags = rsp_flags;
1073 CMD_SCSI_STATUS(sc_cmd) = io_req->cdb_status =
1084 io_req->fcp_rsp_len = fcp_rsp_len;
1085 io_req->fcp_sns_len = fcp_sns_len;
1086 rsp_info = sense_data = io_req->sense_buffer;
1091 io_req->fcp_rsp_code = rsp_info[3];
1093 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1113 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1115 struct scsi_cmnd *sc = io_req->sc_cmd;
1117 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1120 io_req->bd_tbl->bd_valid = 0;
1125 struct qedf_ioreq *io_req)
1136 if (!io_req)
1141 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1142 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1143 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1145 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1146 io_req->xid);
1150 sc_cmd = io_req->sc_cmd;
1182 fcport = io_req->fcport;
1193 io_req->xid);
1197 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1199 qedf_unmap_sg_list(qedf, io_req);
1202 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1205 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1206 io_req->fcp_rsp_code);
1216 io_req->xid, fcp_rsp->rsp_flags.flags,
1217 io_req->fcp_resid,
1221 if (io_req->cdb_status == 0)
1222 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1224 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1234 switch (io_req->fcp_status) {
1236 if (io_req->cdb_status == 0) {
1240 refcount = kref_read(&io_req->refcount);
1246 sc_cmd->device->lun, io_req->xid,
1249 io_req->cdb_status, io_req->fcp_resid,
1251 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1253 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1254 io_req->cdb_status == SAM_STAT_BUSY) {
1269 if (io_req->cdb_status ==
1276 if (io_req->fcp_resid)
1277 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1311 io_req->fcp_status);
1317 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1323 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1325 io_req->sc_cmd = NULL;
1328 kref_put(&io_req->refcount, qedf_release_cmd);
1332 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1338 if (!io_req) {
1339 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1343 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1345 "io_req:%p scsi_done handling already done\n",
1346 io_req);
1354 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1356 sc_cmd = io_req->sc_cmd;
1407 qedf_unmap_sg_list(qedf, io_req);
1410 refcount = kref_read(&io_req->refcount);
1427 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1429 io_req->sc_cmd = NULL;
1432 kref_put(&io_req->refcount, qedf_release_cmd);
1437 * Clear the io_req->sc_cmd backpointer so we don't try to process
1440 io_req->sc_cmd = NULL;
1441 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
1449 struct qedf_ioreq *io_req)
1452 struct qedf_rport *fcport = io_req->fcport;
1458 "cqe is NULL for io_req %p xid=0x%x\n",
1459 io_req, io_req->xid);
1463 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1464 "xid=0x%x\n", io_req->xid);
1465 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1469 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1491 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1492 io_req->rx_buf_off =
1494 io_req->tx_buf_off =
1496 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1497 rval = qedf_send_rec(io_req);
1499 * We only want to abort the io_req if we
1511 init_completion(&io_req->abts_done);
1512 rval = qedf_initiate_abts(io_req, true);
1519 struct qedf_ioreq *io_req)
1523 if (io_req == NULL) {
1524 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1528 if (io_req->fcport == NULL) {
1535 "cqe is NULL for io_req %p\n", io_req);
1539 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1540 "xid=0x%x\n", io_req->xid);
1541 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1545 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1552 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1553 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1554 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1557 io_req->xid);
1566 init_completion(&io_req->abts_done);
1567 rval = qedf_initiate_abts(io_req, true);
1605 struct qedf_ioreq *io_req;
1666 io_req = &cmd_mgr->cmds[i];
1668 if (!io_req)
1670 if (!io_req->fcport)
1675 if (io_req->alloc) {
1676 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1677 if (io_req->cmd_type == QEDF_SCSI_CMD)
1680 io_req->xid);
1688 if (io_req->fcport != fcport)
1694 * NULL, and we drop the ref on the io_req to clean it up.
1696 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1697 refcount = kref_read(&io_req->refcount);
1700 io_req->xid, io_req->cmd_type, refcount);
1702 * free the io_req
1704 if (atomic_read(&io_req->state) ==
1707 (&io_req->rrq_work)) {
1710 io_req->xid);
1712 kref_put(&io_req->refcount,
1720 if (io_req->cmd_type == QEDF_ELS &&
1722 rc = kref_get_unless_zero(&io_req->refcount);
1725 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1726 io_req, io_req->xid);
1729 qedf_initiate_cleanup(io_req, false);
1731 qedf_flush_els_req(qedf, io_req);
1740 if (io_req->cmd_type == QEDF_ABTS) {
1742 rc = kref_get_unless_zero(&io_req->refcount);
1745 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1746 io_req, io_req->xid);
1749 if (lun != -1 && io_req->lun != lun)
1753 "Flushing abort xid=0x%x.\n", io_req->xid);
1755 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1758 io_req->xid);
1759 kref_put(&io_req->refcount, qedf_release_cmd);
1762 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1765 io_req->xid);
1766 qedf_initiate_cleanup(io_req, true);
1770 complete(&io_req->abts_done);
1771 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1773 kref_put(&io_req->refcount, qedf_release_cmd);
1779 if (!io_req->sc_cmd)
1781 if (!io_req->sc_cmd->device) {
1784 io_req->sc_cmd);
1786 io_req->sc_cmd = NULL;
1787 qedf_initiate_cleanup(io_req, false);
1788 kref_put(&io_req->refcount, qedf_release_cmd);
1792 if (io_req->lun != lun)
1800 rc = kref_get_unless_zero(&io_req->refcount);
1803 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1808 "Cleanup xid=0x%x.\n", io_req->xid);
1812 qedf_initiate_cleanup(io_req, true);
1815 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1837 io_req = &cmd_mgr->cmds[i];
1838 if (io_req->fcport &&
1839 io_req->fcport == fcport) {
1841 kref_read(&io_req->refcount);
1843 &io_req->flags);
1845 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1846 io_req, io_req->xid,
1847 io_req->flags,
1848 io_req->sc_cmd,
1850 io_req->cmd_type);
1871 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1874 struct qedf_rport *fcport = io_req->fcport;
1928 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1929 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1930 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1932 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1933 io_req->xid, io_req->sc_cmd);
1940 io_req->cmd_type = QEDF_ABTS;
1943 kref_get(&io_req->refcount);
1945 xid = io_req->xid;
1949 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1951 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1952 refcount = kref_read(&io_req->refcount);
1954 "ABTS io_req xid = 0x%x refcount=%d\n",
1957 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1964 io_req->task_params->sqe = sqe;
1966 init_initiator_abort_fcoe_task(io_req->task_params);
1978 struct qedf_ioreq *io_req)
1982 struct qedf_rport *fcport = io_req->fcport;
1985 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1991 * the io_req to be freed from the other context before we got here.
1996 io_req->xid);
2008 io_req->xid);
2012 if (!cancel_delayed_work(&io_req->timeout_work)) {
2021 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2022 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2026 io_req->xid);
2033 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2035 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2041 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2048 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2050 if (io_req->sc_cmd) {
2051 if (!io_req->return_scsi_cmd_on_abts)
2054 io_req->xid);
2055 if (io_req->return_scsi_cmd_on_abts)
2056 qedf_scsi_done(qedf, io_req, DID_ERROR);
2060 complete(&io_req->abts_done);
2062 kref_put(&io_req->refcount, qedf_release_cmd);
2065 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2070 struct qedf_ctx *qedf = io_req->fcport->qedf;
2076 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2079 if (io_req->cmd_type != QEDF_ELS) {
2081 io_req->data_xfer_len = mp_req->req_len;
2083 mp_req->req_len = io_req->data_xfer_len;
2089 qedf_free_mp_resc(io_req);
2098 qedf_free_mp_resc(io_req);
2108 qedf_free_mp_resc(io_req);
2116 qedf_free_mp_resc(io_req);
2169 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2181 fcport = io_req->fcport;
2199 if (io_req->cmd_type == QEDF_ELS) {
2203 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2204 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2205 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2207 io_req->xid);
2210 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2217 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2221 if (io_req->cmd_type == QEDF_CLEANUP) {
2223 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2224 io_req->xid, io_req->cmd_type);
2225 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2229 refcount = kref_read(&io_req->refcount);
2233 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2238 io_req->cmd_type = QEDF_CLEANUP;
2240 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2242 init_completion(&io_req->cleanup_done);
2249 io_req->task_params->sqe = sqe;
2251 init_initiator_cleanup_fcoe_task(io_req->task_params);
2256 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2263 "xid=%x.\n", io_req->xid);
2264 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2273 if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2274 io_req->tm_flags == FCP_TMF_TGT_RESET) {
2275 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2276 io_req->sc_cmd = NULL;
2277 kref_put(&io_req->refcount, qedf_release_cmd);
2278 complete(&io_req->tm_done);
2281 if (io_req->sc_cmd) {
2282 if (!io_req->return_scsi_cmd_on_abts)
2285 io_req->xid);
2286 if (io_req->return_scsi_cmd_on_abts)
2287 qedf_scsi_done(qedf, io_req, DID_ERROR);
2291 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2293 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
2299 struct qedf_ioreq *io_req)
2302 io_req->xid);
2304 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2307 complete(&io_req->cleanup_done);
2313 struct qedf_ioreq *io_req;
2337 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2338 if (!io_req) {
2349 /* Initialize rest of io_req fields */
2350 io_req->sc_cmd = sc_cmd;
2351 io_req->fcport = fcport;
2352 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2355 io_req->cpu = smp_processor_id();
2358 io_req->io_req_flags = QEDF_READ;
2359 io_req->data_xfer_len = 0;
2360 io_req->tm_flags = tm_flags;
2363 io_req->return_scsi_cmd_on_abts = false;
2366 xid = io_req->xid;
2368 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2374 init_completion(&io_req->tm_done);
2382 qedf_init_task(fcport, lport, io_req, task, sqe);
2387 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2388 tmo = wait_for_completion_timeout(&io_req->tm_done,
2395 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2396 io_req->sc_cmd = NULL;
2399 if (io_req->fcp_rsp_code == 0)
2413 /* We do not need this io_req any more */
2414 kref_put(&io_req->refcount, qedf_release_cmd);
2442 struct qedf_ioreq *io_req = NULL;
2461 io_req = (struct qedf_ioreq *)sc_cmd->SCp.ptr;
2462 ref_cnt = kref_read(&io_req->refcount);
2464 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2465 io_req, io_req->xid, ref_cnt);
2531 struct qedf_ioreq *io_req)
2535 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2538 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2540 io_req->sc_cmd = NULL;
2541 complete(&io_req->tm_done);