Lines matching defs:io_req
11 void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
14 queue_delayed_work(qedf->timer_work_queue, &io_req->timeout_work,
21 struct qedf_ioreq *io_req =
26 fcport = io_req->fcport;
27 if (io_req->fcport == NULL) {
34 switch (io_req->cmd_type) {
39 io_req->xid);
44 io_req->xid);
46 qedf_initiate_cleanup(io_req, true);
47 complete(&io_req->abts_done);
54 kref_put(&io_req->refcount, qedf_release_cmd);
57 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
69 io_req->xid);
73 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
75 kref_get(&io_req->refcount);
82 io_req->xid);
83 qedf_initiate_cleanup(io_req, true);
84 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
86 if (io_req->cb_func && io_req->cb_arg) {
87 io_req->cb_func(io_req->cb_arg);
88 io_req->cb_arg = NULL;
90 kref_put(&io_req->refcount, qedf_release_cmd);
94 "xid=0x%x.\n", io_req->xid);
95 qedf_initiate_cleanup(io_req, true);
96 io_req->event = QEDF_IOREQ_EV_ELS_TMO;
97 qedf_process_seq_cleanup_compl(qedf, NULL, io_req);
101 "Hit default case, xid=0x%x.\n", io_req->xid);
115 struct qedf_ioreq *io_req;
147 io_req = &cmgr->cmds[i];
148 kfree(io_req->sgl_task_params);
149 kfree(io_req->task_params);
151 if (io_req->sense_buffer)
153 QEDF_SCSI_SENSE_BUFFERSIZE, io_req->sense_buffer,
154 io_req->sense_buffer_dma);
155 cancel_delayed_work_sync(&io_req->rrq_work);
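
This free loop tears down what qedf_cmd_mgr_alloc() builds per command: two kzalloc'd parameter blocks, a DMA-coherent sense buffer, and the RRQ delayed work, cancelled synchronously so the handler cannot run against freed memory. A hedged standalone sketch of that teardown, with a hypothetical my_cmd layout:

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_cmd {
        void *task_params;
        void *sgl_task_params;
        void *sense_buffer;
        dma_addr_t sense_buffer_dma;
        struct delayed_work rrq_work;
    };

    static void my_cmd_teardown(struct device *dev, struct my_cmd *cmd,
                                size_t sense_len)
    {
        kfree(cmd->sgl_task_params);    /* kfree(NULL) is a no-op */
        kfree(cmd->task_params);

        if (cmd->sense_buffer)
            dma_free_coherent(dev, sense_len, cmd->sense_buffer,
                              cmd->sense_buffer_dma);

        /* Block until pending RRQ work has run or been cancelled. */
        cancel_delayed_work_sync(&cmd->rrq_work);
    }
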
164 struct qedf_ioreq *io_req =
167 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_ACTIVE);
168 qedf_send_rrq(io_req);
176 struct qedf_ioreq *io_req;
215 io_req = &cmgr->cmds[i];
216 INIT_DELAYED_WORK(&io_req->timeout_work, qedf_cmd_timeout);
218 io_req->xid = xid++;
220 INIT_DELAYED_WORK(&io_req->rrq_work, qedf_handle_rrq);
223 io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev,
224 QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma,
226 if (!io_req->sense_buffer) {
233 io_req->task_params = kzalloc(sizeof(*io_req->task_params),
235 if (!io_req->task_params) {
246 io_req->sgl_task_params = kzalloc(
248 if (!io_req->sgl_task_params) {
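
The allocation side mirrors that teardown: a DMA-coherent sense buffer plus two zeroed parameter blocks per command, with each failure unwinding everything allocated before it. A sketch under the same hypothetical layout (my_task_params and my_sgl_params stand in for the firmware structures):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    struct my_task_params { u32 itid; };
    struct my_sgl_params  { u32 num_sges; };

    struct my_cmd {
        struct my_task_params *task_params;
        struct my_sgl_params *sgl_task_params;
        void *sense_buffer;
        dma_addr_t sense_buffer_dma;
    };

    static int my_cmd_setup(struct device *dev, struct my_cmd *cmd,
                            size_t sense_len)
    {
        cmd->sense_buffer = dma_alloc_coherent(dev, sense_len,
                                               &cmd->sense_buffer_dma,
                                               GFP_KERNEL);
        if (!cmd->sense_buffer)
            return -ENOMEM;

        cmd->task_params = kzalloc(sizeof(*cmd->task_params), GFP_KERNEL);
        if (!cmd->task_params)
            goto free_sense;

        cmd->sgl_task_params = kzalloc(sizeof(*cmd->sgl_task_params),
                                       GFP_KERNEL);
        if (!cmd->sgl_task_params)
            goto free_task;

        return 0;

    free_task:
        kfree(cmd->task_params);
    free_sense:
        dma_free_coherent(dev, sense_len, cmd->sense_buffer,
                          cmd->sense_buffer_dma);
        return -ENOMEM;
    }
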
302 struct qedf_ioreq *io_req = NULL;
337 io_req = &cmd_mgr->cmds[cmd_mgr->idx];
343 if (!io_req->alloc)
352 if (test_bit(QEDF_CMD_DIRTY, &io_req->flags))
354 "io_req found to be dirty ox_id = 0x%x.\n",
355 io_req->xid);
358 io_req->flags = 0;
359 io_req->alloc = 1;
364 xid = io_req->xid;
367 io_req->cmd_mgr = cmd_mgr;
368 io_req->fcport = fcport;
371 io_req->sc_cmd = NULL;
372 io_req->lun = -1;
374 /* Hold the io_req against deletion */
375 kref_init(&io_req->refcount); /* ID: 001 */
376 atomic_set(&io_req->state, QEDFC_CMD_ST_IO_ACTIVE);
378 /* Bind io_bdt for this io_req */
379 /* Have a static link between io_req and io_bdt_pool */
380 bd_tbl = io_req->bd_tbl = cmd_mgr->io_bdt_pool[xid];
383 kref_put(&io_req->refcount, qedf_release_cmd);
386 bd_tbl->io_req = io_req;
387 io_req->cmd_type = cmd_type;
388 io_req->tm_flags = 0;
391 io_req->rx_buf_off = 0;
392 io_req->tx_buf_off = 0;
393 io_req->rx_id = 0xffff; /* No OX_ID */
395 return io_req;
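
Note the /* ID: 001 */ tag on kref_init(): the driver numbers its reference sites so every get can be matched to the put that balances it; the allocation reference taken here is dropped at line 1421 in qedf_scsi_done(), carrying the same tag. A minimal sketch of that bookkeeping convention:

    #include <linux/kref.h>
    #include <linux/printk.h>

    struct my_cmd {
        struct kref refcount;
    };

    static void my_release_cmd(struct kref *ref)
    {
        struct my_cmd *cmd = container_of(ref, struct my_cmd, refcount);

        pr_debug("returning %p to the free pool\n", cmd);
    }

    static void my_alloc_cmd(struct my_cmd *cmd)
    {
        kref_init(&cmd->refcount);                  /* ID: 001 - allocation ref */
    }

    static void my_done(struct my_cmd *cmd)
    {
        kref_put(&cmd->refcount, my_release_cmd);   /* ID: 001 - balanced here */
    }
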
403 static void qedf_free_mp_resc(struct qedf_ioreq *io_req)
405 struct qedf_mp_req *mp_req = &(io_req->mp_req);
406 struct qedf_ctx *qedf = io_req->fcport->qedf;
434 struct qedf_ioreq *io_req =
436 struct qedf_cmd_mgr *cmd_mgr = io_req->cmd_mgr;
437 struct qedf_rport *fcport = io_req->fcport;
440 if (io_req->cmd_type == QEDF_SCSI_CMD) {
442 "Cmd released called without scsi_done called, io_req %p xid=0x%x.\n",
443 io_req, io_req->xid);
444 WARN_ON(io_req->sc_cmd);
447 if (io_req->cmd_type == QEDF_ELS ||
448 io_req->cmd_type == QEDF_TASK_MGMT_CMD)
449 qedf_free_mp_resc(io_req);
453 atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE);
460 io_req->task_retry_identifier++;
461 io_req->fcport = NULL;
463 clear_bit(QEDF_CMD_DIRTY, &io_req->flags);
464 io_req->cpu = 0;
466 io_req->fcport = NULL;
467 io_req->alloc = 0;
471 static int qedf_map_sg(struct qedf_ioreq *io_req)
473 struct scsi_cmnd *sc = io_req->sc_cmd;
477 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
490 io_req->sge_type = QEDF_IOREQ_UNKNOWN_SGE;
492 if (sg_count <= 8 || io_req->io_req_flags == QEDF_READ)
493 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
504 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE && (i) &&
506 io_req->sge_type = QEDF_IOREQ_SLOW_SGE;
517 if (io_req->sge_type == QEDF_IOREQ_UNKNOWN_SGE)
518 io_req->sge_type = QEDF_IOREQ_FAST_SGE;
523 scsi_bufflen(sc), io_req->xid);
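
qedf_map_sg() wraps scsi_dma_map() and then classifies the I/O: a short list or a read goes out as a fast SGE (line 492), while an undersized middle element demotes it to the slow path (lines 504-506, condition elided by the match). A hedged sketch; the page-size test on middle elements is an assumption about that elided condition:

    #include <linux/scatterlist.h>
    #include <linux/types.h>
    #include <scsi/scsi_cmnd.h>

    enum my_sge_type { MY_FAST_SGE, MY_SLOW_SGE };

    /* Returns an SGE classification, or a negative errno from mapping. */
    static int my_map_sg(struct scsi_cmnd *sc, bool is_read)
    {
        struct scatterlist *sg;
        int sg_count, i;

        sg_count = scsi_dma_map(sc);
        if (sg_count < 0)
            return sg_count;

        /* Short lists and reads always take the fast path. */
        if (sg_count <= 8 || is_read)
            return MY_FAST_SGE;

        scsi_for_each_sg(sc, sg, sg_count, i) {
            /* Assumed rule: a middle element shorter than a page
             * forces the slow (small/mid SGE) path. */
            if (i && i != sg_count - 1 && sg_dma_len(sg) < PAGE_SIZE)
                return MY_SLOW_SGE;
        }

        return MY_FAST_SGE;
    }
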
528 static int qedf_build_bd_list_from_sg(struct qedf_ioreq *io_req)
530 struct scsi_cmnd *sc = io_req->sc_cmd;
531 struct scsi_sge *bd = io_req->bd_tbl->bd_tbl;
535 bd_count = qedf_map_sg(io_req);
543 io_req->bd_tbl->bd_valid = bd_count;
548 static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
551 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
562 fcp_cmnd->fc_tm_flags = io_req->tm_flags;
563 fcp_cmnd->fc_flags = io_req->io_req_flags;
567 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
579 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD)
583 fcp_cmnd->fc_dl = htonl(io_req->data_xfer_len);
587 struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
591 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
592 struct io_bdt *bd_tbl = io_req->bd_tbl;
604 io_req->task = task_ctx;
606 memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
607 memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
610 if (io_req->cmd_type == QEDF_TASK_MGMT_CMD) {
615 tx_io_size = io_req->data_xfer_len;
618 rx_io_size = io_req->data_xfer_len;
623 io_req->task_params->context = task_ctx;
624 io_req->task_params->sqe = sqe;
625 io_req->task_params->task_type = task_type;
626 io_req->task_params->tx_io_size = tx_io_size;
627 io_req->task_params->rx_io_size = rx_io_size;
628 io_req->task_params->conn_cid = fcport->fw_cid;
629 io_req->task_params->itid = io_req->xid;
630 io_req->task_params->cq_rss_number = cq_idx;
631 io_req->task_params->is_tape_device = fcport->dev_type;
634 if (io_req->cmd_type != QEDF_TASK_MGMT_CMD) {
636 io_req->sgl_task_params->sgl = bd_tbl->bd_tbl;
637 io_req->sgl_task_params->sgl_phys_addr.lo =
639 io_req->sgl_task_params->sgl_phys_addr.hi =
641 io_req->sgl_task_params->num_sges = bd_count;
642 io_req->sgl_task_params->total_buffer_size =
643 scsi_bufflen(io_req->sc_cmd);
644 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
645 io_req->sgl_task_params->small_mid_sge = 1;
647 io_req->sgl_task_params->small_mid_sge = 0;
651 sense_data_buffer_phys_addr.lo = U64_LO(io_req->sense_buffer_dma);
652 sense_data_buffer_phys_addr.hi = U64_HI(io_req->sense_buffer_dma);
655 qedf_build_fcp_cmnd(io_req, (struct fcp_cmnd *)tmp_fcp_cmnd);
664 init_initiator_rw_fcoe_task(io_req->task_params,
665 io_req->sgl_task_params,
667 io_req->task_retry_identifier, fcp_cmnd);
670 if (io_req->sge_type == QEDF_IOREQ_SLOW_SGE)
676 void qedf_init_mp_task(struct qedf_ioreq *io_req,
679 struct qedf_mp_req *mp_req = &(io_req->mp_req);
680 struct qedf_rport *fcport = io_req->fcport;
681 struct qedf_ctx *qedf = io_req->fcport->qedf;
689 io_req->cmd_type);
698 /* Setup the task from io_req for easy reference */
699 io_req->task = task_ctx;
702 io_req->task_params->context = task_ctx;
703 io_req->task_params->sqe = sqe;
704 io_req->task_params->task_type = FCOE_TASK_TYPE_MIDPATH;
705 io_req->task_params->tx_io_size = io_req->data_xfer_len;
707 io_req->task_params->rx_io_size = PAGE_SIZE;
708 io_req->task_params->conn_cid = fcport->fw_cid;
709 io_req->task_params->itid = io_req->xid;
711 io_req->task_params->cq_rss_number = 0;
712 io_req->task_params->is_tape_device = fcport->dev_type;
716 fc_hdr->fh_ox_id = io_req->xid;
734 tx_sgl_task_params.total_buffer_size = io_req->data_xfer_len;
751 init_initiator_midpath_unsolicited_fcoe_task(io_req->task_params,
800 static void qedf_trace_io(struct qedf_rport *fcport, struct qedf_ioreq *io_req,
805 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
812 io_log->task_id = io_req->xid;
824 io_log->refcount = kref_read(&io_req->refcount);
828 io_log->req_cpu = io_req->cpu;
832 io_log->req_cpu = io_req->cpu;
833 io_log->int_cpu = io_req->int_cpu;
837 io_log->sge_type = io_req->sge_type;
846 int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
848 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
857 /* Initialize rest of io_req fields */
858 io_req->data_xfer_len = scsi_bufflen(sc_cmd);
859 qedf_priv(sc_cmd)->io_req = io_req;
860 io_req->sge_type = QEDF_IOREQ_FAST_SGE; /* Assume fast SGL by default */
863 io_req->cpu = smp_processor_id();
866 io_req->io_req_flags = QEDF_READ;
869 io_req->io_req_flags = QEDF_WRITE;
872 io_req->io_req_flags = 0;
876 xid = io_req->xid;
879 if (qedf_build_bd_list_from_sg(io_req)) {
881 /* Release cmd will release io_req, but sc_cmd is assigned */
882 io_req->sc_cmd = NULL;
883 kref_put(&io_req->refcount, qedf_release_cmd);
890 /* Release cmd will release io_req, but sc_cmd is assigned */
891 io_req->sc_cmd = NULL;
892 kref_put(&io_req->refcount, qedf_release_cmd);
897 io_req->lun = (int)sc_cmd->device->lun;
909 /* Release cmd will release io_req, but sc_cmd is assigned */
910 io_req->sc_cmd = NULL;
911 kref_put(&io_req->refcount, qedf_release_cmd);
915 qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
921 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
923 if (qedf_io_tracing && io_req->sc_cmd)
924 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_REQ);
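
The error paths in qedf_post_io_req() repeat one idiom three times: set io_req->sc_cmd to NULL before kref_put(), so the release callback cannot complete a scsi_cmnd the midlayer still owns and will retry. Sketch of the idiom, with my_build_bd_list() standing in for any step that can fail after allocation:

    #include <linux/bug.h>
    #include <linux/errno.h>
    #include <linux/kref.h>
    #include <scsi/scsi_cmnd.h>

    struct my_cmd {
        struct kref refcount;
        struct scsi_cmnd *sc_cmd;
    };

    static void my_release_cmd(struct kref *ref)
    {
        struct my_cmd *cmd = container_of(ref, struct my_cmd, refcount);

        WARN_ON(cmd->sc_cmd);   /* must be detached before the last put */
    }

    static int my_build_bd_list(struct my_cmd *cmd)
    {
        return 0;   /* stub: pretend SGL mapping succeeded */
    }

    static int my_post_io(struct my_cmd *cmd)
    {
        if (my_build_bd_list(cmd)) {
            cmd->sc_cmd = NULL;     /* detach: midlayer keeps ownership */
            kref_put(&cmd->refcount, my_release_cmd);
            return -EAGAIN;
        }
        return 0;
    }
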
937 struct qedf_ioreq *io_req;
1026 io_req = qedf_alloc_cmd(fcport, QEDF_SCSI_CMD);
1027 if (!io_req) {
1033 io_req->sc_cmd = sc_cmd;
1037 if (qedf_post_io_req(fcport, io_req)) {
1038 QEDF_WARN(&(qedf->dbg_ctx), "Unable to post io_req\n");
1050 static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
1053 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1054 struct qedf_ctx *qedf = io_req->fcport->qedf;
1060 io_req->fcp_status = FC_GOOD;
1061 io_req->fcp_resid = 0;
1064 io_req->fcp_resid = fcp_rsp->fcp_resid;
1066 io_req->scsi_comp_flags = rsp_flags;
1067 io_req->cdb_status = fcp_rsp->scsi_status_code;
1077 io_req->fcp_rsp_len = fcp_rsp_len;
1078 io_req->fcp_sns_len = fcp_sns_len;
1079 rsp_info = sense_data = io_req->sense_buffer;
1084 io_req->fcp_rsp_code = rsp_info[3];
1086 "fcp_rsp_code = %d\n", io_req->fcp_rsp_code);
1106 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
1108 struct scsi_cmnd *sc = io_req->sc_cmd;
1110 if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
1113 io_req->bd_tbl->bd_valid = 0;
1118 struct qedf_ioreq *io_req)
1129 if (!io_req)
1134 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1135 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1136 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1138 "io_req xid=0x%x already in cleanup or abort processing or already completed.\n",
1139 io_req->xid);
1143 sc_cmd = io_req->sc_cmd;
1151 if (!qedf_priv(sc_cmd)->io_req) {
1153 "io_req is NULL, returned in another context.\n");
1169 fcport = io_req->fcport;
1180 io_req->xid);
1184 qedf_parse_fcp_rsp(io_req, fcp_rsp);
1186 qedf_unmap_sg_list(qedf, io_req);
1189 if (io_req->fcp_rsp_len > 3 && io_req->fcp_rsp_code) {
1192 "fcp_rsp_code=%d.\n", io_req->xid, io_req->fcp_rsp_len,
1193 io_req->fcp_rsp_code);
1203 io_req->xid, fcp_rsp->rsp_flags.flags,
1204 io_req->fcp_resid,
1208 if (io_req->cdb_status == 0)
1209 sc_cmd->result = (DID_ERROR << 16) | io_req->cdb_status;
1211 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1221 switch (io_req->fcp_status) {
1223 if (io_req->cdb_status == 0) {
1227 refcount = kref_read(&io_req->refcount);
1233 sc_cmd->device->lun, io_req->xid,
1236 io_req->cdb_status, io_req->fcp_resid,
1238 sc_cmd->result = (DID_OK << 16) | io_req->cdb_status;
1240 if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
1241 io_req->cdb_status == SAM_STAT_BUSY) {
1256 if (io_req->cdb_status ==
1263 if (io_req->fcp_resid)
1264 scsi_set_resid(sc_cmd, io_req->fcp_resid);
1298 io_req->fcp_status);
1304 qedf_trace_io(fcport, io_req, QEDF_IO_TRACE_RSP);
1310 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1312 io_req->sc_cmd = NULL;
1313 qedf_priv(sc_cmd)->io_req = NULL;
1315 kref_put(&io_req->refcount, qedf_release_cmd);
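
The tail of qedf_scsi_completion() is careful about ordering: clear QEDF_CMD_OUTSTANDING, sever the link in both directions (io_req->sc_cmd and qedf_priv(sc_cmd)->io_req), and only then drop the reference. A sketch with hypothetical flag and private-data names:

    #include <linux/bitops.h>
    #include <linux/kref.h>
    #include <scsi/scsi_cmnd.h>

    #define MY_CMD_OUTSTANDING  0   /* bit index in cmd->flags */

    struct my_cmd {
        unsigned long flags;
        struct kref refcount;
        struct scsi_cmnd *sc_cmd;
    };

    struct my_priv {                /* lives in the scsi_cmnd private area */
        struct my_cmd *cmd;
    };

    static void my_release_cmd(struct kref *ref) { }

    static void my_completion_tail(struct my_cmd *cmd, struct my_priv *priv)
    {
        clear_bit(MY_CMD_OUTSTANDING, &cmd->flags);

        /* Sever both backpointers before the last reference can drop. */
        cmd->sc_cmd = NULL;
        priv->cmd = NULL;

        kref_put(&cmd->refcount, my_release_cmd);
    }
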
1319 void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
1325 if (!io_req) {
1326 QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n");
1330 if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) {
1332 "io_req:%p scsi_done handling already done\n",
1333 io_req);
1341 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
1343 sc_cmd = io_req->sc_cmd;
1355 if (!qedf_priv(sc_cmd)->io_req) {
1357 "io_req is NULL, returned in another context.\n");
1387 qedf_unmap_sg_list(qedf, io_req);
1390 refcount = kref_read(&io_req->refcount);
1407 qedf_trace_io(io_req->fcport, io_req, QEDF_IO_TRACE_RSP);
1409 io_req->sc_cmd = NULL;
1410 qedf_priv(sc_cmd)->io_req = NULL;
1412 kref_put(&io_req->refcount, qedf_release_cmd);
1417 * Clear the io_req->sc_cmd backpointer so we don't try to process
1420 io_req->sc_cmd = NULL;
1421 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 001 */
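
qedf_scsi_done() opens with test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, ...): a one-line idempotence guard for when abort, cleanup, and flush paths race to error-complete the same command. Sketch:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define MY_CMD_ERR_DONE 2       /* bit index in cmd->flags */

    struct my_cmd { unsigned long flags; };

    /* Returns true for exactly one caller; every later caller loses
     * the race and must not touch the scsi_cmnd again. */
    static bool my_claim_error_done(struct my_cmd *cmd)
    {
        return !test_and_set_bit(MY_CMD_ERR_DONE, &cmd->flags);
    }
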
1429 struct qedf_ioreq *io_req)
1432 struct qedf_rport *fcport = io_req->fcport;
1438 "cqe is NULL for io_req %p xid=0x%x\n",
1439 io_req, io_req->xid);
1443 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, "
1444 "xid=0x%x\n", io_req->xid);
1445 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1449 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1471 if (!test_bit(QEDF_CMD_SRR_SENT, &io_req->flags)) {
1472 io_req->rx_buf_off =
1474 io_req->tx_buf_off =
1476 io_req->rx_id = cqe->cqe_info.err_info.rx_id;
1477 rval = qedf_send_rec(io_req);
1479 * We only want to abort the io_req if we
1491 init_completion(&io_req->abts_done);
1492 rval = qedf_initiate_abts(io_req, true);
1499 struct qedf_ioreq *io_req)
1503 if (io_req == NULL) {
1504 QEDF_INFO(NULL, QEDF_LOG_IO, "io_req is NULL.\n");
1508 if (io_req->fcport == NULL) {
1515 "cqe is NULL for io_req %p\n", io_req);
1519 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, "
1520 "xid=0x%x\n", io_req->xid);
1521 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx),
1525 QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "tx_buff_off=%08x, "
1532 if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &io_req->fcport->flags) ||
1533 (test_bit(QEDF_RPORT_IN_LUN_RESET, &io_req->fcport->flags) &&
1534 io_req->sc_cmd->device->lun == (u64)io_req->fcport->lun_reset_lun)) {
1537 io_req->xid);
1546 init_completion(&io_req->abts_done);
1547 rval = qedf_initiate_abts(io_req, true);
1585 struct qedf_ioreq *io_req;
1646 io_req = &cmd_mgr->cmds[i];
1648 if (!io_req)
1650 if (!io_req->fcport)
1655 if (io_req->alloc) {
1656 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1657 if (io_req->cmd_type == QEDF_SCSI_CMD)
1660 io_req->xid);
1668 if (io_req->fcport != fcport)
1674 * NULL, and we drop the ref on the io_req to clean it up.
1676 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags)) {
1677 refcount = kref_read(&io_req->refcount);
1680 io_req->xid, io_req->cmd_type, refcount);
1682 * free the io_req
1684 if (atomic_read(&io_req->state) ==
1687 (&io_req->rrq_work)) {
1690 io_req->xid);
1692 kref_put(&io_req->refcount,
1700 if (io_req->cmd_type == QEDF_ELS &&
1702 rc = kref_get_unless_zero(&io_req->refcount);
1705 "Could not get kref for ELS io_req=0x%p xid=0x%x.\n",
1706 io_req, io_req->xid);
1709 qedf_initiate_cleanup(io_req, false);
1711 qedf_flush_els_req(qedf, io_req);
1720 if (io_req->cmd_type == QEDF_ABTS) {
1722 rc = kref_get_unless_zero(&io_req->refcount);
1725 "Could not get kref for abort io_req=0x%p xid=0x%x.\n",
1726 io_req, io_req->xid);
1729 if (lun != -1 && io_req->lun != lun)
1733 "Flushing abort xid=0x%x.\n", io_req->xid);
1735 if (cancel_delayed_work_sync(&io_req->rrq_work)) {
1738 io_req->xid);
1739 kref_put(&io_req->refcount, qedf_release_cmd);
1742 if (cancel_delayed_work_sync(&io_req->timeout_work)) {
1745 io_req->xid);
1746 qedf_initiate_cleanup(io_req, true);
1750 complete(&io_req->abts_done);
1751 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1753 kref_put(&io_req->refcount, qedf_release_cmd);
1759 if (!io_req->sc_cmd)
1761 if (!io_req->sc_cmd->device) {
1764 io_req->sc_cmd);
1766 io_req->sc_cmd = NULL;
1767 qedf_initiate_cleanup(io_req, false);
1768 kref_put(&io_req->refcount, qedf_release_cmd);
1772 if (io_req->lun != lun)
1780 rc = kref_get_unless_zero(&io_req->refcount);
1783 "io_req=0x%p xid=0x%x\n", io_req, io_req->xid);
1788 "Cleanup xid=0x%x.\n", io_req->xid);
1792 qedf_initiate_cleanup(io_req, true);
1795 kref_put(&io_req->refcount, qedf_release_cmd); /* ID: 004 */
1817 io_req = &cmd_mgr->cmds[i];
1818 if (io_req->fcport &&
1819 io_req->fcport == fcport) {
1821 kref_read(&io_req->refcount);
1823 &io_req->flags);
1825 "Outstanding io_req =%p xid=0x%x flags=0x%lx, sc_cmd=%p refcount=%d cmd_type=%d.\n",
1826 io_req, io_req->xid,
1827 io_req->flags,
1828 io_req->sc_cmd,
1830 io_req->cmd_type);
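
qedf_flush_active_ios() walks the entire command table, so it can race with commands that are mid-release; kref_get_unless_zero() makes the walk safe by taking a reference only while the count is still nonzero. Sketch of the loop body (my_initiate_cleanup() is an assumed stand-in for the driver's cleanup call):

    #include <linux/kref.h>

    struct my_cmd { struct kref refcount; };

    static void my_release_cmd(struct kref *ref) { }
    static void my_initiate_cleanup(struct my_cmd *cmd) { }

    static void my_flush_one(struct my_cmd *cmd)
    {
        /* A zero refcount means release has begun: skip this slot. */
        if (!kref_get_unless_zero(&cmd->refcount))
            return;

        my_initiate_cleanup(cmd);

        kref_put(&cmd->refcount, my_release_cmd);   /* drop the flush ref */
    }
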
1851 int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
1854 struct qedf_rport *fcport = io_req->fcport;
1908 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
1909 test_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags) ||
1910 test_bit(QEDF_CMD_IN_ABORT, &io_req->flags)) {
1912 "io_req xid=0x%x sc_cmd=%p already in cleanup or abort processing or already completed.\n",
1913 io_req->xid, io_req->sc_cmd);
1920 io_req->cmd_type = QEDF_ABTS;
1923 kref_get(&io_req->refcount);
1925 xid = io_req->xid;
1929 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
1931 set_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
1932 refcount = kref_read(&io_req->refcount);
1934 "ABTS io_req xid = 0x%x refcount=%d\n",
1937 qedf_cmd_timer_set(qedf, io_req, QEDF_ABORT_TIMEOUT);
1944 io_req->task_params->sqe = sqe;
1946 init_initiator_abort_fcoe_task(io_req->task_params);
1958 struct qedf_ioreq *io_req)
1962 struct qedf_rport *fcport = io_req->fcport;
1965 "0x%x cmd_type = %d\n", io_req->xid, io_req->cmd_type);
1971 * the io_req to be freed from the other context before we got here.
1976 io_req->xid);
1988 io_req->xid);
1992 if (!cancel_delayed_work(&io_req->timeout_work)) {
2001 io_req->event = QEDF_IOREQ_EV_ABORT_SUCCESS;
2002 rc = kref_get_unless_zero(&io_req->refcount); /* ID: 003 */
2006 io_req->xid);
2013 queue_delayed_work(qedf->dpc_wq, &io_req->rrq_work,
2015 atomic_set(&io_req->state, QEDFC_CMD_ST_RRQ_WAIT);
2021 io_req->event = QEDF_IOREQ_EV_ABORT_FAILED;
2028 clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags);
2030 if (io_req->sc_cmd) {
2031 if (!io_req->return_scsi_cmd_on_abts)
2034 io_req->xid);
2035 if (io_req->return_scsi_cmd_on_abts)
2036 qedf_scsi_done(qedf, io_req, DID_ERROR);
2040 complete(&io_req->abts_done);
2042 kref_put(&io_req->refcount, qedf_release_cmd);
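
On ABTS success the exchange is not freed immediately: the driver takes a dedicated reference (tagged ID: 003), moves the command to RRQ_WAIT, and defers rrq_work, since in FC an RRQ should be sent before the aborted exchange's ID is reused. A sketch of that deferral; the delay value is an assumption:

    #include <linux/atomic.h>
    #include <linux/jiffies.h>
    #include <linux/kref.h>
    #include <linux/workqueue.h>

    #define MY_RRQ_DELAY_MS 10000   /* assumed hold-off before sending RRQ */

    enum { MY_ST_RRQ_WAIT = 1 };

    struct my_cmd {
        struct kref refcount;
        atomic_t state;
        struct delayed_work rrq_work;   /* handler sends the RRQ */
    };

    static void my_abts_success(struct workqueue_struct *wq,
                                struct my_cmd *cmd)
    {
        /* Dedicated reference for the deferred work (cf. ID: 003). */
        if (!kref_get_unless_zero(&cmd->refcount))
            return;

        atomic_set(&cmd->state, MY_ST_RRQ_WAIT);
        queue_delayed_work(wq, &cmd->rrq_work,
                           msecs_to_jiffies(MY_RRQ_DELAY_MS));
    }
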
2045 int qedf_init_mp_req(struct qedf_ioreq *io_req)
2050 struct qedf_ctx *qedf = io_req->fcport->qedf;
2056 mp_req = (struct qedf_mp_req *)&(io_req->mp_req);
2059 if (io_req->cmd_type != QEDF_ELS) {
2061 io_req->data_xfer_len = mp_req->req_len;
2063 mp_req->req_len = io_req->data_xfer_len;
2069 qedf_free_mp_resc(io_req);
2078 qedf_free_mp_resc(io_req);
2088 qedf_free_mp_resc(io_req);
2096 qedf_free_mp_resc(io_req);
2149 int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
2161 fcport = io_req->fcport;
2179 if (io_req->cmd_type == QEDF_ELS) {
2183 if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
2184 test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
2185 QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
2187 io_req->xid);
2190 set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2197 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2201 if (io_req->cmd_type == QEDF_CLEANUP) {
2203 "io_req=0x%x is already a cleanup command cmd_type=%d.\n",
2204 io_req->xid, io_req->cmd_type);
2205 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2209 refcount = kref_read(&io_req->refcount);
2213 io_req->xid, io_req->sc_cmd, io_req->cmd_type, io_req->flags,
2218 io_req->cmd_type = QEDF_CLEANUP;
2220 io_req->return_scsi_cmd_on_abts = return_scsi_cmd_on_abts;
2222 init_completion(&io_req->cleanup_done);
2229 io_req->task_params->sqe = sqe;
2231 init_initiator_cleanup_fcoe_task(io_req->task_params);
2236 tmo = wait_for_completion_timeout(&io_req->cleanup_done,
2243 "xid=%x.\n", io_req->xid);
2244 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2253 if (io_req->tm_flags == FCP_TMF_LUN_RESET ||
2254 io_req->tm_flags == FCP_TMF_TGT_RESET) {
2255 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2256 io_req->sc_cmd = NULL;
2257 kref_put(&io_req->refcount, qedf_release_cmd);
2258 complete(&io_req->tm_done);
2261 if (io_req->sc_cmd) {
2262 if (!io_req->return_scsi_cmd_on_abts)
2265 io_req->xid);
2266 if (io_req->return_scsi_cmd_on_abts)
2267 qedf_scsi_done(qedf, io_req, DID_ERROR);
2271 io_req->event = QEDF_IOREQ_EV_CLEANUP_SUCCESS;
2273 io_req->event = QEDF_IOREQ_EV_CLEANUP_FAILED;
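
The cleanup request is a synchronous handshake with the firmware: init_completion() before the SQE is posted, wait_for_completion_timeout() in the submitter, and complete() from the CQE handler (qedf_process_cleanup_compl(), next). A sketch of both halves; my_post_sqe() and the timeout budget are assumptions:

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    #define MY_CLEANUP_TMO_MS 5000  /* assumed firmware response budget */

    struct my_cmd { struct completion cleanup_done; };

    static void my_post_sqe(struct my_cmd *cmd) { }  /* stub: rings the doorbell */

    /* Submitter side. */
    static int my_initiate_cleanup(struct my_cmd *cmd)
    {
        init_completion(&cmd->cleanup_done);
        my_post_sqe(cmd);

        if (!wait_for_completion_timeout(&cmd->cleanup_done,
                                         msecs_to_jiffies(MY_CLEANUP_TMO_MS)))
            return -ETIMEDOUT;      /* firmware never answered */

        return 0;
    }

    /* Completion-queue handler side. */
    static void my_cleanup_compl(struct my_cmd *cmd)
    {
        complete(&cmd->cleanup_done);
    }
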
2279 struct qedf_ioreq *io_req)
2282 io_req->xid);
2284 clear_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
2287 complete(&io_req->cleanup_done);
2293 struct qedf_ioreq *io_req;
2317 io_req = qedf_alloc_cmd(fcport, QEDF_TASK_MGMT_CMD);
2318 if (!io_req) {
2329 /* Initialize rest of io_req fields */
2330 io_req->sc_cmd = sc_cmd;
2331 io_req->fcport = fcport;
2332 io_req->cmd_type = QEDF_TASK_MGMT_CMD;
2335 io_req->cpu = smp_processor_id();
2338 io_req->io_req_flags = QEDF_READ;
2339 io_req->data_xfer_len = 0;
2340 io_req->tm_flags = tm_flags;
2343 io_req->return_scsi_cmd_on_abts = false;
2346 xid = io_req->xid;
2348 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_SCSI_TM, "TMF io_req xid = "
2354 init_completion(&io_req->tm_done);
2362 qedf_init_task(fcport, lport, io_req, task, sqe);
2367 set_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2368 tmo = wait_for_completion_timeout(&io_req->tm_done,
2375 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2376 io_req->sc_cmd = NULL;
2379 if (io_req->fcp_rsp_code == 0)
2393 /* We do not need this io_req any more */
2394 kref_put(&io_req->refcount, qedf_release_cmd);
2422 struct qedf_ioreq *io_req = NULL;
2440 if (qedf_priv(sc_cmd)->io_req) {
2441 io_req = qedf_priv(sc_cmd)->io_req;
2442 ref_cnt = kref_read(&io_req->refcount);
2444 "orig io_req = %p xid = 0x%x ref_cnt = %d.\n",
2445 io_req, io_req->xid, ref_cnt);
2511 struct qedf_ioreq *io_req)
2515 clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
2518 qedf_parse_fcp_rsp(io_req, fcp_rsp);
2520 io_req->sc_cmd = NULL;
2521 complete(&io_req->tm_done);