Lines Matching defs:cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
2000 struct qla_tgt_cmd *cmd;
2017 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2021 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
2023 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
2025 cmd->aborted = 1;
2116 "qla_target(%d): %s: Allocation of ABORT cmd failed",
2287 void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
2290 struct atio_from_isp *atio = &cmd->atio;
2293 struct scsi_qla_host *vha = cmd->vha;
2311 ctio->nport_handle = cpu_to_le16(cmd->sess->loop_id);
2422 struct qla_tgt_cmd *cmd = prm->cmd;
2424 BUG_ON(cmd->sg_cnt == 0);
2426 prm->sg = (struct scatterlist *)cmd->sg;
2427 prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
2428 cmd->sg_cnt, cmd->dma_data_direction);
2432 prm->cmd->sg_mapped = 1;
2434 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
2445 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2446 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2447 prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
2452 if (cmd->prot_sg_cnt) {
2453 prm->prot_sg = cmd->prot_sg;
2454 prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
2455 cmd->prot_sg, cmd->prot_sg_cnt,
2456 cmd->dma_data_direction);
2460 if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
2461 (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
2463 prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
2464 cmd->blk_sz);
2474 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe04d,
2476 0, prm->cmd->sg_cnt);
2480 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2485 if (!cmd->sg_mapped)
2488 qpair = cmd->qpair;
2490 dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
2491 cmd->dma_data_direction);
2492 cmd->sg_mapped = 0;
2494 if (cmd->prot_sg_cnt)
2495 dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
2496 cmd->dma_data_direction);
2498 if (!cmd->ctx)
2501 if (cmd->ctx_dsd_alloced)
2502 qla2x00_clean_dsd_pool(ha, cmd->ctx);
2504 dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
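The dma_map_sg()/dma_unmap_sg() matches above (including qlt_unmap_sg()) show the usual DMA API pairing: the scatterlist is mapped before the buffer is handed to the hardware and unmapped with the original entry count afterwards. A minimal, self-contained sketch of that pairing, assuming only the generic DMA API; the example_* names are illustrative and not part of the driver:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    /* Map a command's scatterlist for DMA; returns the mapped segment count. */
    static int example_map_cmd_sg(struct device *dev, struct scatterlist *sg,
                                  int sg_cnt, enum dma_data_direction dir)
    {
            int seg_cnt = dma_map_sg(dev, sg, sg_cnt, dir);

            if (unlikely(seg_cnt == 0))
                    return -ENOMEM;     /* mapping failed, nothing to undo */
            return seg_cnt;             /* may be fewer than sg_cnt after merging */
    }

    /* Undo the mapping; dma_unmap_sg() takes the original sg_cnt,
     * not the count returned by dma_map_sg(). */
    static void example_unmap_cmd_sg(struct device *dev, struct scatterlist *sg,
                                     int sg_cnt, enum dma_data_direction dir)
    {
            dma_unmap_sg(dev, sg, sg_cnt, dir);
    }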
2575 "qla_target(%d): Ran out of empty cmd slots\n",
2589 struct atio_from_isp *atio = &prm->cmd->atio;
2591 struct qla_tgt_cmd *cmd = prm->cmd;
2599 pkt->vp_index = prm->cmd->vp_idx;
2610 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
2614 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
2622 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
2624 if (cmd->edif) {
2625 if (cmd->dma_data_direction == DMA_TO_DEVICE)
2626 prm->cmd->sess->edif.rx_bytes += cmd->bufflen;
2627 if (cmd->dma_data_direction == DMA_FROM_DEVICE)
2628 prm->cmd->sess->edif.tx_bytes += cmd->bufflen;
2649 prm->cmd->qpair->req);
2686 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
2715 static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
2717 return cmd->bufflen > 0;
2722 struct qla_tgt_cmd *cmd;
2727 cmd = prm->cmd;
2728 vha = cmd->vha;
2735 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2736 cmd->atio.u.isp24.exchange_addr);
2742 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2743 cmd->atio.u.isp24.exchange_addr);
2749 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2750 cmd->atio.u.isp24.exchange_addr);
2756 cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
2757 cmd->atio.u.isp24.exchange_addr);
2760 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2767 static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
2771 struct se_cmd *se_cmd = &cmd->se_cmd;
2772 struct qla_qpair *qpair = cmd->qpair;
2774 prm->cmd = cmd;
2775 prm->tgt = cmd->tgt;
2778 prm->sense_buffer = &cmd->sense_buffer[0];
2789 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
2802 cmd->bufflen, prm->rq_result);
2809 se_cmd->t_task_cdb[0] : 0, cmd->bufflen, prm->rq_result);
2818 if (qlt_has_data(cmd)) {
2820 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2831 static inline int qlt_need_explicit_conf(struct qla_tgt_cmd *cmd,
2834 if (cmd->qpair->enable_class_2)
2838 return cmd->conf_compl_supported;
2840 return cmd->qpair->enable_explicit_conf &&
2841 cmd->conf_compl_supported;
2850 if (qlt_need_explicit_conf(prm->cmd, 0)) {
2860 if (qlt_need_explicit_conf(prm->cmd, 1)) {
2862 ql_dbg_qp(ql_dbg_tgt, prm->cmd->qpair, 0xe017,
2948 qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
2951 struct se_cmd *se_cmd = &cmd->se_cmd;
2953 scsi_qla_host_t *vha = cmd->tgt->vha;
2958 * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
2974 t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
3046 struct qla_tgt_cmd *cmd = prm->cmd;
3047 struct se_cmd *se_cmd = &cmd->se_cmd;
3049 struct atio_from_isp *atio = &prm->cmd->atio;
3052 scsi_qla_host_t *vha = cmd->vha;
3060 ql_dbg_qp(ql_dbg_tgt, cmd->qpair, 0xe071,
3061 "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
3062 cmd->vp_idx, __func__, se_cmd, se_cmd->prot_op,
3070 data_bytes = cmd->bufflen;
3071 dif_bytes = (data_bytes / cmd->blk_sz) * 8;
3077 if (cmd->prot_sg_cnt)
3125 pkt->vp_index = cmd->vp_idx;
3136 qpair->req->outstanding_cmds[h] = (srb_t *)prm->cmd;
3140 pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
3151 pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
3154 if (cmd->dma_data_direction == DMA_TO_DEVICE)
3156 else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
3166 crc_ctx_pkt = cmd->ctx =
3178 qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
3198 crc_ctx_pkt->blk_size = cpu_to_le16(cmd->blk_sz);
3205 tc.blk_sz = cmd->blk_sz;
3206 tc.bufflen = cmd->bufflen;
3207 tc.sg = cmd->sg;
3208 tc.prot_sg = cmd->prot_sg;
3210 tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
3229 prm->prot_seg_cnt, cmd))
3245 int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
3248 struct scsi_qla_host *vha = cmd->vha;
3249 struct qla_qpair *qpair = cmd->qpair;
3256 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3257 (cmd->sess && cmd->sess->deleted)) {
3258 cmd->state = QLA_TGT_STATE_PROCESSED;
3263 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p] qp %d\n",
3265 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
3266 &cmd->se_cmd, qpair->id);
3268 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
3281 if (!qpair->fw_started || cmd->reset_count != qpair->chip_reset) {
3286 cmd->state = QLA_TGT_STATE_PROCESSED;
3290 cmd->reset_count, qpair->chip_reset);
3300 if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
3311 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
3316 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3323 if (!cmd->edif)
3329 if (qlt_need_explicit_conf(cmd, 0)) {
3379 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
3380 cmd->cmd_sent_to_fw = 1;
3381 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3394 qlt_unmap_sg(vha, cmd);
3401 int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
3404 struct scsi_qla_host *vha = cmd->vha;
3405 struct qla_tgt *tgt = cmd->tgt;
3409 struct qla_qpair *qpair = cmd->qpair;
3412 prm.cmd = cmd;
3417 if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
3418 (cmd->sess && cmd->sess->deleted)) {
3423 cmd->aborted = 1;
3424 cmd->write_data_transferred = 0;
3425 cmd->state = QLA_TGT_STATE_DATA_IN;
3426 vha->hw->tgt.tgt_ops->handle_data(cmd);
3430 cmd->reset_count, qpair->chip_reset);
3443 if (cmd->se_cmd.prot_op)
3457 if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
3460 cmd->state = QLA_TGT_STATE_NEED_DATA;
3461 cmd->cmd_sent_to_fw = 1;
3462 cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
3475 qlt_unmap_sg(vha, cmd);
3487 qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
3492 uint64_t lba = cmd->se_cmd.t_task_lba;
3495 struct scsi_qla_host *vha = cmd->vha;
3497 cmd->trc_flags |= TRC_DIF_ERR;
3499 cmd->a_guard = get_unaligned_be16(ap + 0);
3500 cmd->a_app_tag = get_unaligned_be16(ap + 2);
3501 cmd->a_ref_tag = get_unaligned_be32(ap + 4);
3503 cmd->e_guard = get_unaligned_be16(ep + 0);
3504 cmd->e_app_tag = get_unaligned_be16(ep + 2);
3505 cmd->e_ref_tag = get_unaligned_be32(ep + 4);
3508 "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
3513 if (cmd->e_app_tag != cmd->a_app_tag) {
3515 "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3516 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3517 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3518 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3519 cmd->atio.u.isp24.fcp_hdr.ox_id);
3521 cmd->dif_err_code = DIF_ERR_APP;
3529 if (cmd->e_ref_tag != cmd->a_ref_tag) {
3531 "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard[%x|%x] cmd=%p ox_id[%04x] ",
3532 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3533 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3534 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3535 cmd->atio.u.isp24.fcp_hdr.ox_id);
3537 cmd->dif_err_code = DIF_ERR_REF;
3546 if (cmd->e_guard != cmd->a_guard) {
3548 "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] Ref[%x|%x], App[%x|%x], Guard [%x|%x] cmd=%p ox_id[%04x]",
3549 cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
3550 cmd->a_ref_tag, cmd->e_ref_tag, cmd->a_app_tag,
3551 cmd->e_app_tag, cmd->a_guard, cmd->e_guard, cmd,
3552 cmd->atio.u.isp24.fcp_hdr.ox_id);
3554 cmd->dif_err_code = DIF_ERR_GRD;
3561 switch (cmd->state) {
3564 cmd->state = QLA_TGT_STATE_DATA_IN;
3565 vha->hw->tgt.tgt_ops->handle_data(cmd);
3568 spin_lock_irqsave(&cmd->cmd_lock, flags);
3569 if (cmd->aborted) {
3570 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3571 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3574 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3576 qlt_send_resp_ctio(qpair, cmd, scsi_status, sense_key, asc,
3581 vha->hw->tgt.tgt_ops->free_cmd(cmd);
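The qlt_handle_dif_error() matches above read two 8-byte T10 protection-information tuples, actual and expected, at fixed big-endian offsets: guard CRC at byte 0, application tag at byte 2, reference tag at byte 4. A small sketch of that decode, assuming the classic <asm/unaligned.h> helpers; the example_* names are illustrative only:

    #include <asm/unaligned.h>
    #include <linux/types.h>

    struct example_pi_tuple {
            u16 guard;          /* CRC guard of the data block          */
            u16 app_tag;        /* application tag                      */
            u32 ref_tag;        /* reference tag (LBA-based for Type 1) */
    };

    /* Decode one big-endian 8-byte T10-PI tuple from a raw buffer. */
    static void example_decode_pi(const u8 *p, struct example_pi_tuple *t)
    {
            t->guard   = get_unaligned_be16(p + 0);
            t->app_tag = get_unaligned_be16(p + 2);
            t->ref_tag = get_unaligned_be32(p + 4);
    }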
3652 struct qla_tgt_cmd *cmd,
3664 if (cmd)
3665 vha = cmd->vha;
3675 if (cmd != NULL) {
3676 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
3678 "qla_target(%d): Terminating cmd %p with "
3679 "incorrect state %d\n", vha->vp_idx, cmd,
3680 cmd->state);
3712 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3720 if (cmd)
3721 vha = cmd->vha;
3726 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3732 rc = __qlt_send_term_exchange(qpair, cmd, atio);
3737 if (cmd && !ul_abort && !cmd->aborted) {
3738 if (cmd->sg_mapped)
3739 qlt_unmap_sg(vha, cmd);
3740 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3752 struct qla_tgt_cmd *cmd, *tcmd;
3757 cmd = tcmd = NULL;
3762 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
3763 list_del(&cmd->cmd_list);
3764 /* This cmd was never sent to TCM. There is no need
3767 qlt_free_cmd(cmd);
3796 int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3798 struct qla_tgt *tgt = cmd->tgt;
3800 struct se_cmd *se_cmd = &cmd->se_cmd;
3804 "qla_target(%d): terminating exchange for aborted cmd=%p "
3805 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3808 spin_lock_irqsave(&cmd->cmd_lock, flags);
3809 if (cmd->aborted) {
3810 if (cmd->sg_mapped)
3811 qlt_unmap_sg(vha, cmd);
3813 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3821 "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
3822 cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
3825 cmd->aborted = 1;
3826 cmd->trc_flags |= TRC_ABORT;
3827 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3829 qlt_send_term_exchange(cmd->qpair, cmd, &cmd->atio, 0, 1);
3834 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3836 struct fc_port *sess = cmd->sess;
3838 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3840 __func__, &cmd->se_cmd,
3841 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
3843 BUG_ON(cmd->cmd_in_wq);
3845 if (!cmd->q_full)
3846 qlt_decr_num_pend_cmds(cmd->vha);
3848 BUG_ON(cmd->sg_mapped);
3849 cmd->jiffies_at_free = get_jiffies_64();
3855 cmd->jiffies_at_free = get_jiffies_64();
3856 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3864 struct qla_tgt_cmd *cmd, uint32_t status)
3869 if (cmd->se_cmd.prot_op)
3871 "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
3873 cmd->lba, cmd->lba,
3874 cmd->num_blks, &cmd->se_cmd,
3875 cmd->atio.u.isp24.exchange_addr,
3876 cmd->se_cmd.prot_op,
3877 prot_op_str(cmd->se_cmd.prot_op));
3888 qlt_send_term_exchange(qpair, cmd, &cmd->atio, 1, 0);
3898 void *cmd = NULL;
3927 cmd = req->outstanding_cmds[h];
3928 if (unlikely(cmd == NULL)) {
3943 return cmd;
3954 struct qla_tgt_cmd *cmd;
3967 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3968 if (cmd == NULL)
3972 cmd->sess) {
3973 qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
3977 se_cmd = &cmd->se_cmd;
3978 cmd->cmd_sent_to_fw = 0;
3980 qlt_unmap_sg(vha, cmd);
3988 vha->vp_idx, cmd->atio.u.isp24.attr,
3989 ((cmd->ctio_flags >> 9) & 0xf),
3990 cmd->ctio_flags);
4004 status, cmd->state, se_cmd);
4017 status, cmd->state, se_cmd);
4019 if (logged_out && cmd->sess) {
4024 cmd->sess->send_els_logo = 1;
4027 __func__, __LINE__, cmd->sess->port_name);
4029 qlt_schedule_sess_for_deletion(cmd->sess);
4040 vha->vp_idx, status, cmd->state, se_cmd,
4044 qlt_handle_dif_error(qpair, cmd, ctio);
4054 vha->vp_idx, status, cmd->state, se_cmd);
4060 vha->vp_idx, status, cmd->state, se_cmd);
4065 /* "cmd->aborted" means
4066 * cmd is already aborted/terminated, we don't
4071 if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
4072 (!cmd->aborted)) {
4073 cmd->trc_flags |= TRC_CTIO_ERR;
4074 if (qlt_term_ctio_exchange(qpair, ctio, cmd, status))
4079 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
4080 cmd->trc_flags |= TRC_CTIO_DONE;
4081 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
4082 cmd->state = QLA_TGT_STATE_DATA_IN;
4085 cmd->write_data_transferred = 1;
4087 ha->tgt.tgt_ops->handle_data(cmd);
4089 } else if (cmd->aborted) {
4090 cmd->trc_flags |= TRC_CTIO_ABORTED;
4092 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
4094 cmd->trc_flags |= TRC_CTIO_STRANGE;
4097 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4101 !cmd->aborted) {
4106 ha->tgt.tgt_ops->free_cmd(cmd);
4144 static void __qlt_do_work(struct qla_tgt_cmd *cmd)
4146 scsi_qla_host_t *vha = cmd->vha;
4148 struct fc_port *sess = cmd->sess;
4149 struct atio_from_isp *atio = &cmd->atio;
4154 struct qla_qpair *qpair = cmd->qpair;
4156 cmd->cmd_in_wq = 0;
4157 cmd->trc_flags |= TRC_DO_WORK;
4159 if (cmd->aborted) {
4161 "cmd with tag %u is aborted\n",
4162 cmd->atio.u.isp24.exchange_addr);
4166 spin_lock_init(&cmd->cmd_lock);
4168 cmd->se_cmd.tag = le32_to_cpu(atio->u.isp24.exchange_addr);
4185 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4196 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4198 * cmd has not sent to target yet, so pass NULL as the second
4201 cmd->trc_flags |= TRC_DO_WORK_ERR;
4203 qlt_send_term_exchange(qpair, NULL, &cmd->atio, 1, 0);
4206 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4214 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
4215 scsi_qla_host_t *vha = cmd->vha;
4219 list_del(&cmd->cmd_list);
4222 __qlt_do_work(cmd);
4251 struct qla_tgt_cmd *cmd)
4258 h = btree_lookup64(&tgt->lun_qpair_map, cmd->unpacked_lun);
4271 cmd->unpacked_lun, h, GFP_ATOMIC);
4276 cmd->unpacked_lun);
4291 cmd->unpacked_lun, h, GFP_ATOMIC);
4296 cmd->unpacked_lun);
4313 cmd->unpacked_lun, h, GFP_ATOMIC);
4318 cmd->unpacked_lun);
4325 cmd->qpair = h->qpair;
4326 cmd->se_cmd.cpuid = h->cpuid;
4333 struct qla_tgt_cmd *cmd;
4335 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4336 if (!cmd)
4339 cmd->cmd_type = TYPE_TGT_CMD;
4340 memcpy(&cmd->atio, atio, sizeof(*atio));
4341 INIT_LIST_HEAD(&cmd->sess_cmd_list);
4342 cmd->state = QLA_TGT_STATE_NEW;
4343 cmd->tgt = vha->vha_tgt.qla_tgt;
4345 cmd->vha = vha;
4346 cmd->sess = sess;
4347 cmd->loop_id = sess->loop_id;
4348 cmd->conf_compl_supported = sess->conf_compl_supported;
4350 cmd->trc_flags = 0;
4351 cmd->jiffies_at_alloc = get_jiffies_64();
4353 cmd->unpacked_lun = scsilun_to_int(
4355 qlt_assign_qpair(vha, cmd);
4356 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4357 cmd->vp_idx = vha->vp_idx;
4358 cmd->edif = sess->edif.enable;
4360 return cmd;
4370 struct qla_tgt_cmd *cmd;
4408 cmd = qlt_get_tag(vha, sess, atio);
4409 if (!cmd) {
4411 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4416 cmd->cmd_in_wq = 1;
4417 cmd->trc_flags |= TRC_NEW_CMD;
4420 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4423 INIT_WORK(&cmd->work, qlt_do_work);
4425 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq, &cmd->work);
4427 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
4428 queue_work(qla_tgt_wq, &cmd->work);
4430 queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
4431 &cmd->work);
4433 queue_work(qla_tgt_wq, &cmd->work);
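The INIT_WORK()/queue_work_on() matches above defer per-command processing to the qla_tgt_wq workqueue, either on the CPU chosen for the command's queue pair or on any CPU. A minimal sketch of that deferral pattern, assuming only the standard workqueue API; my_cmd and the helper names are illustrative, not the driver's:

    #include <linux/workqueue.h>
    #include <linux/kernel.h>

    struct my_cmd {
            struct work_struct work;
            int cpuid;              /* CPU hint; negative means "don't care" */
    };

    static void my_cmd_work(struct work_struct *work)
    {
            struct my_cmd *cmd = container_of(work, struct my_cmd, work);

            /* ... process the command here ... */
            (void)cmd;
    }

    /* Queue the command, pinning it to a CPU when a hint is available. */
    static void my_queue_cmd(struct workqueue_struct *wq, struct my_cmd *cmd)
    {
            INIT_WORK(&cmd->work, my_cmd_work);
            if (cmd->cpuid >= 0)
                    queue_work_on(cmd->cpuid, wq, &cmd->work);
            else
                    queue_work(wq, &cmd->work);
    }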
4539 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
4689 struct qla_tgt_cmd *cmd;
4708 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4709 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4712 cmd->aborted = 1;
5185 * cmd went upper layer, look for qlt_xmit_tm_rsp()
5191 /* cmd did not go to upper layer. */
5458 struct qla_tgt_cmd *cmd;
5488 cmd = ha->tgt.tgt_ops->get_cmd(sess);
5489 if (!cmd) {
5491 "qla_target(%d): %s: Allocation of cmd failed\n",
5505 INIT_LIST_HEAD(&cmd->cmd_list);
5506 memcpy(&cmd->atio, atio, sizeof(*atio));
5508 cmd->tgt = vha->vha_tgt.qla_tgt;
5509 cmd->vha = vha;
5510 cmd->reset_count = ha->base_qpair->chip_reset;
5511 cmd->q_full = 1;
5512 cmd->qpair = ha->base_qpair;
5515 cmd->q_full = 1;
5517 cmd->state = status;
5519 cmd->term_exchg = 1;
5522 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5538 struct qla_tgt_cmd *cmd, *tcmd;
5558 list_for_each_entry_safe(cmd, tcmd, &q_full_list, cmd_list) {
5559 if (cmd->q_full)
5560 /* cmd->state is a borrowed field to hold status */
5561 rc = __qlt_send_busy(qpair, &cmd->atio, cmd->state);
5562 else if (cmd->term_exchg)
5563 rc = __qlt_send_term_exchange(qpair, NULL, &cmd->atio);
5568 if (cmd->q_full)
5571 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5572 else if (cmd->term_exchg)
5575 be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
5578 "%s: Unexpected cmd in QFull list %p\n", __func__,
5579 cmd);
5581 list_move_tail(&cmd->cmd_list, &free_list);
5588 cmd = NULL;
5590 list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
5591 list_del(&cmd->cmd_list);
5592 /* This cmd was never sent to TCM. There is no need
5595 qlt_free_cmd(cmd);