Lines matching refs: ioctx (I/O context handling in the ib_srpt SRP target driver)
660 struct srpt_ioctx *ioctx;
662 ioctx = kzalloc(ioctx_size, GFP_KERNEL);
663 if (!ioctx)
666 ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
667 if (!ioctx->buf)
670 ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
672 if (ib_dma_mapping_error(sdev->device, ioctx->dma))
675 return ioctx;
678 kmem_cache_free(buf_cache, ioctx->buf);
680 kfree(ioctx);
688 * @ioctx: I/O context pointer.
692 static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
696 if (!ioctx)
699 ib_dma_unmap_single(sdev->device, ioctx->dma,
701 kmem_cache_free(buf_cache, ioctx->buf);
702 kfree(ioctx);
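
The matches at 660-680 and 688-702 come from the I/O context constructor and destructor: a zeroed descriptor, a data buffer taken from a slab cache, and a DMA mapping of that buffer, released in reverse order both on an allocation failure and at teardown. Below is a minimal sketch of that allocate/map/unwind pairing; the structure layout, parameter lists, and label names are assumptions filled in around the visible fragments, not a copy of the driver.

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Assumed layout, following the fields referenced in the listing. */
struct demo_ioctx {
        struct ib_cqe   cqe;
        void            *buf;
        dma_addr_t      dma;
};

static struct demo_ioctx *demo_ioctx_alloc(struct ib_device *dev,
                                           struct kmem_cache *buf_cache,
                                           size_t ioctx_size, size_t buf_size,
                                           enum dma_data_direction dir)
{
        struct demo_ioctx *ioctx;

        ioctx = kzalloc(ioctx_size, GFP_KERNEL);
        if (!ioctx)
                goto err;

        ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
        if (!ioctx->buf)
                goto err_free_ioctx;

        /* Map the slab buffer for device access and verify the mapping. */
        ioctx->dma = ib_dma_map_single(dev, ioctx->buf, buf_size, dir);
        if (ib_dma_mapping_error(dev, ioctx->dma))
                goto err_free_buf;

        return ioctx;

err_free_buf:
        kmem_cache_free(buf_cache, ioctx->buf);
err_free_ioctx:
        kfree(ioctx);
err:
        return NULL;
}

static void demo_ioctx_free(struct ib_device *dev, struct kmem_cache *buf_cache,
                            struct demo_ioctx *ioctx, size_t buf_size,
                            enum dma_data_direction dir)
{
        if (!ioctx)
                return;

        /* Teardown mirrors construction in reverse order. */
        ib_dma_unmap_single(dev, ioctx->dma, buf_size, dir);
        kmem_cache_free(buf_cache, ioctx->buf);
        kfree(ioctx);
}
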
773 * @ioctx: Send I/O context.
779 static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
784 previous = ioctx->state;
786 ioctx->state = new;
793 * @ioctx: Send I/O context.
799 static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
805 WARN_ON(!ioctx);
809 previous = ioctx->state;
811 ioctx->state = new;
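
Lines 773-811 are the two command-state helpers: one overwrites ioctx->state and reports the previous value, the other transitions only when the current state still matches the expected old state and reports whether the swap happened. A sketch of both shapes follows; the guard conditions on the elided lines are assumptions.

/* Unconditional transition (except out of the final state); returns the
 * state that was in effect before the call. The DONE guard is assumed. */
static enum srpt_command_state demo_set_cmd_state(struct srpt_send_ioctx *ioctx,
                                                  enum srpt_command_state new)
{
        enum srpt_command_state previous = ioctx->state;

        if (previous != SRPT_STATE_DONE)
                ioctx->state = new;

        return previous;
}

/* Conditional transition: move to @new only if the state is still @old,
 * and tell the caller whether that happened. */
static bool demo_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
                                        enum srpt_command_state old,
                                        enum srpt_command_state new)
{
        enum srpt_command_state previous;

        WARN_ON(!ioctx);

        previous = ioctx->state;
        if (previous == old)
                ioctx->state = new;

        return previous == old;
}
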
820 * @ioctx: Receive I/O context pointer.
823 struct srpt_recv_ioctx *ioctx)
829 list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
833 ioctx->ioctx.cqe.done = srpt_recv_done;
834 wr.wr_cqe = &ioctx->ioctx.cqe;
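
The fragments at 820-834 are from posting a receive work request: the SGE points at the DMA-mapped receive buffer (base address plus offset), and completion routing goes through an ib_cqe whose done callback is srpt_recv_done. A sketch of that pattern; the SGE length, lkey, and posting target (a plain QP rather than an SRQ) are assumptions.

static int demo_post_recv(struct ib_qp *qp, u32 lkey,
                          struct srpt_recv_ioctx *ioctx, u32 buf_len)
{
        struct ib_sge list;
        struct ib_recv_wr wr;

        /* One SGE covering the request buffer inside the mapped region. */
        list.addr   = ioctx->ioctx.dma + ioctx->ioctx.offset;
        list.length = buf_len;
        list.lkey   = lkey;

        /* Route the completion to the driver's receive handler. */
        ioctx->ioctx.cqe.done = srpt_recv_done;

        wr.next    = NULL;
        wr.wr_cqe  = &ioctx->ioctx.cqe;
        wr.sg_list = &list;
        wr.num_sge = 1;

        /* The driver may post to an SRQ instead; a plain QP is assumed here. */
        return ib_post_recv(qp, &wr, NULL);
}
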
889 static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
893 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
894 struct srpt_rdma_ch *ch = ioctx->ch;
900 ioctx->rw_ctxs = &ioctx->s_rw_ctx;
902 ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
904 if (!ioctx->rw_ctxs)
908 for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
909 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
926 ioctx->n_rdma += ret;
927 ioctx->n_rw_ctx++;
946 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
952 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
953 kfree(ioctx->rw_ctxs);
958 struct srpt_send_ioctx *ioctx)
960 enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
963 for (i = 0; i < ioctx->n_rw_ctx; i++) {
964 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
971 if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
972 kfree(ioctx->rw_ctxs);
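
Lines 889-972 belong to the setup and teardown of the RDMA R/W contexts: one srpt_rw_ctx per descriptor buffer, a single inlined context (s_rw_ctx) or a kmalloc'd array when there are several buffers, each registered through the rdma_rw API and counted into n_rdma. A condensed sketch of that init/destroy pairing is below; the per-buffer context layout, the scatterlist source, and the remote address/rkey parameters are assumptions.

#include <rdma/rw.h>

/* Assumed per-buffer context, mirroring the fields used in the listing. */
struct demo_rw_ctx {
        struct rdma_rw_ctx      rw;
        struct scatterlist      *sg;
        unsigned int            nents;
};

static int demo_alloc_rw_ctxs(struct ib_qp *qp, u32 port_num,
                              struct demo_rw_ctx *ctxs, int nbufs,
                              const u64 *remote_addr, const u32 *rkey,
                              enum dma_data_direction dir, int *n_rdma)
{
        int i, ret;

        for (i = 0; i < nbufs; i++) {
                struct demo_rw_ctx *ctx = &ctxs[i];

                /* The scatterlists are assumed to have been allocated
                 * already (the driver gets them from the target core). */
                ret = rdma_rw_ctx_init(&ctx->rw, qp, port_num, ctx->sg,
                                       ctx->nents, 0, remote_addr[i], rkey[i],
                                       dir);
                if (ret < 0)
                        goto unwind;

                /* Each context contributes its WR count to the send budget. */
                *n_rdma += ret;
        }
        return 0;

unwind:
        while (--i >= 0)
                rdma_rw_ctx_destroy(&ctxs[i].rw, qp, port_num, ctxs[i].sg,
                                    ctxs[i].nents, dir);
        return ret;
}

static void demo_free_rw_ctxs(struct ib_qp *qp, u32 port_num,
                              struct demo_rw_ctx *ctxs, int n_rw_ctx,
                              enum dma_data_direction dir)
{
        int i;

        for (i = 0; i < n_rw_ctx; i++)
                rdma_rw_ctx_destroy(&ctxs[i].rw, qp, port_num, ctxs[i].sg,
                                    ctxs[i].nents, dir);
}
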
996 * @ioctx: I/O context that will be used for responding to the initiator.
1007 * This function initializes ioctx->nrbuf and ioctx->r_bufs.
1013 struct srpt_send_ioctx *ioctx,
1036 ioctx->cmd.data_direction = *dir;
1043 return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
1061 return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
1088 ioctx->recv_ioctx = recv_ioctx;
1093 sg_init_one(&ioctx->imm_sg, data, len);
1094 *sg = &ioctx->imm_sg;
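
The matches at 996-1094 are from parsing the SRP descriptor table: direct and indirect descriptors go through srpt_alloc_rw_ctxs(), while immediate data that arrived inside the request IU is handed to the target core as a one-entry scatterlist pointing into the still-mapped receive buffer, with the receive context kept around until the command completes. A small sketch of the immediate-data case; the helper name and parameter list are hypothetical.

#include <linux/scatterlist.h>

/* Hypothetical helper: expose immediate data from the receive buffer as a
 * single-entry scatterlist so no extra copy or mapping is needed. */
static void demo_setup_imm_data(struct srpt_send_ioctx *ioctx,
                                struct srpt_recv_ioctx *recv_ioctx,
                                void *data, unsigned int len,
                                struct scatterlist **sg, unsigned int *sg_cnt)
{
        /* Pin the receive context; it is returned in srpt_release_cmd(). */
        ioctx->recv_ioctx = recv_ioctx;

        sg_init_one(&ioctx->imm_sg, data, len);
        *sg = &ioctx->imm_sg;
        *sg_cnt = 1;
}
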
1220 struct srpt_send_ioctx *ioctx;
1229 ioctx = ch->ioctx_ring[tag];
1230 BUG_ON(ioctx->ch != ch);
1231 ioctx->state = SRPT_STATE_NEW;
1232 WARN_ON_ONCE(ioctx->recv_ioctx);
1233 ioctx->n_rdma = 0;
1234 ioctx->n_rw_ctx = 0;
1235 ioctx->queue_status_only = false;
1240 memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
1241 memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
1242 ioctx->cmd.map_tag = tag;
1243 ioctx->cmd.map_cpu = cpu;
1245 return ioctx;
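
Lines 1220-1245 show a send context being taken from the per-channel ring and reset for a new command: the ring index is a tag (allocated elsewhere, presumably from the session's tag pool, which this listing does not show), and every per-command field, including the embedded se_cmd and the sense buffer, is cleared before reuse. Sketch:

static struct srpt_send_ioctx *demo_get_send_ioctx(struct srpt_rdma_ch *ch,
                                                   int tag, int cpu)
{
        struct srpt_send_ioctx *ioctx;

        /* Tag allocation is assumed to have happened in the caller;
         * a negative tag means no free context is available. */
        if (tag < 0)
                return NULL;

        ioctx = ch->ioctx_ring[tag];
        BUG_ON(ioctx->ch != ch);

        /* Reset all per-command state before reusing the context. */
        ioctx->state = SRPT_STATE_NEW;
        WARN_ON_ONCE(ioctx->recv_ioctx);
        ioctx->n_rdma = 0;
        ioctx->n_rw_ctx = 0;
        ioctx->queue_status_only = false;

        memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
        memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
        ioctx->cmd.map_tag = tag;
        ioctx->cmd.map_cpu = cpu;

        return ioctx;
}
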
1250 * @ioctx: I/O context associated with the SCSI command.
1252 static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
1256 BUG_ON(!ioctx);
1263 state = ioctx->state;
1266 ioctx->state = SRPT_STATE_DATA_IN;
1270 ioctx->state = SRPT_STATE_DONE;
1279 ioctx->state, ioctx->cmd.tag);
1292 pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
1293 transport_generic_request_failure(&ioctx->cmd,
1301 transport_generic_free_cmd(&ioctx->cmd, 0);
1304 transport_generic_free_cmd(&ioctx->cmd, 0);
1327 struct srpt_send_ioctx *ioctx =
1330 WARN_ON(ioctx->n_rdma <= 0);
1331 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
1332 ioctx->n_rdma = 0;
1335 pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
1336 ioctx, wc->status);
1337 srpt_abort_cmd(ioctx);
1341 if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
1343 target_execute_cmd(&ioctx->cmd);
1346 __LINE__, ioctx->state);
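
The fragments at 1327-1346 are the RDMA-read completion handler: the send-queue budget consumed by the read WRs is returned first, a failed completion aborts the command, and a successful one moves the command from NEED_DATA to DATA_IN and hands it to the target core for execution. A reconstructed sketch; the way the channel pointer is recovered from the work completion is an assumption.

static void demo_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct srpt_rdma_ch *ch = wc->qp->qp_context;  /* assumed */
        struct srpt_send_ioctx *ioctx =
                container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);

        /* Give the send-queue slots used by the read back to the channel. */
        WARN_ON(ioctx->n_rdma <= 0);
        atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
        ioctx->n_rdma = 0;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
                        ioctx, wc->status);
                srpt_abort_cmd(ioctx);
                return;
        }

        /* Only a command still waiting for data may start executing. */
        if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
                                        SRPT_STATE_DATA_IN))
                target_execute_cmd(&ioctx->cmd);
        else
                pr_err("%s[%d]: wrong state = %d\n", __func__,
                       __LINE__, ioctx->state);
}
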
1352 * @ioctx: I/O context associated with the SRP_CMD request. The response will
1353 * be built in the buffer ioctx->buf points at and hence this function will
1365 struct srpt_send_ioctx *ioctx, u64 tag,
1368 struct se_cmd *cmd = &ioctx->cmd;
1380 srp_rsp = ioctx->ioctx.buf;
1383 sense_data = ioctx->sense_data;
1384 sense_data_len = ioctx->cmd.scsi_sense_length;
1385 WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
1436 * @ioctx: I/O context in which the SRP_RSP response will be built.
1447 struct srpt_send_ioctx *ioctx,
1457 srp_rsp = ioctx->ioctx.buf;
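
Lines 1352-1457 come from building the SRP_RSP IU in the send context's mapped buffer, for both command completions and task-management responses. The part the ioctx fields matter for is the sense data: it lives in ioctx->sense_data, its length comes from the se_cmd, and it is copied in after the fixed header with the SNSVALID flag set. A sketch of that portion, assuming the srp_rsp layout from <scsi/srp.h>; req_lim accounting and length clamping against the IU size are omitted.

#include <scsi/srp.h>

static int demo_build_cmd_rsp(struct srpt_send_ioctx *ioctx, u64 tag, int status)
{
        struct srp_rsp *srp_rsp = ioctx->ioctx.buf;
        int sense_data_len = ioctx->cmd.scsi_sense_length;
        int resp_len = sizeof(*srp_rsp);

        WARN_ON(sense_data_len > sizeof(ioctx->sense_data));

        memset(srp_rsp, 0, sizeof(*srp_rsp));
        srp_rsp->opcode = SRP_RSP;
        srp_rsp->tag = tag;
        srp_rsp->status = status;

        if (sense_data_len) {
                /* Sense bytes follow the fixed-size response header. */
                srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
                srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
                memcpy(srp_rsp + 1, ioctx->sense_data, sense_data_len);
                resp_len += sense_data_len;
        }

        return resp_len;
}
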
1475 struct srpt_send_ioctx *ioctx = container_of(cmd,
1478 return target_put_sess_cmd(&ioctx->cmd);
1501 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1588 srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1629 recv_ioctx->ioctx.dma,
1630 recv_ioctx->ioctx.offset + srp_max_req_size,
1633 srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
1688 struct srpt_recv_ioctx *ioctx =
1689 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
1697 ioctx->byte_len = wc->byte_len;
1698 srpt_handle_new_iu(ch, ioctx);
1700 pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
1701 ioctx, wc->status);
1749 struct srpt_send_ioctx *ioctx =
1750 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
1753 state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
1758 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
1761 pr_info("sending response for ioctx 0x%p failed with status %d\n",
1762 ioctx, wc->status);
1765 transport_generic_free_cmd(&ioctx->cmd, 0);
1768 ioctx->ioctx.index);
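
Lines 1749-1768 are from the response-send completion handler: the command is forced to DONE, the send-queue slot used by the SEND plus any RDMA write WRs chained in front of it is returned, and the se_cmd is released unless the command had already reached DONE earlier. Sketch, with the channel recovery from the work completion assumed.

static void demo_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct srpt_rdma_ch *ch = wc->qp->qp_context;  /* assumed */
        struct srpt_send_ioctx *ioctx =
                container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
        enum srpt_command_state state;

        state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);

        /* One slot for the SEND itself plus the RDMA WRs posted before it. */
        atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);

        if (wc->status != IB_WC_SUCCESS)
                pr_info("sending response for ioctx 0x%p failed with status %d\n",
                        ioctx, wc->status);

        if (state != SRPT_STATE_DONE)
                transport_generic_free_cmd(&ioctx->cmd, 0);
        else
                pr_err("completion received too late for index %u\n",
                       ioctx->ioctx.index);
}
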
2738 struct srpt_send_ioctx *ioctx =
2740 struct srpt_rdma_ch *ch = ioctx->ch;
2742 struct ib_cqe *cqe = &ioctx->rdma_cqe;
2746 if (ioctx->recv_ioctx) {
2747 srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
2748 target_execute_cmd(&ioctx->cmd);
2752 new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
2755 if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
2757 __func__, ioctx->n_rdma);
2763 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2764 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2774 __func__, ret, ioctx->n_rdma,
2781 atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
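
The block at 2738-2781 posts the RDMA reads that pull write data from the initiator. Commands whose data arrived as immediate data skip the transfer and execute at once; otherwise the handler reserves n_rdma send-queue slots, chains the work requests of every R/W context with rdma_rw_ctx_wrs() so only one of them carries the completion CQE, and posts the chain. Sketch; the port number, the assumption that each srpt_rw_ctx embeds its rdma_rw_ctx as "rw", and the simplified error handling are mine.

static int demo_post_rdma_reads(struct srpt_rdma_ch *ch,
                                struct srpt_send_ioctx *ioctx, u32 port_num)
{
        struct ib_send_wr *first_wr = NULL;
        struct ib_cqe *cqe = &ioctx->rdma_cqe;
        int i, ret;

        /* Immediate data: nothing to fetch, execute the command directly. */
        if (ioctx->recv_ioctx) {
                srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
                target_execute_cmd(&ioctx->cmd);
                return 0;
        }

        srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);

        /* Reserve send-queue slots for all RDMA read WRs up front. */
        if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
                atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
                return -ENOMEM;
        }

        cqe->done = demo_rdma_read_done;  /* read-completion handler sketched earlier */
        for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
                struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];

                /* Chain this context's WRs in front of the ones built so far;
                 * only the first context processed keeps the CQE. */
                first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, port_num,
                                           cqe, first_wr);
                cqe = NULL;
        }

        ret = ib_post_send(ch->qp, first_wr, NULL);
        if (ret)
                atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
        return ret;
}
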
2805 struct srpt_send_ioctx *ioctx =
2807 struct srpt_rdma_ch *ch = ioctx->ch;
2815 state = ioctx->state;
2819 ioctx->state = SRPT_STATE_CMD_RSP_SENT;
2822 ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
2826 ch, ioctx->ioctx.index, ioctx->state);
2834 if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
2835 ioctx->cmd.data_length &&
2836 !ioctx->queue_status_only) {
2837 for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
2838 struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
2846 resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
2851 resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
2852 ioctx->cmd.tag);
2857 if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
2860 __func__, ioctx->n_rdma);
2865 ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
2868 sge.addr = ioctx->ioctx.dma;
2872 ioctx->ioctx.cqe.done = srpt_send_done;
2874 send_wr.wr_cqe = &ioctx->ioctx.cqe;
2883 __func__, ioctx->cmd.tag, ret);
2890 atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
2892 srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
2893 target_put_sess_cmd(&ioctx->cmd);
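
Lines 2805-2893 are from queuing the response back to the initiator: for a READ the RDMA write WRs of every R/W context are chained in front of the SEND, the response buffer is synced for device access, and a single signaled SEND carrying the SRP_RSP is posted; on failure the reserved send-queue slots are returned and the command is dropped. A reduced sketch of the final posting step (RDMA write chaining omitted; lkey and device pointer are assumed parameters).

static int demo_post_response(struct srpt_rdma_ch *ch, struct ib_device *dev,
                              struct srpt_send_ioctx *ioctx, u32 lkey,
                              int resp_len)
{
        struct ib_send_wr send_wr = {};
        struct ib_sge sge = {};
        int ret;

        /* Reserve one slot for the SEND plus the chained RDMA write WRs. */
        if (atomic_sub_return(1 + ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
                atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
                goto out_drop;
        }

        /* Make the response just written to ioctx->ioctx.buf visible to HW. */
        ib_dma_sync_single_for_device(dev, ioctx->ioctx.dma, resp_len,
                                      DMA_TO_DEVICE);

        sge.addr   = ioctx->ioctx.dma;
        sge.length = resp_len;
        sge.lkey   = lkey;

        ioctx->ioctx.cqe.done = demo_send_done;  /* completion handler sketched earlier */
        send_wr.next       = NULL;
        send_wr.wr_cqe     = &ioctx->ioctx.cqe;
        send_wr.sg_list    = &sge;
        send_wr.num_sge    = 1;
        send_wr.opcode     = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(ch->qp, &send_wr, NULL);
        if (ret) {
                atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
                goto out_drop;
        }
        return 0;

out_drop:
        srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
        target_put_sess_cmd(&ioctx->cmd);
        return -EIO;
}
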
2915 struct srpt_send_ioctx *ioctx = container_of(cmd,
2917 struct srpt_rdma_ch *ch = ioctx->ch;
2924 struct srpt_send_ioctx *ioctx;
2926 ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
2927 BUG_ON(ioctx->sense_data != cmd->sense_buffer);
2931 ioctx->queue_status_only = true;
3342 struct srpt_send_ioctx *ioctx = container_of(se_cmd,
3344 struct srpt_rdma_ch *ch = ioctx->ch;
3345 struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
3347 WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
3348 !(ioctx->cmd.transport_state & CMD_T_ABORTED));
3352 ioctx->recv_ioctx = NULL;
3356 if (ioctx->n_rw_ctx) {
3357 srpt_free_rw_ctxs(ch, ioctx);
3358 ioctx->n_rw_ctx = 0;
3400 struct srpt_send_ioctx *ioctx;
3402 ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
3403 return ioctx->state;