Lines matching refs:rsp — every reference to the rsp response context (struct nvmet_rdma_rsp) in the NVMe-oF RDMA target transport (nvmet_rdma_*), each prefixed with its line number in the source file.
163 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
193 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
195 return nvme_is_write(rsp->req.cmd) &&
196 rsp->req.transfer_len &&
197 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
200 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
202 return !nvme_is_write(rsp->req.cmd) &&
203 rsp->req.transfer_len &&
204 !rsp->req.cqe->status &&
205 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
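A minimal userspace sketch of the logic in the two helpers above, with the driver structures reduced to illustrative stub fields (struct sketch_rsp and its members are stand-ins, not the kernel definitions). A host write carries data into the target, so an RDMA READ stage is needed unless the payload arrived inline with the command; a host read returns data, so RDMA WRITEs are needed only when the command has data to return and did not fail:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver structures referenced above. */
#define REQ_INLINE_DATA  (1 << 0)

struct sketch_rsp {
	bool     cmd_is_write;   /* nvme_is_write(rsp->req.cmd)   */
	uint32_t transfer_len;   /* rsp->req.transfer_len         */
	uint16_t cqe_status;     /* rsp->req.cqe->status          */
	int      flags;          /* NVMET_RDMA_REQ_* style flags  */
};

/* Host write: data flows into the target, so RDMA READs are needed
 * unless the payload already arrived inline with the command. */
bool sketch_need_data_in(const struct sketch_rsp *rsp)
{
	return rsp->cmd_is_write && rsp->transfer_len &&
	       !(rsp->flags & REQ_INLINE_DATA);
}

/* Host read: data flows back to the host, so RDMA WRITEs are needed
 * only when there is data to return and the command did not fail. */
bool sketch_need_data_out(const struct sketch_rsp *rsp)
{
	return !rsp->cmd_is_write && rsp->transfer_len &&
	       !rsp->cqe_status && !(rsp->flags & REQ_INLINE_DATA);
}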
211 struct nvmet_rdma_rsp *rsp;
215 rsp = list_first_entry_or_null(&queue->free_rsps,
217 if (likely(rsp))
218 list_del(&rsp->free_list);
221 if (unlikely(!rsp)) {
224 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
225 if (unlikely(!rsp))
227 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
229 kfree(rsp);
233 rsp->allocated = true;
236 return rsp;
240 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
244 if (unlikely(rsp->allocated)) {
245 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
246 kfree(rsp);
250 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
251 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
252 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
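nvmet_rdma_get_rsp()/nvmet_rdma_put_rsp() above implement a pre-allocated free list with a heap fallback: a response context is normally popped from queue->free_rsps under rsps_lock, and only when the list is momentarily empty is one allocated on demand and marked rsp->allocated so that the put side frees it instead of recycling it. A self-contained userspace sketch of the same pattern, using a plain singly linked list and a pthread mutex in place of the kernel list_head and spinlock (struct pool, pool_get and pool_put are illustrative names):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct ctx {
	struct ctx *next;
	bool allocated;          /* true when not from the preallocated pool */
};

struct pool {
	pthread_mutex_t lock;
	struct ctx *free_list;
};

struct ctx *pool_get(struct pool *p)
{
	struct ctx *c;

	pthread_mutex_lock(&p->lock);
	c = p->free_list;
	if (c)
		p->free_list = c->next;
	pthread_mutex_unlock(&p->lock);

	if (c)
		return c;

	/* Pool exhausted: fall back to a one-off heap allocation and
	 * remember that, so pool_put() releases it rather than recycling. */
	c = calloc(1, sizeof(*c));
	if (c)
		c->allocated = true;
	return c;
}

void pool_put(struct pool *p, struct ctx *c)
{
	if (c->allocated) {
		free(c);
		return;
	}
	pthread_mutex_lock(&p->lock);
	c->next = p->free_list;
	p->free_list = c;
	pthread_mutex_unlock(&p->lock);
}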
463 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
465 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
469 list_add_tail(&rsp->free_list, &queue->free_rsps);
476 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
478 list_del(&rsp->free_list);
479 nvmet_rdma_free_rsp(ndev, rsp);
492 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
494 list_del(&rsp->free_list);
495 nvmet_rdma_free_rsp(ndev, rsp);
524 struct nvmet_rdma_rsp *rsp;
527 rsp = list_entry(queue->rsp_wr_wait_list.next,
529 list_del(&rsp->wait_list);
532 ret = nvmet_rdma_execute_command(rsp);
536 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
632 static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
635 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
636 struct nvmet_req *req = &rsp->req;
640 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
645 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
652 static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
654 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
655 struct nvmet_req *req = &rsp->req;
658 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
663 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
667 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
669 struct nvmet_rdma_queue *queue = rsp->queue;
671 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
673 if (rsp->n_rdma)
674 nvmet_rdma_rw_ctx_destroy(rsp);
676 if (rsp->req.sg != rsp->cmd->inline_sg)
677 nvmet_req_free_sgls(&rsp->req);
682 nvmet_rdma_put_rsp(rsp);
701 struct nvmet_rdma_rsp *rsp =
705 nvmet_rdma_release_rsp(rsp);
717 struct nvmet_rdma_rsp *rsp =
719 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
722 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
723 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
724 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
726 rsp->send_wr.opcode = IB_WR_SEND;
729 if (nvmet_rdma_need_data_out(rsp)) {
730 if (rsp->req.metadata_len)
731 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
732 cm_id->port_num, &rsp->write_cqe, NULL);
734 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
735 cm_id->port_num, NULL, &rsp->send_wr);
737 first_wr = &rsp->send_wr;
740 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
742 ib_dma_sync_single_for_device(rsp->queue->dev->device,
743 rsp->send_sge.addr, rsp->send_sge.length,
748 nvmet_rdma_release_rsp(rsp);
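nvmet_rdma_queue_response() above decides how the response is posted: the SEND becomes IB_WR_SEND_WITH_INV when the host requested remote invalidation of its rkey, and any RDMA WRITEs for data-out are chained in front of the SEND via rdma_rw_ctx_wrs(), except in the protection-information case, where the WRITEs complete through write_cqe and the SEND is posted from that completion handler instead. A compact sketch of just that selection, with the verbs layer reduced to illustrative enums and fields (none of the names below are the ib_verbs API):

#include <stdbool.h>

enum wr_opcode { WR_SEND, WR_SEND_WITH_INV };

enum first_wr {
	FIRST_SEND,              /* no data-out: post the SEND alone         */
	FIRST_WRITE_THEN_SEND,   /* chain RDMA WRITEs in front of the SEND   */
	FIRST_WRITE_THEN_CQE,    /* PI case: WRITEs complete via write_cqe,
				    the SEND is posted from that handler    */
};

struct response {
	bool needs_data_out;     /* cf. nvmet_rdma_need_data_out()           */
	bool has_metadata;       /* rsp->req.metadata_len != 0               */
	bool invalidate;         /* NVMET_RDMA_REQ_INVALIDATE_RKEY set       */
	unsigned int rkey;
	enum wr_opcode send_opcode;
	unsigned int invalidate_rkey;
};

enum first_wr pick_first_wr(struct response *r)
{
	/* Piggy-back the remote invalidate on the response SEND when the
	 * host asked for it via the keyed SGL descriptor. */
	if (r->invalidate) {
		r->send_opcode = WR_SEND_WITH_INV;
		r->invalidate_rkey = r->rkey;
	} else {
		r->send_opcode = WR_SEND;
	}

	if (!r->needs_data_out)
		return FIRST_SEND;
	return r->has_metadata ? FIRST_WRITE_THEN_CQE : FIRST_WRITE_THEN_SEND;
}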
754 struct nvmet_rdma_rsp *rsp =
759 WARN_ON(rsp->n_rdma <= 0);
760 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
761 rsp->n_rdma = 0;
764 nvmet_rdma_rw_ctx_destroy(rsp);
765 nvmet_req_uninit(&rsp->req);
766 nvmet_rdma_release_rsp(rsp);
775 if (rsp->req.metadata_len)
776 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
777 nvmet_rdma_rw_ctx_destroy(rsp);
780 nvmet_req_complete(&rsp->req, status);
782 rsp->req.execute(&rsp->req);
787 struct nvmet_rdma_rsp *rsp =
790 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
796 WARN_ON(rsp->n_rdma <= 0);
797 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
798 rsp->n_rdma = 0;
801 nvmet_rdma_rw_ctx_destroy(rsp);
802 nvmet_req_uninit(&rsp->req);
803 nvmet_rdma_release_rsp(rsp);
817 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
819 rsp->req.cqe->status = cpu_to_le16(status << 1);
820 nvmet_rdma_rw_ctx_destroy(rsp);
822 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
824 nvmet_rdma_release_rsp(rsp);
828 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
835 sg = rsp->cmd->inline_sg;
848 rsp->req.sg = rsp->cmd->inline_sg;
849 rsp->req.sg_cnt = sg_count;
852 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
854 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
858 if (!nvme_is_write(rsp->req.cmd)) {
859 rsp->req.error_loc =
864 if (off + len > rsp->queue->dev->inline_data_size) {
873 nvmet_rdma_use_inline_sg(rsp, len, off);
874 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
875 rsp->req.transfer_len += len;
879 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
887 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
890 if (!rsp->req.transfer_len)
893 if (rsp->req.metadata_len)
894 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
896 ret = nvmet_req_alloc_sgls(&rsp->req);
900 ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
903 rsp->n_rdma += ret;
906 rsp->invalidate_rkey = key;
907 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
913 rsp->req.transfer_len = 0;
917 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
919 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
925 return nvmet_rdma_map_sgl_inline(rsp);
928 rsp->req.error_loc =
935 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
937 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
940 rsp->req.error_loc =
946 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
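nvmet_rdma_map_sgl() above dispatches on the SGL descriptor carried in the command: an offset-addressed data descriptor means the payload is inline in the capsule, a keyed descriptor means an rdma_rw context must be set up, and the invalidate subtype additionally arms the SEND_WITH_INV path seen earlier. A sketch of that nibble-based dispatch; the constant values below are illustrative stand-ins rather than the NVMe-defined encodings:

#include <stdint.h>

/* High nibble of the type byte selects the descriptor format, the low
 * nibble the addressing subtype.  Values here are illustrative only. */
enum map_result { MAP_INLINE, MAP_KEYED, MAP_KEYED_INVALIDATE, MAP_INVALID };

enum { FMT_DATA = 0x0, FMT_KEYED_DATA = 0x4 };                      /* high nibble */
enum { SUB_ADDRESS = 0x0, SUB_OFFSET = 0x1, SUB_INVALIDATE = 0xf }; /* low nibble  */

enum map_result map_sgl(uint8_t type)
{
	switch (type >> 4) {
	case FMT_DATA:
		/* Offset-addressed data descriptor: the payload travels
		 * inline in the command capsule (cf. map_sgl_inline). */
		return (type & 0xf) == SUB_OFFSET ? MAP_INLINE : MAP_INVALID;
	case FMT_KEYED_DATA:
		/* Keyed descriptor: an rdma_rw context is required; the
		 * invalidate subtype also requests SEND_WITH_INV later. */
		if ((type & 0xf) == SUB_INVALIDATE)
			return MAP_KEYED_INVALIDATE;
		if ((type & 0xf) == SUB_ADDRESS)
			return MAP_KEYED;
		return MAP_INVALID;
	default:
		return MAP_INVALID;
	}
}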
951 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
953 struct nvmet_rdma_queue *queue = rsp->queue;
955 if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
958 1 + rsp->n_rdma, queue->idx,
960 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
964 if (nvmet_rdma_need_data_in(rsp)) {
965 if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
966 queue->cm_id->port_num, &rsp->read_cqe, NULL))
967 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
969 rsp->req.execute(&rsp->req);
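nvmet_rdma_execute_command() above charges 1 + rsp->n_rdma send-queue slots against queue->sq_wr_avail before posting anything (one slot for the response SEND plus one per RDMA READ/WRITE work request), rolls the charge back and defers the request onto rsp_wr_wait_list when the queue would overflow, and the wait-list drain shown at lines 524-536 retries those requests as completions return credits. A userspace sketch of that credit scheme using C11 atomics (struct sq_budget and the function names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct sq_budget {
	atomic_int wr_avail;     /* remaining send-queue slots */
};

/* Try to reserve one SEND plus n_rdma READ/WRITE slots.  On shortage the
 * reservation is rolled back and the caller is expected to park the
 * request on a wait list until completions return credits. */
bool reserve_wrs(struct sq_budget *q, int n_rdma)
{
	int need = 1 + n_rdma;

	if (atomic_fetch_sub(&q->wr_avail, need) - need < 0) {
		atomic_fetch_add(&q->wr_avail, need);  /* undo and defer */
		return false;
	}
	return true;
}

/* Called from the completion path: return the slots so the caller can
 * retry anything still parked on the wait list. */
void release_wrs(struct sq_budget *q, int n_rdma)
{
	atomic_fetch_add(&q->wr_avail, 1 + n_rdma);
}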
1012 struct nvmet_rdma_rsp *rsp;
1031 rsp = nvmet_rdma_get_rsp(queue);
1032 if (unlikely(!rsp)) {
1041 rsp->queue = queue;
1042 rsp->cmd = cmd;
1043 rsp->flags = 0;
1044 rsp->req.cmd = cmd->nvme_cmd;
1045 rsp->req.port = queue->port;
1046 rsp->n_rdma = 0;
1053 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
1055 nvmet_rdma_put_rsp(rsp);
1060 nvmet_rdma_handle_command(queue, rsp);
1652 struct nvmet_rdma_rsp *rsp;
1654 rsp = list_first_entry(&queue->rsp_wait_list,
1657 list_del(&rsp->wait_list);
1658 nvmet_rdma_put_rsp(rsp);
1985 struct nvmet_rdma_rsp *rsp =
1987 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;