Lines matching references to rsp

164 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp);
194 static inline bool nvmet_rdma_need_data_in(struct nvmet_rdma_rsp *rsp)
196 return nvme_is_write(rsp->req.cmd) &&
197 rsp->req.transfer_len &&
198 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
201 static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
203 return !nvme_is_write(rsp->req.cmd) &&
204 rsp->req.transfer_len &&
205 !rsp->req.cqe->status &&
206 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
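
The two predicates at lines 194-206 decide whether a command still owes an RDMA data transfer: a write command whose payload was not carried in-capsule needs its data pulled in from the host (RDMA READ), while a read command that completed without error needs its data pushed back out (RDMA WRITE). A minimal userspace reconstruction of that logic follows; the struct layout, the flag value, and the is_write field standing in for nvme_is_write() are assumptions, not the kernel definitions.

/*
 * Illustrative userspace reconstruction of the two predicates above.
 * The struct layout and the is_write stand-in for nvme_is_write()
 * are simplified assumptions, not the kernel definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define NVMET_RDMA_REQ_INLINE_DATA (1 << 0)    /* assumed flag value */

struct fake_rsp {
        bool     is_write;      /* stands in for nvme_is_write(rsp->req.cmd) */
        uint32_t transfer_len;  /* rsp->req.transfer_len */
        uint16_t cqe_status;    /* rsp->req.cqe->status */
        int      flags;         /* rsp->flags */
};

/* Host-to-target data: a write command whose payload was not sent
 * in-capsule still has to be fetched from the host with an RDMA READ. */
static bool need_data_in(const struct fake_rsp *rsp)
{
        return rsp->is_write && rsp->transfer_len &&
               !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}

/* Target-to-host data: a read command that completed without error and
 * has no in-capsule data needs an RDMA WRITE back to the host. */
static bool need_data_out(const struct fake_rsp *rsp)
{
        return !rsp->is_write && rsp->transfer_len &&
               !rsp->cqe_status &&
               !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
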
212 struct nvmet_rdma_rsp *rsp;
216 rsp = list_first_entry_or_null(&queue->free_rsps,
218 if (likely(rsp))
219 list_del(&rsp->free_list);
222 if (unlikely(!rsp)) {
225 rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
226 if (unlikely(!rsp))
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
230 kfree(rsp);
234 rsp->allocated = true;
237 return rsp;
241 nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
245 if (unlikely(rsp->allocated)) {
246 nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
247 kfree(rsp);
251 spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
252 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
253 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
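
Lines 212-253 show nvmet_rdma_get_rsp()/nvmet_rdma_put_rsp() preferring a pre-allocated entry from the queue's free list and falling back to kzalloc() only when the pool is empty, with the fallback marked via rsp->allocated so the put side knows whether to free it or return it to the list. The sketch below mirrors that pattern in plain userspace C, assuming a pthread mutex can stand in for the rsps_lock spinlock; every name in it is illustrative.

/*
 * Minimal sketch of the "pooled with heap fallback" pattern: get()
 * prefers a pre-allocated entry from the free list and only falls back
 * to the heap when the pool is exhausted; put() frees fallback entries
 * and returns pooled ones to the list.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct pool_rsp {
        struct pool_rsp *next;  /* free-list linkage */
        bool allocated;         /* true if it came from calloc() */
};

struct pool {
        pthread_mutex_t lock;
        struct pool_rsp *free_head;
};

static struct pool_rsp *pool_get(struct pool *p)
{
        struct pool_rsp *rsp;

        pthread_mutex_lock(&p->lock);
        rsp = p->free_head;
        if (rsp)
                p->free_head = rsp->next;
        pthread_mutex_unlock(&p->lock);

        if (rsp)
                return rsp;

        /* Pool exhausted: fall back to the heap and remember it. */
        rsp = calloc(1, sizeof(*rsp));
        if (rsp)
                rsp->allocated = true;
        return rsp;
}

static void pool_put(struct pool *p, struct pool_rsp *rsp)
{
        if (rsp->allocated) {
                free(rsp);
                return;
        }
        pthread_mutex_lock(&p->lock);
        rsp->next = p->free_head;
        p->free_head = rsp;
        pthread_mutex_unlock(&p->lock);
}
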
464 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
466 ret = nvmet_rdma_alloc_rsp(ndev, rsp);
470 list_add_tail(&rsp->free_list, &queue->free_rsps);
477 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
479 list_del(&rsp->free_list);
480 nvmet_rdma_free_rsp(ndev, rsp);
493 struct nvmet_rdma_rsp *rsp = &queue->rsps[i];
495 list_del(&rsp->free_list);
496 nvmet_rdma_free_rsp(ndev, rsp);
525 struct nvmet_rdma_rsp *rsp;
528 rsp = list_entry(queue->rsp_wr_wait_list.next,
530 list_del(&rsp->wait_list);
533 ret = nvmet_rdma_execute_command(rsp);
537 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list);
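
The fragment at lines 525-537 services rsp_wr_wait_list: the oldest deferred response is taken off the list, nvmet_rdma_execute_command() is retried, and on failure the entry is put back at the head so ordering is preserved. A hedged sketch of that retry-or-requeue loop, with an illustrative singly linked list, no locking, and a caller-supplied try_execute hook in place of the real function:

#include <stdbool.h>
#include <stddef.h>

struct waiter {
        struct waiter *next;
};

/*
 * Pop the oldest waiter and try to run it; if execution still cannot
 * proceed (for example, no send-queue credits), push it back to the
 * front of the list and stop.
 */
static void process_wait_list(struct waiter **head,
                              bool (*try_execute)(struct waiter *))
{
        while (*head != NULL) {
                struct waiter *w = *head;

                *head = w->next;            /* detach the oldest entry */
                if (!try_execute(w)) {
                        w->next = *head;    /* re-queue it at the front */
                        *head = w;
                        break;              /* resources still short: wait */
                }
        }
}
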
633 static int nvmet_rdma_rw_ctx_init(struct nvmet_rdma_rsp *rsp, u64 addr, u32 key,
636 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
637 struct nvmet_req *req = &rsp->req;
641 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp,
646 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
653 static void nvmet_rdma_rw_ctx_destroy(struct nvmet_rdma_rsp *rsp)
655 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
656 struct nvmet_req *req = &rsp->req;
659 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp,
664 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num,
668 static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
670 struct nvmet_rdma_queue *queue = rsp->queue;
672 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
674 if (rsp->n_rdma)
675 nvmet_rdma_rw_ctx_destroy(rsp);
677 if (rsp->req.sg != rsp->cmd->inline_sg)
678 nvmet_req_free_sgls(&rsp->req);
683 nvmet_rdma_put_rsp(rsp);
702 struct nvmet_rdma_rsp *rsp =
706 nvmet_rdma_release_rsp(rsp);
718 struct nvmet_rdma_rsp *rsp =
720 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
723 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
724 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
725 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
727 rsp->send_wr.opcode = IB_WR_SEND;
730 if (nvmet_rdma_need_data_out(rsp)) {
731 if (rsp->req.metadata_len)
732 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
733 cm_id->port_num, &rsp->write_cqe, NULL);
735 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,
736 cm_id->port_num, NULL, &rsp->send_wr);
738 first_wr = &rsp->send_wr;
741 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
743 ib_dma_sync_single_for_device(rsp->queue->dev->device,
744 rsp->send_sge.addr, rsp->send_sge.length,
749 nvmet_rdma_release_rsp(rsp);
755 struct nvmet_rdma_rsp *rsp =
760 WARN_ON(rsp->n_rdma <= 0);
761 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
762 rsp->n_rdma = 0;
765 nvmet_rdma_rw_ctx_destroy(rsp);
766 nvmet_req_uninit(&rsp->req);
767 nvmet_rdma_release_rsp(rsp);
776 if (rsp->req.metadata_len)
777 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
778 nvmet_rdma_rw_ctx_destroy(rsp);
781 nvmet_req_complete(&rsp->req, status);
783 rsp->req.execute(&rsp->req);
788 struct nvmet_rdma_rsp *rsp =
791 struct rdma_cm_id *cm_id = rsp->queue->cm_id;
797 WARN_ON(rsp->n_rdma <= 0);
798 atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
799 rsp->n_rdma = 0;
802 nvmet_rdma_rw_ctx_destroy(rsp);
803 nvmet_req_uninit(&rsp->req);
804 nvmet_rdma_release_rsp(rsp);
818 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr);
820 rsp->req.cqe->status = cpu_to_le16(status << 1);
821 nvmet_rdma_rw_ctx_destroy(rsp);
823 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) {
825 nvmet_rdma_release_rsp(rsp);
829 static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len,
836 sg = rsp->cmd->inline_sg;
849 rsp->req.sg = rsp->cmd->inline_sg;
850 rsp->req.sg_cnt = sg_count;
853 static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
855 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl;
859 if (!nvme_is_write(rsp->req.cmd)) {
860 rsp->req.error_loc =
865 if (off + len > rsp->queue->dev->inline_data_size) {
874 nvmet_rdma_use_inline_sg(rsp, len, off);
875 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA;
876 rsp->req.transfer_len += len;
880 static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
888 rsp->req.transfer_len = get_unaligned_le24(sgl->length);
891 if (!rsp->req.transfer_len)
894 if (rsp->req.metadata_len)
895 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs);
897 ret = nvmet_req_alloc_sgls(&rsp->req);
901 ret = nvmet_rdma_rw_ctx_init(rsp, addr, key, &sig_attrs);
904 rsp->n_rdma += ret;
907 rsp->invalidate_rkey = key;
908 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
914 rsp->req.transfer_len = 0;
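
At line 888 the transfer length is read with get_unaligned_le24() from the keyed SGL descriptor, whose length field is only three bytes wide. The standalone example below shows that 24-bit little-endian decode; the descriptor layout is a local stand-in modelled on the NVMe keyed SGL data block descriptor, not the kernel's struct nvme_keyed_sgl_desc.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct keyed_sgl_desc {
        uint8_t addr[8];        /* 64-bit address, little endian */
        uint8_t length[3];      /* 24-bit length, little endian */
        uint8_t key[4];         /* remote key */
        uint8_t type;           /* descriptor type / subtype */
};

/* Equivalent of get_unaligned_le24(): assemble three LE bytes. */
static uint32_t get_le24(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) | ((uint32_t)p[2] << 16);
}

int main(void)
{
        struct keyed_sgl_desc d;

        memset(&d, 0, sizeof(d));
        d.length[0] = 0x00;
        d.length[1] = 0x10;     /* 0x001000 = 4096 bytes */

        printf("transfer_len = %u\n", get_le24(d.length));
        return 0;
}
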
918 static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
920 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl;
926 return nvmet_rdma_map_sgl_inline(rsp);
929 rsp->req.error_loc =
936 return nvmet_rdma_map_sgl_keyed(rsp, sgl, true);
938 return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
941 rsp->req.error_loc =
947 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
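
nvmet_rdma_map_sgl() (lines 918-947) dispatches on the SGL identifier byte, in which the upper four bits carry the descriptor type and the lower four the subtype: a data block descriptor describing in-capsule (offset) data routes to the inline path, while a keyed data block descriptor routes to the RDMA path, optionally requesting remote key invalidation. The helper below only illustrates the nibble split; the symbolic type and subtype constants live in the kernel headers and are not reproduced here.

#include <stdint.h>
#include <stdio.h>

/* Split the SGL identifier byte into descriptor type and subtype. */
static void decode_sgl_id(uint8_t id, uint8_t *type, uint8_t *subtype)
{
        *type    = id >> 4;     /* descriptor type, e.g. (keyed) data block */
        *subtype = id & 0x0f;   /* subtype, e.g. address vs. in-capsule offset */
}

int main(void)
{
        uint8_t type, subtype;

        decode_sgl_id(0x40, &type, &subtype);   /* example identifier byte */
        printf("type=%#x subtype=%#x\n", type, subtype);
        return 0;
}
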
952 static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
954 struct nvmet_rdma_queue *queue = rsp->queue;
956 if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
959 1 + rsp->n_rdma, queue->idx,
961 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
965 if (nvmet_rdma_need_data_in(rsp)) {
966 if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
967 queue->cm_id->port_num, &rsp->read_cqe, NULL))
968 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
970 rsp->req.execute(&rsp->req);
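
nvmet_rdma_execute_command() (lines 952-970) reserves 1 + n_rdma send-queue work requests up front with atomic_sub_return() and, if that would overdraw sq_wr_avail, returns the credits and defers the command; the credits come back in nvmet_rdma_release_rsp() and the read/write completion handlers shown earlier. A small sketch of that reserve-or-roll-back step, using C11 atomics as a stand-in for the kernel's atomic_t and illustrative names throughout:

#include <stdatomic.h>
#include <stdbool.h>

/* Returns true if the command may be posted now, false to defer it. */
static bool reserve_sq_credits(atomic_int *sq_wr_avail, int n_rdma)
{
        int needed = 1 + n_rdma;        /* one send WR plus the RDMA R/W WRs */

        /* atomic_fetch_sub() returns the old value; old - needed is the
         * new count, matching atomic_sub_return(...) < 0 in the kernel. */
        if (atomic_fetch_sub(sq_wr_avail, needed) - needed < 0) {
                /* Overdrawn: roll the reservation back and wait. */
                atomic_fetch_add(sq_wr_avail, needed);
                return false;
        }
        return true;
}
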
1013 struct nvmet_rdma_rsp *rsp;
1032 rsp = nvmet_rdma_get_rsp(queue);
1033 if (unlikely(!rsp)) {
1042 rsp->queue = queue;
1043 rsp->cmd = cmd;
1044 rsp->flags = 0;
1045 rsp->req.cmd = cmd->nvme_cmd;
1046 rsp->req.port = queue->port;
1047 rsp->n_rdma = 0;
1054 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
1056 nvmet_rdma_put_rsp(rsp);
1061 nvmet_rdma_handle_command(queue, rsp);
1652 struct nvmet_rdma_rsp *rsp;
1654 rsp = list_first_entry(&queue->rsp_wait_list,
1657 list_del(&rsp->wait_list);
1658 nvmet_rdma_put_rsp(rsp);
1985 struct nvmet_rdma_rsp *rsp =
1987 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id;