Lines matching references to req (symbol search over the NVMe over TCP host driver)

80 	struct nvme_request	req;
211 static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req)
213 return req->pdu;
216 static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req)
219 return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) -
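
The two helpers above return pointers into a single per-request PDU buffer: the command PDU sits at the start, and the truncated expression at line 219 suggests the H2C data PDU view is carved out of the same allocation so that it ends where a full command PDU would end. A hedged sketch of that overlay arithmetic, using simplified stand-in structs (demo_cmd_pdu and demo_data_pdu are placeholders, not the real layouts from include/linux/nvme-tcp.h):

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins; the real PDU structs live in include/linux/nvme-tcp.h. */
struct demo_cmd_pdu  { unsigned char hdr[8]; unsigned char cmd[64]; };
struct demo_data_pdu { unsigned char hdr[8]; unsigned char fields[16]; };

/*
 * One allocation backs both PDUs: the data PDU view is placed so that it
 * ends exactly where the command PDU ends, mirroring the
 * "req->pdu + sizeof(cmd pdu) - sizeof(data pdu)" arithmetic above.
 */
static void *demo_data_pdu_of(void *pdu)
{
	return (char *)pdu + sizeof(struct demo_cmd_pdu) -
	       sizeof(struct demo_data_pdu);
}

int main(void)
{
	unsigned char buf[sizeof(struct demo_cmd_pdu)];

	printf("data pdu view starts at offset %zu of a %zu-byte buffer\n",
	       (size_t)((char *)demo_data_pdu_of(buf) - (char *)buf),
	       sizeof(buf));
	return 0;
}
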
223 static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req)
225 if (nvme_is_fabrics(req->req.cmd))
227 return req->queue->cmnd_capsule_len - sizeof(struct nvme_command);
230 static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
232 return req == &req->queue->ctrl->async_req;
235 static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
239 if (unlikely(nvme_tcp_async_req(req)))
242 rq = blk_mq_rq_from_pdu(req);
244 return rq_data_dir(rq) == WRITE && req->data_len &&
245 req->data_len <= nvme_tcp_inline_data_size(req);
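
nvme_tcp_inline_data_size() and nvme_tcp_has_inline_data() above decide whether a write's payload can ride in the command capsule itself: the capacity is the capsule length minus the 64-byte NVMe command (fabrics commands take the special-cased branch at line 225, omitted here). A hedged, self-contained sketch of that decision with the sizes passed as plain parameters:

#include <stdbool.h>
#include <stddef.h>

/* Inline capacity: command capsule length minus the 64-byte NVMe command. */
static size_t demo_inline_data_size(size_t cmnd_capsule_len)
{
	return cmnd_capsule_len - 64;	/* sizeof(struct nvme_command) */
}

/* A request carries inline data only for writes whose payload fits. */
static bool demo_has_inline_data(bool is_write, size_t data_len,
				 size_t cmnd_capsule_len)
{
	return is_write && data_len &&
	       data_len <= demo_inline_data_size(cmnd_capsule_len);
}
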
248 static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
250 return req->iter.bvec->bv_page;
253 static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
255 return req->iter.bvec->bv_offset + req->iter.iov_offset;
258 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
260 return min_t(size_t, iov_iter_single_seg_count(&req->iter),
261 req->pdu_len - req->pdu_sent);
264 static inline size_t nvme_tcp_pdu_data_left(struct nvme_tcp_request *req)
266 return rq_data_dir(blk_mq_rq_from_pdu(req)) == WRITE ?
267 req->pdu_len - req->pdu_sent : 0;
270 static inline size_t nvme_tcp_pdu_last_send(struct nvme_tcp_request *req,
273 return nvme_tcp_pdu_data_left(req) <= len;
276 static void nvme_tcp_init_iter(struct nvme_tcp_request *req,
279 struct request *rq = blk_mq_rq_from_pdu(req);
291 struct bio *bio = req->curr_bio;
304 iov_iter_bvec(&req->iter, dir, vec, nr_bvec, size);
305 req->iter.iov_offset = offset;
308 static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
311 req->data_sent += len;
312 req->pdu_sent += len;
313 iov_iter_advance(&req->iter, len);
314 if (!iov_iter_count(&req->iter) &&
315 req->data_sent < req->data_len) {
316 req->curr_bio = req->curr_bio->bi_next;
317 nvme_tcp_init_iter(req, ITER_SOURCE);
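
nvme_tcp_init_iter() and nvme_tcp_advance_req() above manage a bvec-backed iov_iter over the request's payload: the iterator is built from the current bio's bio_vecs, advanced as bytes move, and rebuilt over the next chained bio once it drains with data still outstanding. A hedged kernel-style sketch of that pattern against the generic iov_iter API; the parameters are illustrative, and the real driver also handles requests carrying a special payload vector:

#include <linux/bvec.h>
#include <linux/uio.h>

/* Build a bvec iterator over 'nr_bvec' segments totalling 'size' bytes. */
static void demo_init_iter(struct iov_iter *iter, struct bio_vec *bvecs,
			   unsigned int nr_bvec, size_t size,
			   unsigned int dir)	/* ITER_SOURCE or ITER_DEST */
{
	iov_iter_bvec(iter, dir, bvecs, nr_bvec, size);
}

/*
 * Advance the cursor by 'len' transferred bytes.  Returns true when the
 * iterator is drained but the transfer is not complete, i.e. the caller
 * should hop to the next chained bio and rebuild the iterator, as
 * nvme_tcp_advance_req() does above.
 */
static bool demo_advance(struct iov_iter *iter, size_t len,
			 size_t *done, size_t total)
{
	*done += len;
	iov_iter_advance(iter, len);
	return !iov_iter_count(iter) && *done < total;
}
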
337 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
340 struct nvme_tcp_queue *queue = req->queue;
343 empty = llist_add(&req->lentry, &queue->req_list) &&
363 struct nvme_tcp_request *req;
367 req = llist_entry(node, struct nvme_tcp_request, lentry);
368 list_add(&req->entry, &queue->send_list);
375 struct nvme_tcp_request *req;
377 req = list_first_entry_or_null(&queue->send_list,
379 if (!req) {
381 req = list_first_entry_or_null(&queue->send_list,
383 if (unlikely(!req))
387 list_del(&req->entry);
388 return req;
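
The queueing pair above is a lock-free producer/consumer handoff: submitters push onto queue->req_list with llist_add() (its return value reports whether the list was empty, which gates sending directly from the caller), and the io_work side splices the whole list at once onto the ordinary send_list. A hedged sketch of the same pattern with a stand-in request type:

#include <linux/llist.h>
#include <linux/list.h>

struct demo_req {
	struct llist_node lentry;	/* lockless producer-side link */
	struct list_head entry;		/* ordered consumer-side link */
};

/* Producer: returns true if the list was empty before this add. */
static bool demo_queue_request(struct demo_req *req,
			       struct llist_head *req_list)
{
	return llist_add(&req->lentry, req_list);
}

/*
 * Consumer: grab everything queued so far in one atomic operation and
 * move it onto an ordinary send list.  llist_del_all() hands back the
 * entries newest-first and list_add() inserts at the head, so the send
 * list comes out in submission order.
 */
static void demo_process_req_list(struct llist_head *req_list,
				  struct list_head *send_list)
{
	struct llist_node *node;
	struct demo_req *req;

	for (node = llist_del_all(req_list); node; node = node->next) {
		req = llist_entry(node, struct demo_req, lentry);
		list_add(&req->entry, send_list);
	}
}
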
469 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
471 page_frag_free(req->pdu);
479 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
485 req->pdu = page_frag_alloc(&queue->pf_cache,
488 if (!req->pdu)
491 pdu = req->pdu;
492 req->queue = queue;
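
The alloc/free pair above (lines 471 and 485) takes each request's PDU buffer from a per-queue page_frag_cache rather than a dedicated slab, and releases it with page_frag_free(). A hedged sketch of that pairing; the size and GFP flags here are illustrative, and the real allocation is sized for the command PDU plus an optional header digest:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate a small zeroed PDU buffer from a per-queue fragment cache. */
static void *demo_alloc_pdu(struct page_frag_cache *cache, size_t size)
{
	return page_frag_alloc(cache, size, GFP_KERNEL | __GFP_ZERO);
}

/* Return the fragment; the backing page is freed once all users are gone. */
static void demo_free_pdu(void *pdu)
{
	page_frag_free(pdu);
}
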
548 struct nvme_tcp_request *req;
560 req = blk_mq_rq_to_pdu(rq);
561 if (req->status == cpu_to_le16(NVME_SC_SUCCESS))
562 req->status = cqe->status;
564 if (!nvme_try_complete_req(rq, req->status, cqe->result))
627 static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req)
629 struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req);
630 struct nvme_tcp_queue *queue = req->queue;
631 struct request *rq = blk_mq_rq_from_pdu(req);
632 u32 h2cdata_sent = req->pdu_len;
636 req->state = NVME_TCP_SEND_H2C_PDU;
637 req->offset = 0;
638 req->pdu_len = min(req->h2cdata_left, queue->maxh2cdata);
639 req->pdu_sent = 0;
640 req->h2cdata_left -= req->pdu_len;
641 req->h2cdata_offset += h2cdata_sent;
645 if (!req->h2cdata_left)
654 cpu_to_le32(data->hdr.hlen + hdgst + req->pdu_len + ddgst);
655 data->ttag = req->ttag;
657 data->data_offset = cpu_to_le32(req->h2cdata_offset);
658 data->data_length = cpu_to_le32(req->pdu_len);
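
nvme_tcp_setup_h2c_data_pdu() above carves the outstanding write data into chunks no larger than the controller-advertised MAXH2CDATA, advancing the data offset by what the previous PDU covered and tracking what remains. A self-contained sketch of just that chunking arithmetic (field names borrowed from the fragments; header, ttag, and digest setup omitted):

#include <stdio.h>

struct demo_h2c_state {
	unsigned int pdu_len;		/* bytes covered by the current H2C PDU */
	unsigned int h2cdata_left;	/* bytes still owed after this PDU */
	unsigned int h2cdata_offset;	/* data offset of the current PDU */
};

/* Carve the next chunk, capped at the controller's MAXH2CDATA. */
static void demo_next_h2c_chunk(struct demo_h2c_state *st,
				unsigned int maxh2cdata)
{
	unsigned int sent = st->pdu_len;

	st->pdu_len = st->h2cdata_left < maxh2cdata ?
			st->h2cdata_left : maxh2cdata;
	st->h2cdata_left -= st->pdu_len;
	st->h2cdata_offset += sent;
}

int main(void)
{
	/* Example: a 300 KiB R2T against a 128 KiB MAXH2CDATA -> 3 PDUs. */
	struct demo_h2c_state st = { .pdu_len = 0, .h2cdata_left = 300 << 10 };

	do {
		demo_next_h2c_chunk(&st, 128 << 10);
		printf("H2C chunk: offset %u, length %u, left %u\n",
		       st.h2cdata_offset, st.pdu_len, st.h2cdata_left);
	} while (st.h2cdata_left);
	return 0;
}
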
664 struct nvme_tcp_request *req;
676 req = blk_mq_rq_to_pdu(rq);
680 "req %d r2t len is %u, probably a bug...\n",
685 if (unlikely(req->data_sent + r2t_length > req->data_len)) {
687 "req %d r2t len %u exceeded data len %u (%zu sent)\n",
688 rq->tag, r2t_length, req->data_len, req->data_sent);
692 if (unlikely(r2t_offset < req->data_sent)) {
694 "req %d unexpected r2t offset %u (expected %zu)\n",
695 rq->tag, r2t_offset, req->data_sent);
699 req->pdu_len = 0;
700 req->h2cdata_left = r2t_length;
701 req->h2cdata_offset = r2t_offset;
702 req->ttag = pdu->ttag;
704 nvme_tcp_setup_h2c_data_pdu(req);
705 nvme_tcp_queue_request(req, false, true);
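
The R2T handler above rejects malformed transfer requests before any H2C PDU is set up: a zero-length R2T, a range running past the request's data length, or an offset pointing at data the host has not reached yet. A hedged userspace sketch of those three checks (the real code reports via dev_err() and fails the connection with a protocol error):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Returns true if the R2T is acceptable, mirroring the checks above. */
static bool demo_r2t_is_valid(unsigned int r2t_length, unsigned int r2t_offset,
			      size_t data_sent, unsigned int data_len)
{
	if (!r2t_length) {
		fprintf(stderr, "r2t len is 0, probably a bug\n");
		return false;
	}
	if (data_sent + r2t_length > data_len) {
		fprintf(stderr, "r2t len %u exceeds data len %u (%zu sent)\n",
			r2t_length, data_len, data_sent);
		return false;
	}
	if (r2t_offset < data_sent) {
		fprintf(stderr, "unexpected r2t offset %u (expected %zu)\n",
			r2t_offset, data_sent);
		return false;
	}
	return true;
}
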
774 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
783 if (!iov_iter_count(&req->iter)) {
784 req->curr_bio = req->curr_bio->bi_next;
790 if (!req->curr_bio) {
797 nvme_tcp_init_iter(req, ITER_DEST);
802 iov_iter_count(&req->iter));
806 &req->iter, recv_len, queue->rcv_hash);
809 &req->iter, recv_len);
829 le16_to_cpu(req->status));
861 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
863 req->status = cpu_to_le16(NVME_SC_DATA_XFER_ERROR);
874 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
876 nvme_tcp_end_request(rq, le16_to_cpu(req->status));
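
On the receive side (lines 774 onward) the payload is copied from the socket buffer straight into the request's iov_iter, rebuilding the iterator over the next bio with ITER_DEST when the current one drains, and folding the bytes into the data-digest hash when one was negotiated; a digest mismatch later marks the request with NVME_SC_DATA_XFER_ERROR. A hedged kernel-style sketch of the copy branch only, in the ahash-based form these fragments use; the skb, offset, and hash request are assumed to come from the surrounding receive context:

#include <linux/skbuff.h>
#include <linux/uio.h>
#include <crypto/hash.h>

/*
 * Copy 'recv_len' payload bytes from the skb into the destination
 * iterator, hashing them on the fly when a data digest is in use.
 */
static int demo_recv_payload(struct sk_buff *skb, int offset,
			     struct iov_iter *iter, int recv_len,
			     struct ahash_request *rcv_hash)
{
	if (rcv_hash)
		return skb_copy_and_hash_datagram_iter(skb, offset, iter,
						       recv_len, rcv_hash);
	return skb_copy_datagram_iter(skb, offset, iter, recv_len);
}
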
980 static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
982 if (nvme_tcp_async_req(req)) {
985 nvme_complete_async_event(&req->queue->ctrl->ctrl,
988 nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
993 static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
995 struct nvme_tcp_queue *queue = req->queue;
996 int req_data_len = req->data_len;
997 u32 h2cdata_left = req->h2cdata_left;
1004 struct page *page = nvme_tcp_req_cur_page(req);
1005 size_t offset = nvme_tcp_req_cur_offset(req);
1006 size_t len = nvme_tcp_req_cur_length(req);
1007 bool last = nvme_tcp_pdu_last_send(req, len);
1008 int req_data_sent = req->data_sent;
1035 nvme_tcp_advance_req(req, ret);
1041 &req->ddgst);
1042 req->state = NVME_TCP_SEND_DDGST;
1043 req->offset = 0;
1046 nvme_tcp_setup_h2c_data_pdu(req);
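
nvme_tcp_try_send_data() above loops over the iterator one contiguous chunk at a time: each pass takes the current page, offset, and length (capped at what is left of the current PDU), flags the last chunk, sends it, and advances the cursor; when the PDU's data is complete it moves on to the data digest or to the next H2C PDU. A simplified, self-contained sketch of that loop's shape (the real driver sends pages via sock_sendmsg(), and its 'last' flag also depends on digests and remaining H2C data):

#include <stddef.h>
#include <stdbool.h>

/* Illustrative cursor over a request's payload; not the driver struct. */
struct demo_send_cursor {
	size_t data_len;	/* total payload for the request */
	size_t data_sent;	/* bytes handed to the socket so far */
	size_t pdu_len;		/* bytes owned by the current PDU */
	size_t pdu_sent;	/* bytes of the current PDU already sent */
};

/* Stand-in for the socket send; returns bytes accepted, or <= 0. */
typedef long (*demo_send_fn)(const void *buf, size_t len, bool last);

/*
 * Cap each chunk at what is left of the current PDU, flag the final
 * chunk, and advance the cursor.  Returns 1 when the PDU's data is
 * fully sent, 0 when the socket backed off and the caller should retry.
 */
static int demo_try_send_data(struct demo_send_cursor *c, const char *payload,
			      demo_send_fn send)
{
	while (c->pdu_sent < c->pdu_len) {
		size_t len = c->pdu_len - c->pdu_sent;
		bool last = true;	/* one contiguous chunk in this sketch */
		long ret = send(payload + c->data_sent, len, last);

		if (ret <= 0)
			return 0;
		c->data_sent += ret;
		c->pdu_sent += ret;
	}
	return 1;	/* next: data digest, or the next H2C PDU if any */
}
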
1056 static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req)
1058 struct nvme_tcp_queue *queue = req->queue;
1059 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
1062 bool inline_data = nvme_tcp_has_inline_data(req);
1064 int len = sizeof(*pdu) + hdgst - req->offset;
1072 if (queue->hdr_digest && !req->offset)
1075 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1084 req->state = NVME_TCP_SEND_DATA;
1092 req->offset += ret;
1097 static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
1099 struct nvme_tcp_queue *queue = req->queue;
1100 struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req);
1104 int len = sizeof(*pdu) - req->offset + hdgst;
1107 if (queue->hdr_digest && !req->offset)
1110 if (!req->h2cdata_left)
1113 bvec_set_virt(&bvec, (void *)pdu + req->offset, len);
1121 req->state = NVME_TCP_SEND_DATA;
1126 req->offset += ret;
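
Both PDU-send helpers above push the header (plus optional header digest) from kernel virtual memory by wrapping it in a single bio_vec with bvec_set_virt() and accumulating a partial-send offset in req->offset. A hedged kernel-style sketch of that pattern; the socket handling and flag choice are simplified relative to the driver, which also sets MSG_SPLICE_PAGES:

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/*
 * Send the remainder of a PDU header starting at 'offset' through a
 * one-entry bvec iterator.  Returns bytes sent (the caller advances its
 * offset), 0 if the socket is full, or a negative error.
 */
static int demo_send_pdu(struct socket *sock, void *pdu, size_t pdu_len,
			 size_t offset, bool more_to_come)
{
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT |
			     (more_to_come ? MSG_MORE : MSG_EOR),
	};
	struct bio_vec bvec;

	bvec_set_virt(&bvec, (char *)pdu + offset, pdu_len - offset);
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, pdu_len - offset);
	return sock_sendmsg(sock, &msg);
}
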
1131 static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
1133 struct nvme_tcp_queue *queue = req->queue;
1134 size_t offset = req->offset;
1135 u32 h2cdata_left = req->h2cdata_left;
1139 .iov_base = (u8 *)&req->ddgst + req->offset,
1140 .iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
1154 nvme_tcp_setup_h2c_data_pdu(req);
1160 req->offset += ret;
1166 struct nvme_tcp_request *req;
1175 req = queue->request;
1178 if (req->state == NVME_TCP_SEND_CMD_PDU) {
1179 ret = nvme_tcp_try_send_cmd_pdu(req);
1182 if (!nvme_tcp_has_inline_data(req))
1186 if (req->state == NVME_TCP_SEND_H2C_PDU) {
1187 ret = nvme_tcp_try_send_data_pdu(req);
1192 if (req->state == NVME_TCP_SEND_DATA) {
1193 ret = nvme_tcp_try_send_data(req);
1198 if (req->state == NVME_TCP_SEND_DDGST)
1199 ret = nvme_tcp_try_send_ddgst(req);
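
nvme_tcp_try_send() above dispatches on req->state, the per-request send state machine: a command PDU first (continuing into data only for inline writes), an H2C data PDU when answering an R2T, then the data itself, then the data digest, possibly cycling back to another H2C PDU while R2T data remains. A hedged sketch of that progression alone; each real step can also stop early when the socket has no room and resume from the same state on the next io_work pass:

#include <stdbool.h>

enum demo_send_state {
	SEND_CMD_PDU,
	SEND_H2C_PDU,
	SEND_DATA,
	SEND_DDGST,
	SEND_DONE,
};

/* State progression only; partial sends stay in the current state. */
static enum demo_send_state demo_next_state(enum demo_send_state s,
					    bool has_inline_data,
					    bool data_digest, bool more_h2c)
{
	switch (s) {
	case SEND_CMD_PDU:
		/* Inline writes continue with their payload. */
		return has_inline_data ? SEND_DATA : SEND_DONE;
	case SEND_H2C_PDU:
		return SEND_DATA;
	case SEND_DATA:
		if (data_digest)
			return SEND_DDGST;
		return more_h2c ? SEND_H2C_PDU : SEND_DONE;
	case SEND_DDGST:
		return more_h2c ? SEND_H2C_PDU : SEND_DONE;
	default:
		return SEND_DONE;
	}
}
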
2264 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2265 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2267 nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
2273 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2274 struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl;
2275 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2277 int qid = nvme_tcp_queue_id(req->queue);
2281 nvme_tcp_queue_id(req->queue), nvme_cid(rq), pdu->hdr.type,
2313 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2314 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2322 req->data_len <= nvme_tcp_inline_data_size(req))
2323 nvme_tcp_set_sg_inline(queue, c, req->data_len);
2325 nvme_tcp_set_sg_host_data(c, req->data_len);
2333 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2334 struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req);
2335 struct nvme_tcp_queue *queue = req->queue;
2343 req->state = NVME_TCP_SEND_CMD_PDU;
2344 req->status = cpu_to_le16(NVME_SC_SUCCESS);
2345 req->offset = 0;
2346 req->data_sent = 0;
2347 req->pdu_len = 0;
2348 req->pdu_sent = 0;
2349 req->h2cdata_left = 0;
2350 req->data_len = blk_rq_nr_phys_segments(rq) ?
2352 req->curr_bio = rq->bio;
2353 if (req->curr_bio && req->data_len)
2354 nvme_tcp_init_iter(req, rq_data_dir(rq));
2357 req->data_len <= nvme_tcp_inline_data_size(req))
2358 req->pdu_len = req->data_len;
2364 if (queue->data_digest && req->pdu_len) {
2369 pdu->hdr.pdo = req->pdu_len ? pdu->hdr.hlen + hdgst : 0;
2371 cpu_to_le32(pdu->hdr.hlen + hdgst + req->pdu_len + ddgst);
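
nvme_tcp_setup_cmd_pdu() above resets the request's bookkeeping and then computes the command PDU header lengths: PDO (the payload data offset) points past the header and header digest only when inline data follows, and PLEN covers header, header digest, inline payload, and data digest. A small sketch of just that arithmetic; digest lengths are 4 bytes when negotiated and 0 otherwise, and the hlen value passed in main() is illustrative:

#include <stdio.h>

/* PDO and PLEN as derived from the fragments at lines 2369 and 2371. */
static void demo_cmd_pdu_lengths(unsigned int hlen, unsigned int hdgst,
				 unsigned int ddgst, unsigned int inline_len)
{
	unsigned int pdo = inline_len ? hlen + hdgst : 0;
	unsigned int plen = hlen + hdgst + inline_len +
			    (inline_len ? ddgst : 0);

	printf("pdo=%u plen=%u\n", pdo, plen);
}

int main(void)
{
	demo_cmd_pdu_lengths(72, 4, 4, 4096);	/* inline 4 KiB write */
	demo_cmd_pdu_lengths(72, 4, 4, 0);	/* read or non-inline write */
	return 0;
}
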
2398 struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
2411 nvme_tcp_queue_request(req, true, bd->last);