Lines matching refs: req

95 	struct nvmet_req		req;
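
The match at 95 is the core request embedded by value in the transport's per-command context; the identifiers below suggest this listing comes from the NVMe-over-TCP target (drivers/nvme/target/tcp.c), which is an inference, not something the listing states. A partial sketch of the container, with unrelated fields elided and the field set beyond req assumed:

struct nvmet_tcp_cmd {
	struct nvmet_tcp_queue	*queue;	/* owning queue (assumed field) */
	struct nvmet_req	req;	/* 95: embedded core request */
	/* plus PDU buffers, an iovec array, and transfer bookkeeping
	 * (rbytes_done, wbytes_done, pdu_len, sg_idx) referenced by the
	 * matches below; full field list elided */
};

Because req is embedded rather than pointed to, the transport can recover its wrapper from a bare struct nvmet_req * with container_of(), as the matches at 547 and 1876 show.
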
210 return nvme_is_write(cmd->req.cmd) &&
211 cmd->rbytes_done < cmd->req.transfer_len;
216 return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
221 return !nvme_is_write(cmd->req.cmd) &&
222 cmd->req.transfer_len > 0 &&
223 !cmd->req.cqe->status;
228 return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&
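
The cluster at 210-228 reads as four small state predicates. Only nvmet_tcp_has_data_in is actually named by a match (the call at 216), so the other three function names and the lines that don't reference req are inferred context, not quoted source:

/* A write (H2C) command whose host data has not fully arrived yet. */
static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) &&
		cmd->rbytes_done < cmd->req.transfer_len;	/* 210-211 */
}

/* ...and is still healthy enough to be worth receiving data for. */
static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status; /* 216 */
}

/* A read (C2H) command with a payload and no error recorded yet. */
static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
	return !nvme_is_write(cmd->req.cmd) &&
		cmd->req.transfer_len > 0 &&
		!cmd->req.cqe->status;				/* 221-223 */
}

/* A write whose data arrived in-capsule with the command PDU;
 * the !rbytes_done term is assumed, it is not part of the match. */
static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
{
	return nvme_is_write(cmd->req.cmd) && cmd->pdu_len &&	/* 228 */
		!cmd->rbytes_done;
}

A nonzero cqe->status means the command already failed during setup, so no further data movement is scheduled for it.
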
330 sgl_free(cmd->req.sg);
332 cmd->req.sg = NULL;
347 sg = &cmd->req.sg[cmd->sg_idx];
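
330/332 look like command-buffer teardown, and 347 like the start of rebuilding a receive iovec from the current scatterlist position. A minimal sketch of the teardown side; the helper name and the kfree of the iovec array are assumptions:

static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
{
	kfree(cmd->iov);
	sgl_free(cmd->req.sg);		/* 330: free the data scatterlist */
	cmd->iov = NULL;
	cmd->req.sg = NULL;		/* 332: don't leave dangling pointers */
}

The companion at 347, sg = &cmd->req.sg[cmd->sg_idx], resumes the scatterlist walk at the element where the previous receive stopped, so a transfer split across several H2C data PDUs lands in the right pages.
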
385 struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl;
393 if (!nvme_is_write(cmd->req.cmd))
396 if (len > cmd->req.port->inline_data_size)
400 cmd->req.transfer_len += len;
402 cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt);
403 if (!cmd->req.sg)
405 cmd->cur_sg = cmd->req.sg;
408 cmd->iov = kmalloc_array(cmd->req.sg_cnt,
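
385-408 trace the data-mapping step: read the SGL descriptor out of the received SQE, validate in-capsule data, then allocate a scatterlist sized for the transfer. A reconstruction under stated assumptions; the SGL type check, the specific NVMe status codes, and the nvmet_tcp_has_data_in() gate around the iovec allocation are inferred, not quoted:

static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_sgl_desc *sgl = &cmd->req.cmd->common.dptr.sgl; /* 385 */
	u32 len = le32_to_cpu(sgl->length);

	if (!len)
		return 0;

	if (sgl->type == ((NVME_SGL_FMT_DATA_DESC << 4) |
			  NVME_SGL_FMT_OFFSET)) {	/* in-capsule data */
		if (!nvme_is_write(cmd->req.cmd))	/* 393: reads can't */
			return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		if (len > cmd->req.port->inline_data_size)	/* 396 */
			return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR;
		cmd->pdu_len = len;
	}
	cmd->req.transfer_len += len;			/* 400 */

	cmd->req.sg = sgl_alloc(len, GFP_KERNEL, &cmd->req.sg_cnt); /* 402 */
	if (!cmd->req.sg)				/* 403 */
		return NVME_SC_INTERNAL;
	cmd->cur_sg = cmd->req.sg;			/* 405 */

	if (nvmet_tcp_has_data_in(cmd)) {
		cmd->iov = kmalloc_array(cmd->req.sg_cnt,	/* 408 */
				sizeof(*cmd->iov), GFP_KERNEL);
		if (!cmd->iov) {
			nvmet_tcp_free_cmd_buffers(cmd);
			return NVME_SC_INTERNAL;
		}
	}
	return 0;
}
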
423 ahash_request_set_crypt(hash, cmd->req.sg,
424 (void *)&cmd->exp_ddgst, cmd->req.transfer_len);
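
423-424 hash the request's whole scatterlist to produce the expected data digest. A sketch, assuming the one-shot crypto_ahash_digest() call that conventionally follows ahash_request_set_crypt():

static void nvmet_tcp_calc_ddgst(struct ahash_request *hash,
		struct nvmet_tcp_cmd *cmd)
{
	ahash_request_set_crypt(hash, cmd->req.sg,
		(void *)&cmd->exp_ddgst, cmd->req.transfer_len); /* 423-424 */
	crypto_ahash_digest(hash);	/* result lands in exp_ddgst */
}

exp_ddgst is later compared against the digest received on the wire; see the mismatch check around 1225 below.
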
445 cmd->req.transfer_len + ddgst);
446 pdu->command_id = cmd->req.cqe->command_id;
447 pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
476 pdu->command_id = cmd->req.cmd->common.command_id;
478 pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len - cmd->rbytes_done);
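
445-447 and 476-478 populate two different outgoing PDUs, and the asymmetry in where command_id comes from is the interesting part: the C2H data PDU copies it from the already-prepared CQE (446), while the R2T PDU echoes the original SQE (476), and its length covers only the bytes not yet received (478). Sketched with assumed function and helper names, and the rest of the header setup elided:

static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_data_pdu *pdu = cmd->data_pdu;
	u8 hdgst = nvmet_tcp_hdgst_len(cmd->queue);	/* assumed helpers */
	u8 ddgst = nvmet_tcp_ddgst_len(cmd->queue);

	/* ...type/flags/hlen setup elided... */
	pdu->hdr.plen = cpu_to_le32(pdu->hdr.hlen + hdgst +
		cmd->req.transfer_len + ddgst);			/* 445 */
	pdu->command_id = cmd->req.cqe->command_id;		/* 446 */
	pdu->data_length = cpu_to_le32(cmd->req.transfer_len);	/* 447 */
}

static void nvmet_setup_r2t_pdu(struct nvmet_tcp_cmd *cmd)
{
	struct nvme_tcp_r2t_pdu *pdu = cmd->r2t_pdu;

	/* ...header setup elided... */
	pdu->command_id = cmd->req.cmd->common.command_id;	/* 476 */
	pdu->r2t_length = cpu_to_le32(cmd->req.transfer_len -
				      cmd->rbytes_done);	/* 478 */
}
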
544 static void nvmet_tcp_queue_response(struct nvmet_req *req)
547 container_of(req, struct nvmet_tcp_cmd, req);
553 sgl = &cmd->req.cmd->common.dptr.sgl;
562 len && len <= cmd->req.port->inline_data_size &&
563 nvme_is_write(cmd->req.cmd))
574 nvmet_tcp_queue_response(&cmd->req);
576 cmd->req.execute(&cmd->req);
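
544-576 are the completion/dispatch seam between the core and the transport. nvmet_tcp_queue_response() is invoked by the core with only a struct nvmet_req *, so 547 recovers the wrapping command via container_of(); 553-563 then defer queueing if the command is still waiting for its in-capsule write data. A condensed sketch; the list/workqueue plumbing and the NVMET_TCP_F_INIT_FAILED flag name are assumptions:

static void nvmet_tcp_queue_response(struct nvmet_req *req)	/* 544 */
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);	/* 547 */
	struct nvmet_tcp_queue *queue = cmd->queue;
	struct nvme_sgl_desc *sgl;
	u32 len;

	if (cmd == queue->cmd) {
		sgl = &cmd->req.cmd->common.dptr.sgl;		/* 553 */
		len = le32_to_cpu(sgl->length);

		/* A write with pending in-capsule data: wait for the
		 * payload before emitting any response. */
		if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
		    len && len <= cmd->req.port->inline_data_size && /* 562 */
		    nvme_is_write(cmd->req.cmd))		/* 563 */
			return;
	}

	llist_add(&cmd->lentry, &queue->resp_list);	/* assumed plumbing */
	queue_work(nvmet_tcp_wq, &queue->io_work);
}

static void nvmet_tcp_execute_request(struct nvmet_tcp_cmd *cmd)
{
	if (cmd->flags & NVMET_TCP_F_INIT_FAILED)
		nvmet_tcp_queue_response(&cmd->req);	/* 574: fail fast */
	else
		cmd->req.execute(&cmd->req);		/* 576: run backend */
}
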
620 cmd->wbytes_done + left < cmd->req.transfer_len ||
923 struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
925 size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
935 if (!nvme_is_write(cmd->req.cmd) || !data_len ||
936 data_len > cmd->req.port->inline_data_size) {
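
923-936 handle a command whose nvmet_req_init() failed while in-capsule write data may already be in flight on the socket: the stale bytes still have to be drained before the queue can parse the next PDU. A condensed sketch; the receive-state constant and the follow-up calls are assumptions:

static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)  /* 923 */
{
	size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length); /* 925 */

	/* No inline data on the wire: just go back to PDU parsing. */
	if (!nvme_is_write(cmd->req.cmd) || !data_len ||	/* 935 */
	    data_len > cmd->req.port->inline_data_size) {	/* 936 */
		nvmet_prepare_receive_pdu(queue);
		return;
	}

	/* Otherwise map a buffer and receive the stale data anyway. */
	if (nvmet_tcp_map_data(cmd)) {
		nvmet_tcp_fatal_error(queue);
		return;
	}
	queue->rcv_state = NVMET_TCP_RECV_DATA;
	nvmet_tcp_build_pdu_iovec(cmd);
	cmd->flags |= NVMET_TCP_F_INIT_FAILED;
}
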
1006 struct nvmet_req *req;
1043 req = &queue->cmd->req;
1044 memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));
1046 if (unlikely(!nvmet_req_init(req, &queue->nvme_cq,
1049 req->cmd, req->cmd->common.command_id,
1050 req->cmd->common.opcode,
1051 le32_to_cpu(req->cmd->common.dptr.sgl.length));
1053 nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
1063 nvmet_req_complete(req, ret);
1075 nvmet_tcp_queue_response(&queue->cmd->req);
1079 queue->cmd->req.execute(&queue->cmd->req);
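
1006-1079 are the heart of command dispatch once a command PDU has fully arrived: copy the SQE into the request, initialize it against the core, then pick one of three paths, receive in-capsule data now, send an R2T and wait, or execute immediately. A condensed reconstruction; the error handling between 1053 and 1063 is abbreviated, the log format string is an approximation, and the state and helper names are assumptions:

static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
{
	struct nvme_command *nvme_cmd = &queue->pdu.cmd.cmd; /* assumed */
	struct nvmet_req *req;				/* 1006 */
	int ret;

	req = &queue->cmd->req;				/* 1043 */
	memcpy(req->cmd, nvme_cmd, sizeof(*nvme_cmd));	/* 1044 */

	if (!nvmet_req_init(req, &queue->nvme_cq,	/* 1046 */
			&queue->nvme_sq, &nvmet_tcp_ops)) {
		pr_err("failed cmd %p id %d opcode %d, data_len: %d\n",
			req->cmd, req->cmd->common.command_id,	/* 1049 */
			req->cmd->common.opcode,		/* 1050 */
			le32_to_cpu(req->cmd->common.dptr.sgl.length));
		nvmet_tcp_handle_req_failure(queue, queue->cmd, req); /* 1053 */
		return 0;
	}

	ret = nvmet_tcp_map_data(queue->cmd);
	if (ret) {
		nvmet_req_complete(req, ret);		/* 1063 */
		return -EAGAIN;
	}

	if (nvmet_tcp_need_data_in(queue->cmd)) {
		if (nvmet_tcp_has_inline_data(queue->cmd)) {
			/* payload follows in this PDU: receive it now */
			queue->rcv_state = NVMET_TCP_RECV_DATA;
			nvmet_tcp_build_pdu_iovec(queue->cmd);
			return 0;
		}
		/* ask the host for the data with an R2T */
		nvmet_tcp_queue_response(&queue->cmd->req);	/* 1075 */
		return 0;
	}

	queue->cmd->req.execute(&queue->cmd->req);	/* 1079 */
	return 0;
}
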
1196 if (cmd->rbytes_done == cmd->req.transfer_len)
1225 queue->idx, cmd->req.cmd->common.command_id,
1228 nvmet_req_uninit(&cmd->req);
1235 if (cmd->rbytes_done == cmd->req.transfer_len)
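
1196 and 1235 are the same completion test in two receive paths: execute the command only once every expected H2C byte has arrived, either directly (no data digest) or after the digest has been verified. 1225-1228 show the failure side, where a mismatch logs the command_id and tears the request back down with nvmet_req_uninit() before killing the connection. A sketch of the digest-path tail only; the surrounding lines and the log text are assumptions:

	/* in the ddgst receive path, after the wire digest arrived: */
	if (cmd->exp_ddgst != cmd->recv_ddgst) {
		pr_err("queue %d: cmd %d data digest mismatch\n", /* approx. */
			queue->idx, cmd->req.cmd->common.command_id); /* 1225 */
		nvmet_req_uninit(&cmd->req);	/* 1228: undo req_init */
		nvmet_tcp_fatal_error(queue);
		return -EPROTO;
	}

	if (cmd->rbytes_done == cmd->req.transfer_len)	/* 1235 (and 1196) */
		nvmet_tcp_execute_request(cmd);
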
1362 c->req.port = queue->port->nport;
1368 c->req.cmd = &c->cmd_pdu->cmd;
1374 c->req.cqe = &c->rsp_pdu->cqe;
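
1362-1374 show per-command setup: the request's port comes from the queue and, notably, its SQE and CQE pointers aim directly into the transport's receive and response PDU buffers, so commands and completions are parsed and built in place with no extra copy. A condensed sketch; the allocation scheme and error unwinding are assumptions:

static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
		struct nvmet_tcp_cmd *c)
{
	u8 hdgst = nvmet_tcp_hdgst_len(queue);	/* assumed helper */

	c->queue = queue;
	c->req.port = queue->port->nport;	/* 1362 */

	c->cmd_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->cmd_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->cmd_pdu)
		return -ENOMEM;
	c->req.cmd = &c->cmd_pdu->cmd;	/* 1368: SQE parsed in place */

	c->rsp_pdu = page_frag_alloc(&queue->pf_cache,
			sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
	if (!c->rsp_pdu)
		return -ENOMEM;	/* real code would unwind cmd_pdu here */
	c->req.cqe = &c->rsp_pdu->cqe;	/* 1374: CQE built in place */

	return 0;
}
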
1465 nvmet_req_uninit(&cmd->req);
1470 nvmet_req_uninit(&queue->connect.req);
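
1465/1470 run at queue teardown: any command still waiting on host data must be uninitialized against the core so its references are dropped. 1470 covers the bootstrap case, where the initial connect command lives in the queue itself rather than in the command array. A sketch with the loop framing assumed:

static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
{
	struct nvmet_tcp_cmd *cmd = queue->cmds;
	int i;

	for (i = 0; i < queue->nr_cmds; i++, cmd++) {
		if (nvmet_tcp_need_data_in(cmd))
			nvmet_req_uninit(&cmd->req);		/* 1465 */
	}

	/* before the cmd array exists, only the connect cmd can be live */
	if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
		nvmet_req_uninit(&queue->connect.req);		/* 1470 */
}
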
1869 static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,
1876 container_of(req, struct nvmet_tcp_cmd, req);
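
1869/1876 close the loop on the embedded-request design: the discovery code hands the transport a bare struct nvmet_req *, and container_of() recovers the command and, through it, the queue and its socket address. A sketch; why the queue's address is wanted here (reporting the address the connection actually arrived on, e.g. for a wildcard listener) is an assumption:

static void nvmet_tcp_disc_port_addr(struct nvmet_req *req,	/* 1869 */
		struct nvmet_port *nport, char *traddr)
{
	struct nvmet_tcp_cmd *cmd =
		container_of(req, struct nvmet_tcp_cmd, req);	/* 1876 */
	struct nvmet_tcp_queue *queue = cmd->queue;

	/* report the per-connection local address as the transport addr */
	snprintf(traddr, NVMF_TRADDR_SIZE, "%pISc",
		 (struct sockaddr *)&queue->sockaddr);
}
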