Lines matching refs: req
23 static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
25 struct nvmet_ctrl *ctrl = req->sq->ctrl;
36 status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
86 if (req->port->inline_data_size)
100 req->port->inline_data_size) / 16);
111 status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
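The identify override helpers share one shape: pull the identify structure the lower controller produced back out of the request's SGL, patch the fields the fabrics target has to own (for the controller page this includes sizing the in-capsule/SGL support from req->port->inline_data_size, as the matches above suggest), and write the result back. Below is a skeleton of nvmet_passthru_override_id_ctrl() under those assumptions, with the actual field fixups elided and the error code assumed; nvmet_passthru_override_id_ns() follows the same pattern for struct nvme_id_ns.

static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
        struct nvme_id_ctrl *id;
        u16 status;

        id = kzalloc(sizeof(*id), GFP_KERNEL);
        if (!id)
                return NVME_SC_INTERNAL;        /* assumed error code */

        /* Read the identify data returned by the passthru controller. */
        status = nvmet_copy_from_sgl(req, 0, id, sizeof(*id));
        if (status)
                goto out_free;

        /* ... fabrics-specific fixups of *id go here (elided); the real
         * code also consults req->sq->ctrl and derives the in-capsule
         * data sizing from req->port->inline_data_size ... */

        status = nvmet_copy_to_sgl(req, 0, id, sizeof(struct nvme_id_ctrl));
out_free:
        kfree(id);
        return status;
}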
118 static u16 nvmet_passthru_override_id_ns(struct nvmet_req *req)
128 status = nvmet_copy_from_sgl(req, 0, id, sizeof(struct nvme_id_ns));
145 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
154 struct nvmet_req *req = container_of(w, struct nvmet_req, p.work);
155 struct request *rq = req->p.rq;
162 req->cmd->common.opcode == nvme_admin_identify) {
163 switch (req->cmd->identify.cns) {
165 nvmet_passthru_override_id_ctrl(req);
168 nvmet_passthru_override_id_ns(req);
173 req->cqe->result = nvme_req(rq)->result;
174 nvmet_req_complete(req, status);
181 struct nvmet_req *req = rq->end_io_data;
183 req->cqe->result = nvme_req(rq)->result;
184 nvmet_req_complete(req, nvme_req(rq)->status);
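These two matches come from the completion callback used for commands issued asynchronously through the block layer. A minimal sketch of the whole handler follows; only the two req-> lines are guaranteed by the listing, and the blk_mq_free_request() release call is an assumption.

static void nvmet_passthru_req_done(struct request *rq,
                                    blk_status_t blk_status)
{
        struct nvmet_req *req = rq->end_io_data;

        /* Hand the lower controller's result and status back to the
         * fabrics completion queue entry, then drop the block request. */
        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, nvme_req(rq)->status);
        blk_mq_free_request(rq);        /* assumed release call */
}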
188 static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
195 if (req->sg_cnt > BIO_MAX_PAGES)
198 if (req->cmd->common.opcode == nvme_cmd_flush)
200 else if (nvme_is_write(req->cmd))
203 bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
207 for_each_sg(req->sg, sg, req->sg_cnt, i) {
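nvmet_passthru_map_sg() turns the target request's scatterlist into a single bio and attaches it to the block-layer request that carries the command to the local controller. Only the req-> references above are guaranteed by the listing; the sketch below fills in the usual mapping pattern (bio_add_pc_page() per segment, blk_rq_append_bio() to attach) as an assumption based on the contemporary block API, not as the file's verbatim body.

static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
{
        struct scatterlist *sg;
        int op_flags = 0;
        struct bio *bio;
        int i, ret;

        /* The whole payload has to fit into one bio. */
        if (req->sg_cnt > BIO_MAX_PAGES)
                return -EINVAL;

        if (req->cmd->common.opcode == nvme_cmd_flush)
                op_flags = REQ_FUA;
        else if (nvme_is_write(req->cmd))
                op_flags = REQ_SYNC | REQ_IDLE;

        bio = bio_alloc(GFP_KERNEL, req->sg_cnt);
        bio->bi_end_io = bio_put;
        bio->bi_opf = req_op(rq) | op_flags;

        /* Add every scatterlist element of the fabrics request. */
        for_each_sg(req->sg, sg, req->sg_cnt, i) {
                if (bio_add_pc_page(rq->q, bio, sg_page(sg), sg->length,
                                    sg->offset) < sg->length) {
                        bio_put(bio);
                        return -EINVAL;
                }
        }

        /* Attach the bio to the passthru request; the &bio form of
         * blk_rq_append_bio() is assumed here, its signature has changed
         * across kernel versions. */
        ret = blk_rq_append_bio(rq, &bio);
        if (unlikely(ret)) {
                bio_put(bio);
                return ret;
        }

        return 0;
}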
224 static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
226 struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
234 if (likely(req->sq->qid != 0)) {
235 u32 nsid = le32_to_cpu(req->cmd->common.nsid);
247 rq = nvme_alloc_request(q, req->cmd, 0);
253 if (req->sg_cnt) {
254 ret = nvmet_passthru_map_sg(req, rq);
266 effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
267 if (req->p.use_workqueue ||
269 INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
270 req->p.rq = rq;
271 schedule_work(&req->p.work);
273 rq->end_io_data = req;
289 nvmet_req_complete(req, status);
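At this point nvmet_passthru_execute_cmd() decides how to actually run the command: identify results need to be rewritten in process context, and commands with side effects need a context that can wait on them, so those cases are bounced to a work item; everything else is issued asynchronously and completed from the end_io callback sketched earlier. A sketch of that branch, in which the command-effects half of the condition and the blk_execute_rq_nowait() arguments are assumptions based on the contemporary API:

        effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
        if (req->p.use_workqueue || effects) {
                /* Run from a workqueue so the identify overrides and
                 * command-effects handling can happen in process context. */
                INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
                req->p.rq = rq;
                schedule_work(&req->p.work);
        } else {
                /* Issue asynchronously; completion arrives via the
                 * end_io callback. */
                rq->end_io_data = req;
                blk_execute_rq_nowait(rq->q, ns ? ns->disk : NULL, rq, 0,
                                      nvmet_passthru_req_done);
        }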
297 static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
299 struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
313 status = nvmet_copy_from_sgl(req, 0, &host[1], sizeof(*host));
325 nvmet_req_complete(req, status);
328 static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
330 req->p.use_workqueue = false;
331 req->execute = nvmet_passthru_execute_cmd;
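Both command parsers fall back to this helper whenever a command can be forwarded to the underlying controller unmodified. The two assignments are exactly the matches above; the NVME_SC_SUCCESS return is assumed.

static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
{
        req->p.use_workqueue = false;   /* let the execute path pick the completion mode */
        req->execute = nvmet_passthru_execute_cmd;
        return NVME_SC_SUCCESS;         /* assumed return value */
}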
335 u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
338 if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
341 switch (req->cmd->common.opcode) {
356 return nvmet_setup_passthru_command(req);
364 static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
366 switch (le32_to_cpu(req->cmd->features.fid)) {
384 return nvmet_setup_passthru_command(req);
409 u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
412 if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
418 if (req->cmd->common.opcode >= nvme_admin_vendor_start)
419 return nvmet_setup_passthru_command(req);
421 switch (req->cmd->common.opcode) {
423 req->execute = nvmet_execute_async_event;
431 req->execute = nvmet_execute_keep_alive;
434 switch (le32_to_cpu(req->cmd->features.fid)) {
439 req->execute = nvmet_execute_set_features;
442 req->execute = nvmet_passthru_set_host_behaviour;
445 return nvmet_passthru_get_set_features(req);
449 switch (le32_to_cpu(req->cmd->features.fid)) {
454 req->execute = nvmet_execute_get_features;
457 return nvmet_passthru_get_set_features(req);
461 switch (req->cmd->identify.cns) {
463 req->execute = nvmet_passthru_execute_cmd;
464 req->p.use_workqueue = true;
467 switch (req->cmd->identify.csi) {
469 req->execute = nvmet_passthru_execute_cmd;
470 req->p.use_workqueue = true;
475 req->execute = nvmet_passthru_execute_cmd;
476 req->p.use_workqueue = true;
479 switch (req->cmd->identify.csi) {
481 req->execute = nvmet_passthru_execute_cmd;
482 req->p.use_workqueue = true;
487 return nvmet_setup_passthru_command(req);
490 return nvmet_setup_passthru_command(req);