Lines matching refs: req — references to the struct nvmet_req argument throughout the NVMe target admin-command handlers (nvmet_parse_admin_cmd() and the nvmet_execute_* functions it dispatches to)
28 static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
32 return sizeof(req->sq->ctrl->hostid);
43 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
45 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->transfer_len));
48 static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
50 struct nvmet_ctrl *ctrl = req->sq->ctrl;
60 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
71 nvmet_req_complete(req, 0);
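The error-log handler copies the controller's error slots newest-first: it starts at the slot indexed by the error counter modulo the slot count and steps backwards through the ring, one nvmet_copy_to_sgl() per entry (line 60). A minimal user-space sketch of that ring walk; the slot count and counter here are stand-ins for the kernel's NVMET_ERROR_LOG_SLOTS and ctrl->err_counter:

#include <stdio.h>

#define ERROR_LOG_SLOTS 128          /* stands in for NVMET_ERROR_LOG_SLOTS */

int main(void)
{
	unsigned long err_counter = 130; /* hypothetical lifetime error count */
	unsigned int slot = err_counter % ERROR_LOG_SLOTS;

	/* Newest entry first, then step backwards through the ring. */
	for (int i = 0; i < 4; i++) {
		printf("log entry %d <- slot %u\n", i, slot);
		slot = (slot == 0) ? ERROR_LOG_SLOTS - 1 : slot - 1;
	}
	return 0;
}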
74 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
80 status = nvmet_req_find_ns(req);
85 if (!req->ns->bdev)
88 host_reads = part_stat_read(req->ns->bdev, ios[READ]);
90 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[READ]), 1000);
91 host_writes = part_stat_read(req->ns->bdev, ios[WRITE]);
93 DIV_ROUND_UP(part_stat_read(req->ns->bdev, sectors[WRITE]), 1000);
103 static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
112 ctrl = req->sq->ctrl;
133 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
139 if (req->transfer_len != sizeof(*log))
146 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
147 status = nvmet_get_smart_log_all(req, log);
149 status = nvmet_get_smart_log_nsid(req, log);
153 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
154 put_unaligned_le64(req->sq->ctrl->err_counter,
156 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
158 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
162 nvmet_req_complete(req, status);
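The SMART helpers build the log from block-layer per-partition counters: ios[READ]/ios[WRITE] map directly to host read/write command counts, while sector counts become the spec's "data units" (thousands of 512-byte units) via DIV_ROUND_UP (lines 90 and 93). A self-contained sketch of that conversion; the counter value is made up:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's DIV_ROUND_UP(): round the quotient up. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint64_t read_sectors = 123456;  /* hypothetical part_stat_read() value */

	/* SMART "data units" are thousands of 512-byte sectors, rounded up. */
	uint64_t data_units_read = DIV_ROUND_UP(read_sectors, 1000);

	printf("data units read: %llu\n", (unsigned long long)data_units_read);
	return 0;
}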
194 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
205 switch (req->cmd->get_log_page.csi) {
222 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
226 nvmet_req_complete(req, status);
229 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
231 struct nvmet_ctrl *ctrl = req->sq->ctrl;
235 if (req->transfer_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
243 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
245 status = nvmet_zero_sgl(req, len, req->transfer_len - len);
247 nvmet_clear_aen_bit(req, NVME_AEN_BIT_NS_ATTR);
250 nvmet_req_complete(req, status);
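The changed-namespace log is a fixed-size page of NVME_MAX_CHANGED_NAMESPACES 32-bit NSIDs: the handler copies the valid prefix (line 243), zero-fills the tail so stale buffer contents never reach the host (line 245), then clears NVME_AEN_BIT_NS_ATTR so the attribute-change event can re-arm. A user-space sketch of the copy-then-zero pattern on a flat buffer; the real code goes through nvmet_copy_to_sgl()/nvmet_zero_sgl():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CHANGED 1024  /* NVME_MAX_CHANGED_NAMESPACES */

int main(void)
{
	uint32_t changed[3] = { 1, 7, 9 };       /* hypothetical changed NSIDs */
	uint32_t log[MAX_CHANGED];
	size_t len = sizeof(changed);

	memcpy(log, changed, len);                        /* valid prefix  */
	memset((char *)log + len, 0, sizeof(log) - len);  /* zero the tail */

	printf("entries: %u %u %u, slot 3: %u\n",
	       (unsigned)log[0], (unsigned)log[1],
	       (unsigned)log[2], (unsigned)log[3]);
	return 0;
}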
253 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
256 struct nvmet_ctrl *ctrl = req->sq->ctrl;
261 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
270 desc->state = req->port->ana_state[grpid];
275 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
295 len = nvmet_format_ana_group(req, grpid, desc);
296 status = nvmet_copy_to_sgl(req, offset, desc, len);
309 nvmet_clear_aen_bit(req, NVME_AEN_BIT_ANA_CHANGE);
315 status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
317 nvmet_req_complete(req, status);
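nvmet_format_ana_group() emits one descriptor per ANA group; when the host sets the RGO (Return Groups Only) bit in lsp (line 261), the per-group namespace list is skipped and each descriptor shrinks to its fixed header. A sketch of how that changes the log length; the 32-byte header is my reading of struct nvme_ana_group_desc, and the group/namespace counts are made up:

#include <stdio.h>
#include <stdint.h>

#define ANA_DESC_HDR 32u  /* fixed part of struct nvme_ana_group_desc */

int main(void)
{
	unsigned int ngrps = 2, nsids_per_grp = 4;

	for (int rgo = 0; rgo <= 1; rgo++) {
		/* RGO omits each group's NSID list from the descriptor. */
		size_t per_desc = ANA_DESC_HDR +
				  (rgo ? 0 : nsids_per_grp * sizeof(uint32_t));
		printf("rgo=%d: %zu descriptor bytes\n", rgo, ngrps * per_desc);
	}
	return 0;
}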
320 static void nvmet_execute_get_log_page(struct nvmet_req *req)
322 if (!nvmet_check_transfer_len(req, nvmet_get_log_page_len(req->cmd)))
325 switch (req->cmd->get_log_page.lid) {
327 return nvmet_execute_get_log_page_error(req);
329 return nvmet_execute_get_log_page_smart(req);
336 return nvmet_execute_get_log_page_noop(req);
338 return nvmet_execute_get_log_changed_ns(req);
340 return nvmet_execute_get_log_cmd_effects_ns(req);
342 return nvmet_execute_get_log_page_ana(req);
345 req->cmd->get_log_page.lid, req->sq->qid);
346 req->error_loc = offsetof(struct nvme_get_log_page_command, lid);
347 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
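For an unrecognized lid, the dispatcher records which SQE byte was bad: req->error_loc is set to the offset of the lid field (line 346) before completing with NVME_SC_INVALID_FIELD | NVME_SC_DNR. A sketch of the offsetof() idiom with a stand-in command layout; the field positions here are illustrative, not the real SQE:

#include <stddef.h>
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the SQE layout; only the field positions matter here. */
struct fake_get_log_page_cmd {
	uint8_t  opcode;
	uint8_t  flags;
	uint16_t command_id;
	uint32_t nsid;
	uint8_t  lid;      /* log page identifier */
};

int main(void)
{
	/* error_loc reports the byte offset of the offending field. */
	printf("lid lives at byte offset %zu\n",
	       offsetof(struct fake_get_log_page_cmd, lid));
	return 0;
}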
350 static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
352 struct nvmet_ctrl *ctrl = req->sq->ctrl;
451 if (req->port->inline_data_size)
462 cmd_capsule_size += req->port->inline_data_size;
485 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
489 nvmet_req_complete(req, status);
492 static void nvmet_execute_identify_ns(struct nvmet_req *req)
497 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
498 req->error_loc = offsetof(struct nvme_identify, nsid);
510 status = nvmet_req_find_ns(req);
516 if (nvmet_ns_revalidate(req->ns)) {
517 mutex_lock(&req->ns->subsys->lock);
518 nvmet_ns_changed(req->ns->subsys, req->ns->nsid);
519 mutex_unlock(&req->ns->subsys->lock);
527 cpu_to_le64(req->ns->size >> req->ns->blksize_shift);
528 switch (req->port->ana_state[req->ns->anagrpid]) {
537 if (req->ns->bdev)
538 nvmet_bdev_set_limits(req->ns->bdev, id);
552 id->anagrpid = cpu_to_le32(req->ns->anagrpid);
554 memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
556 id->lbaf[0].ds = req->ns->blksize_shift;
558 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
563 id->dps = req->ns->pi_type;
565 id->lbaf[0].ms = cpu_to_le16(req->ns->metadata_size);
568 if (req->ns->readonly)
572 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
576 nvmet_req_complete(req, status);
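nvmet_execute_identify_ns() reports capacity in logical blocks: the namespace byte size is shifted right by blksize_shift (line 527). A worked sketch of that conversion, with made-up sizes:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ns_bytes = 8ull << 30;  /* hypothetical 8 GiB namespace */
	uint8_t blksize_shift = 12;      /* 4096-byte logical blocks */

	/* nsze/ncap are counted in logical blocks, not bytes. */
	printf("nsze = %llu blocks\n",
	       (unsigned long long)(ns_bytes >> blksize_shift));
	return 0;
}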
579 static void nvmet_execute_identify_nslist(struct nvmet_req *req)
582 struct nvmet_ctrl *ctrl = req->sq->ctrl;
585 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
604 status = nvmet_copy_to_sgl(req, 0, list, buf_size);
608 nvmet_req_complete(req, status);
611 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
620 status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
625 status = nvmet_copy_to_sgl(req, *off, id, len);
633 static void nvmet_execute_identify_desclist(struct nvmet_req *req)
638 status = nvmet_req_find_ns(req);
642 if (memchr_inv(&req->ns->uuid, 0, sizeof(req->ns->uuid))) {
643 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
645 &req->ns->uuid, &off);
649 if (memchr_inv(req->ns->nguid, 0, sizeof(req->ns->nguid))) {
650 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
652 &req->ns->nguid, &off);
657 status = nvmet_copy_ns_identifier(req, NVME_NIDT_CSI,
659 &req->ns->csi, &off);
663 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
668 nvmet_req_complete(req, status);
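The descriptor list appends one (NIDT, NIDL, reserved, NID) entry per identifier that is actually set — memchr_inv() is the kernel's "any non-zero byte?" test — and the remainder of the 4 KiB Identify buffer is zero-filled via sg_zero_buffer() (line 663). A flat-buffer sketch of the append; the NIDT constant matches the NVMe encoding, while the helper names are made up:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NIDT_UUID     3   /* NVME_NIDT_UUID */
#define NIDT_UUID_LEN 16

static int all_zero(const uint8_t *p, size_t n)
{
	while (n--)
		if (*p++)
			return 0;
	return 1;
}

/* Append one descriptor: 4-byte header, then the identifier itself. */
static size_t append_desc(uint8_t *buf, size_t off,
			  uint8_t type, uint8_t len, const void *id)
{
	buf[off] = type;       /* NIDT */
	buf[off + 1] = len;    /* NIDL; bytes 2..3 stay reserved/zero */
	memcpy(buf + off + 4, id, len);
	return off + 4 + len;
}

int main(void)
{
	uint8_t page[4096] = { 0 };  /* NVME_IDENTIFY_DATA_SIZE, pre-zeroed */
	uint8_t uuid[NIDT_UUID_LEN] = { 0xde, 0xad, 0xbe, 0xef };
	size_t off = 0;

	if (!all_zero(uuid, sizeof(uuid)))   /* kernel uses memchr_inv() */
		off = append_desc(page, off, NIDT_UUID, NIDT_UUID_LEN, uuid);

	printf("descriptor bytes used: %zu\n", off);
	return 0;
}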
671 static void nvmet_execute_identify_ctrl_nvm(struct nvmet_req *req)
674 nvmet_req_complete(req,
675 nvmet_zero_sgl(req, 0, sizeof(struct nvme_id_ctrl_nvm)));
678 static void nvmet_execute_identify(struct nvmet_req *req)
680 if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
683 switch (req->cmd->identify.cns) {
685 nvmet_execute_identify_ns(req);
688 nvmet_execute_identify_ctrl(req);
691 nvmet_execute_identify_nslist(req);
694 nvmet_execute_identify_desclist(req);
697 switch (req->cmd->identify.csi) {
703 nvmet_execute_identify_ns_zns(req);
710 switch (req->cmd->identify.csi) {
712 nvmet_execute_identify_ctrl_nvm(req);
716 nvmet_execute_identify_ctrl_zns(req);
725 req->cmd->identify.cns, req->sq->qid);
726 req->error_loc = offsetof(struct nvme_identify, cns);
727 nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
737 static void nvmet_execute_abort(struct nvmet_req *req)
739 if (!nvmet_check_transfer_len(req, 0))
741 nvmet_set_result(req, 1);
742 nvmet_req_complete(req, 0);
745 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
749 if (req->ns->file)
750 status = nvmet_file_flush(req);
752 status = nvmet_bdev_flush(req);
755 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
759 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
761 u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
762 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
765 status = nvmet_req_find_ns(req);
772 req->ns->readonly = true;
773 status = nvmet_write_protect_flush_sync(req);
775 req->ns->readonly = false;
778 req->ns->readonly = false;
786 nvmet_ns_changed(subsys, req->ns->nsid);
791 u16 nvmet_set_feat_kato(struct nvmet_req *req)
793 u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
795 nvmet_stop_keep_alive_timer(req->sq->ctrl);
796 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
797 nvmet_start_keep_alive_timer(req->sq->ctrl);
799 nvmet_set_result(req, req->sq->ctrl->kato);
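KATO arrives in cdw11 as milliseconds; the target stops the keep-alive timer, stores seconds rounded up (line 796), and restarts it. nvmet_get_feat_kato() (line 885) multiplies back by 1000, so a fractional request rounds up on the way in. A worked sketch of the round trip:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int cdw11_ms = 2500;                       /* host-requested KATO */
	unsigned int kato_s = DIV_ROUND_UP(cdw11_ms, 1000); /* stored: 3 s */
	unsigned int reported_ms = kato_s * 1000;           /* read back: 3000 ms */

	printf("stored %us, reported %ums\n", kato_s, reported_ms);
	return 0;
}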
804 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
806 u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
809 req->error_loc = offsetof(struct nvme_common_command, cdw11);
813 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
814 nvmet_set_result(req, val32);
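Async Event Configuration rejects any bit outside the supported mask and points error_loc at cdw11; the accepted value is published with WRITE_ONCE() so nvmet_get_feat_async_event() can read it locklessly (line 890). A sketch of the mask check; the mask value is made up, standing in for NVMET_AEN_CFG_ALL:

#include <stdio.h>
#include <stdint.h>

#define SUPPORTED_AEN_MASK 0x000007ffu   /* hypothetical NVMET_AEN_CFG_ALL */

/* Returns 0 on success, nonzero for bits the target does not implement. */
static int set_async_event_config(uint32_t val32)
{
	if (val32 & ~SUPPORTED_AEN_MASK)
		return -1;               /* NVME_SC_INVALID_FIELD | DNR */
	/* the real handler does WRITE_ONCE(ctrl->aen_enabled, val32) here */
	return 0;
}

int main(void)
{
	printf("valid: %d, invalid: %d\n",
	       set_async_event_config(0x100),
	       set_async_event_config(0x80000000u));
	return 0;
}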
819 void nvmet_execute_set_features(struct nvmet_req *req)
821 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
822 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
823 u32 cdw11 = le32_to_cpu(req->cmd->common.cdw11);
828 if (!nvmet_check_data_len_lte(req, 0))
839 nvmet_set_result(req,
843 status = nvmet_set_feat_kato(req);
846 status = nvmet_set_feat_async_event(req, NVMET_AEN_CFG_ALL);
852 status = nvmet_set_feat_write_protect(req);
855 req->error_loc = offsetof(struct nvme_common_command, cdw10);
860 nvmet_req_complete(req, status);
863 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
865 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
868 result = nvmet_req_find_ns(req);
873 if (req->ns->readonly == true)
877 nvmet_set_result(req, result);
883 void nvmet_get_feat_kato(struct nvmet_req *req)
885 nvmet_set_result(req, req->sq->ctrl->kato * 1000);
888 void nvmet_get_feat_async_event(struct nvmet_req *req)
890 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
893 void nvmet_execute_get_features(struct nvmet_req *req)
895 struct nvmet_subsys *subsys = nvmet_req_subsys(req);
896 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
899 if (!nvmet_check_transfer_len(req, nvmet_feat_data_len(req, cdw10)))
925 nvmet_get_feat_async_event(req);
928 nvmet_set_result(req, 1);
931 nvmet_set_result(req,
935 nvmet_get_feat_kato(req);
939 if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
940 req->error_loc =
946 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
947 sizeof(req->sq->ctrl->hostid));
950 status = nvmet_get_feat_write_protect(req);
953 req->error_loc =
959 nvmet_req_complete(req, status);
962 void nvmet_execute_async_event(struct nvmet_req *req)
964 struct nvmet_ctrl *ctrl = req->sq->ctrl;
966 if (!nvmet_check_transfer_len(req, 0))
972 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
975 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
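Async Event Requests carry no data and are not completed immediately: the request is parked in ctrl->async_event_cmds[] until an event fires, and once the slot array is full any further AER completes at once with NVME_SC_ASYNC_LIMIT | NVME_SC_DNR (line 972). A sketch of the bounded parking array; the slot count mirrors the kernel's NVMET_ASYNC_EVENTS, and the types are stand-ins:

#include <stdio.h>

#define MAX_ASYNC_EVENTS 4              /* NVMET_ASYNC_EVENTS in the kernel */

struct fake_req { int id; };

static struct fake_req *async_cmds[MAX_ASYNC_EVENTS];
static int nr_async_cmds;

/* Park the request if a slot is free; otherwise fail with the limit status. */
static int queue_async_event(struct fake_req *req)
{
	if (nr_async_cmds >= MAX_ASYNC_EVENTS)
		return -1;              /* NVME_SC_ASYNC_LIMIT | NVME_SC_DNR */
	async_cmds[nr_async_cmds++] = req;
	return 0;                       /* completed later, when an AEN fires */
}

int main(void)
{
	struct fake_req reqs[5] = { {1}, {2}, {3}, {4}, {5} };

	for (int i = 0; i < 5; i++)
		printf("req %d -> %s\n", reqs[i].id,
		       queue_async_event(&reqs[i]) ? "limit reached" : "parked");
	return 0;
}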
981 void nvmet_execute_keep_alive(struct nvmet_req *req)
983 struct nvmet_ctrl *ctrl = req->sq->ctrl;
986 if (!nvmet_check_transfer_len(req, 0))
998 nvmet_req_complete(req, status);
1001 u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
1003 struct nvme_command *cmd = req->cmd;
1007 return nvmet_parse_fabrics_admin_cmd(req);
1008 if (unlikely(!nvmet_check_auth_status(req)))
1010 if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
1011 return nvmet_parse_discovery_cmd(req);
1013 ret = nvmet_check_ctrl_status(req);
1017 if (nvmet_is_passthru_req(req))
1018 return nvmet_parse_passthru_admin_cmd(req);
1022 req->execute = nvmet_execute_get_log_page;
1025 req->execute = nvmet_execute_identify;
1028 req->execute = nvmet_execute_abort;
1031 req->execute = nvmet_execute_set_features;
1034 req->execute = nvmet_execute_get_features;
1037 req->execute = nvmet_execute_async_event;
1040 req->execute = nvmet_execute_keep_alive;
1043 return nvmet_report_invalid_opcode(req);
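nvmet_parse_admin_cmd() only validates the command and selects a handler; the transport invokes req->execute later, once data transfer is set up. A user-space sketch of the opcode-to-handler dispatch; opcodes 0x06 and 0x18 are the spec values for Identify and Keep Alive, and everything else here is a stand-in:

#include <stdio.h>

/* Stand-in request type; the kernel uses struct nvmet_req. */
struct fake_req {
	unsigned char opcode;
	void (*execute)(struct fake_req *);
};

static void execute_identify(struct fake_req *req)
{
	printf("identify (opcode 0x%02x)\n", req->opcode);
}

static void execute_keep_alive(struct fake_req *req)
{
	printf("keep-alive (opcode 0x%02x)\n", req->opcode);
}

/* Parse chooses the handler; the transport calls req->execute later. */
static int parse_admin_cmd(struct fake_req *req)
{
	switch (req->opcode) {
	case 0x06: req->execute = execute_identify;   return 0; /* Identify */
	case 0x18: req->execute = execute_keep_alive; return 0; /* Keep Alive */
	default:   return -1;  /* would report an invalid opcode */
	}
}

int main(void)
{
	struct fake_req req = { .opcode = 0x06 };

	if (parse_admin_cmd(&req) == 0)
		req.execute(&req);
	return 0;
}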