Lines matching refs:ctrl

32 return sizeof(req->sq->ctrl->hostid);
50 struct nvmet_ctrl *ctrl = req->sq->ctrl;
56 spin_lock_irqsave(&ctrl->error_lock, flags);
57 slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
60 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
70 spin_unlock_irqrestore(&ctrl->error_lock, flags);
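
The matches at 50-70 are the error-log read path: ctrl->error_lock guards both a running error count and a fixed ring of NVMET_ERROR_LOG_SLOTS entries, and the counter modulo the ring size selects the slot to copy out. A minimal sketch of that ring discipline follows; struct err_ring, ERR_LOG_SLOTS, and err_ring_read() are hypothetical stand-ins for the controller's error_lock/err_counter/slots fields, not the driver's actual helpers.

#include <linux/spinlock.h>
#include <linux/nvme.h>

#define ERR_LOG_SLOTS 128	/* stand-in for NVMET_ERROR_LOG_SLOTS */

struct err_ring {
	spinlock_t lock;	/* guards counter and slots together */
	u64 counter;		/* total errors ever recorded */
	struct nvme_error_slot slots[ERR_LOG_SLOTS];
};

static void err_ring_read(struct err_ring *r, struct nvme_error_slot *out)
{
	unsigned long flags;
	u64 slot;

	spin_lock_irqsave(&r->lock, flags);
	slot = r->counter % ERR_LOG_SLOTS;	/* ring index of current slot */
	*out = r->slots[slot];			/* copy while the ring is stable */
	spin_unlock_irqrestore(&r->lock, flags);
}

Taking the lock around both the index computation and the copy is what keeps a concurrent error insertion from tearing the entry being read.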
80 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
115 struct nvmet_ctrl *ctrl;
118 ctrl = req->sq->ctrl;
119 xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
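
The matches at 115-119 (and again at 234 and 570) show how per-namespace work is done: the subsystem keeps its namespaces in an xarray keyed by NSID and walks it with xa_for_each. A minimal sketch of that walk, with struct demo_subsys and struct demo_ns as hypothetical stand-ins for the nvmet subsystem and namespace objects:

#include <linux/xarray.h>

struct demo_ns;

struct demo_subsys {
	struct xarray namespaces;	/* NSID -> namespace object */
};

static unsigned long demo_count_namespaces(struct demo_subsys *subsys)
{
	struct demo_ns *ns;
	unsigned long idx;
	unsigned long count = 0;

	/* xa_for_each() visits only present entries, in index order. */
	xa_for_each(&subsys->namespaces, idx, ns)
		count++;
	return count;
}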
159 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
160 put_unaligned_le64(req->sq->ctrl->err_counter,
162 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
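
The matches at 159-162 snapshot the 64-bit error count into a log buffer under the same error_lock; put_unaligned_le64() is used because the wire-format field carries no alignment guarantee. A minimal sketch of that snapshot, with demo_report_err_count() as a hypothetical helper:

#include <linux/spinlock.h>
#include <asm/unaligned.h>

static void demo_report_err_count(spinlock_t *lock, const u64 *counter,
				  void *wire_field)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* The log structure is packed, so use an unaligned LE64 store. */
	put_unaligned_le64(*counter, wire_field);
	spin_unlock_irqrestore(lock, flags);
}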
203 struct nvmet_ctrl *ctrl = req->sq->ctrl;
210 mutex_lock(&ctrl->lock);
211 if (ctrl->nr_changed_ns == U32_MAX)
214 len = ctrl->nr_changed_ns * sizeof(__le32);
215 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
218 ctrl->nr_changed_ns = 0;
220 mutex_unlock(&ctrl->lock);
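
The matches at 203-220 are the changed-namespace log: ctrl->lock guards nr_changed_ns and changed_ns_list, U32_MAX marks an overflowed list, and reading the page clears the count. A minimal sketch of that read-and-clear; struct changed_log and DEMO_CHANGED_NS are hypothetical stand-ins for the controller's bookkeeping:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>

#define DEMO_CHANGED_NS 1024

struct changed_log {
	struct mutex lock;	/* guards nr and list */
	u32 nr;			/* U32_MAX == list overflowed */
	__le32 list[DEMO_CHANGED_NS];
};

static size_t changed_log_drain(struct changed_log *log, void *buf)
{
	size_t len;

	mutex_lock(&log->lock);
	/* An overflowed log reports a single sentinel entry instead. */
	len = (log->nr == U32_MAX ? 1 : log->nr) * sizeof(__le32);
	memcpy(buf, log->list, len);
	log->nr = 0;		/* reading the page clears the log */
	mutex_unlock(&log->lock);
	return len;
}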
228 struct nvmet_ctrl *ctrl = req->sq->ctrl;
234 xa_for_each(&ctrl->subsys->namespaces, idx, ns)
338 struct nvmet_ctrl *ctrl = req->sq->ctrl;
354 bin2hex(id->sn, &ctrl->subsys->serial,
355 min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
356 nvmet_id_set_model_number(id, ctrl->subsys);
371 if (ctrl->ops->get_mdts)
372 id->mdts = ctrl->ops->get_mdts(ctrl);
376 id->cntlid = cpu_to_le16(ctrl->cntlid);
377 id->ver = cpu_to_le32(ctrl->subsys->ver);
410 id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
426 if (ctrl->ops->flags & NVMF_KEYED_SGLS)
431 strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
438 if (!ctrl->pi_support)
445 id->msdbd = ctrl->ops->msdbd;
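
The block at 338-445 populates the Identify Controller structure: the binary serial is rendered to ASCII hex with bin2hex() (354-355), MDTS is delegated to the transport when ctrl->ops->get_mdts is provided (371-372), and wire fields go through cpu_to_le*() conversions. A minimal sketch of just the serial rendering; demo_fill_sn() is hypothetical, and the space padding is an assumption based on the spec's ASCII-field rules rather than something shown in the listing. The min() mirrors the listing's bound: each source byte expands to two hex characters, so at most sn_len / 2 bytes fit.

#include <linux/kernel.h>
#include <linux/string.h>

static void demo_fill_sn(char *sn, size_t sn_len, const u64 *serial)
{
	memset(sn, ' ', sn_len);	/* assumed: ASCII fields space-padded */
	bin2hex(sn, serial, min(sizeof(*serial), sn_len / 2));
}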
471 struct nvmet_ctrl *ctrl = req->sq->ctrl;
488 req->ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid);
532 if (ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
556 struct nvmet_ctrl *ctrl = req->sq->ctrl;
570 xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
613 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
698 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
701 req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
733 nvmet_stop_keep_alive_timer(req->sq->ctrl);
734 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
735 nvmet_start_keep_alive_timer(req->sq->ctrl);
737 nvmet_set_result(req, req->sq->ctrl->kato);
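
The matches at 733-737 are the keep-alive feature handler: stop the running timer, convert the host's millisecond value to seconds, restart the timer, and report the value actually in effect. A minimal sketch of the conversion at 734; demo_kato_secs() is a hypothetical helper:

#include <linux/kernel.h>

static unsigned int demo_kato_secs(u32 kato_ms)
{
	/*
	 * Round up, so 1500 ms becomes 2 s and a small nonzero request
	 * never truncates to 0, which would disable keep-alive.
	 */
	return DIV_ROUND_UP(kato_ms, 1000);
}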
751 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
759 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
803 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
806 req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
824 nvmet_set_result(req, req->sq->ctrl->kato * 1000);
829 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
834 struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
885 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
886 sizeof(req->sq->ctrl->hostid));
903 struct nvmet_ctrl *ctrl = req->sq->ctrl;
908 mutex_lock(&ctrl->lock);
909 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
910 mutex_unlock(&ctrl->lock);
914 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
915 mutex_unlock(&ctrl->lock);
917 schedule_work(&ctrl->async_event_work);
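
The matches at 903-917 show async-event request handling: commands are parked in a fixed array under ctrl->lock, a full array is rejected, and queued work later completes the parked commands when an event fires. A minimal sketch of the parking logic; struct demo_ctrl, struct demo_req, and DEMO_ASYNC_EVENTS are hypothetical stand-ins for the nvmet controller, request, and NVMET_ASYNC_EVENTS:

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#define DEMO_ASYNC_EVENTS 4

struct demo_req;

struct demo_ctrl {
	struct mutex lock;	/* guards the AER array and its count */
	unsigned int nr_async_event_cmds;
	struct demo_req *async_event_cmds[DEMO_ASYNC_EVENTS];
	struct work_struct async_event_work;
};

/* Returns true if the request was parked, false if the array is full. */
static bool demo_queue_aer(struct demo_ctrl *ctrl, struct demo_req *req)
{
	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= DEMO_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		return false;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	/* Let the worker drain any events that are already pending. */
	schedule_work(&ctrl->async_event_work);
	return true;
}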
922 struct nvmet_ctrl *ctrl = req->sq->ctrl;
927 pr_debug("ctrl %d update keep-alive timer for %d secs\n",
928 ctrl->cntlid, ctrl->kato);
930 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
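
The matches at 922-930 are the keep-alive reset path: each keep-alive command pushes the controller's delayed work out by kato seconds, so teardown only fires if the host goes quiet. A minimal sketch of that rearm; demo_touch_keep_alive() is a hypothetical wrapper around the same mod_delayed_work() call:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void demo_touch_keep_alive(struct delayed_work *ka_work,
				  unsigned int kato_secs)
{
	/*
	 * mod_delayed_work() requeues the work even if it is already
	 * pending, so each call restarts the countdown from now.
	 */
	mod_delayed_work(system_wq, ka_work, kato_secs * HZ);
}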
941 if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)