Lines matching refs:ctrl

32 		return sizeof(req->sq->ctrl->hostid);
50 struct nvmet_ctrl *ctrl = req->sq->ctrl;
56 spin_lock_irqsave(&ctrl->error_lock, flags);
57 slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
60 if (nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
70 spin_unlock_irqrestore(&ctrl->error_lock, flags);
109 struct nvmet_ctrl *ctrl;
112 ctrl = req->sq->ctrl;
113 xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
153 spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
154 put_unaligned_le64(req->sq->ctrl->err_counter,
156 spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
231 struct nvmet_ctrl *ctrl = req->sq->ctrl;
238 mutex_lock(&ctrl->lock);
239 if (ctrl->nr_changed_ns == U32_MAX)
242 len = ctrl->nr_changed_ns * sizeof(__le32);
243 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
246 ctrl->nr_changed_ns = 0;
248 mutex_unlock(&ctrl->lock);
256 struct nvmet_ctrl *ctrl = req->sq->ctrl;
262 xa_for_each(&ctrl->subsys->namespaces, idx, ns)
352 struct nvmet_ctrl *ctrl = req->sq->ctrl;
353 struct nvmet_subsys *subsys = ctrl->subsys;
374 memcpy(id->sn, ctrl->subsys->serial, NVMET_SN_MAX_SIZE);
384 if (nvmet_is_disc_subsys(ctrl->subsys))
394 if (ctrl->ops->get_mdts)
395 id->mdts = ctrl->ops->get_mdts(ctrl);
399 id->cntlid = cpu_to_le16(ctrl->cntlid);
400 id->ver = cpu_to_le32(ctrl->subsys->ver);
449 if (ctrl->ops->flags & NVMF_KEYED_SGLS)
454 strscpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));
461 if (!ctrl->pi_support)
468 id->msdbd = ctrl->ops->msdbd;
558 if (req->sq->ctrl->pi_support && nvmet_ns_has_pi(req->ns)) {
582 struct nvmet_ctrl *ctrl = req->sq->ctrl;
596 xa_for_each(&ctrl->subsys->namespaces, idx, ns) {
795 nvmet_stop_keep_alive_timer(req->sq->ctrl);
796 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
797 nvmet_start_keep_alive_timer(req->sq->ctrl);
799 nvmet_set_result(req, req->sq->ctrl->kato);
813 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
885 nvmet_set_result(req, req->sq->ctrl->kato * 1000);
890 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
946 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
947 sizeof(req->sq->ctrl->hostid));
964 struct nvmet_ctrl *ctrl = req->sq->ctrl;
969 mutex_lock(&ctrl->lock);
970 if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
971 mutex_unlock(&ctrl->lock);
975 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
976 mutex_unlock(&ctrl->lock);
978 queue_work(nvmet_wq, &ctrl->async_event_work);
983 struct nvmet_ctrl *ctrl = req->sq->ctrl;
989 if (!ctrl->kato) {
994 pr_debug("ctrl %d update keep-alive timer for %d secs\n",
995 ctrl->cntlid, ctrl->kato);
996 mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
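
For context, the matches at 795-799, 885 and 983-996 are the Keep Alive Timer (KATO) path. Below is a reduced sketch of the set-features side (nvmet_set_feat_kato() in the kernel tree): the struct is trimmed to the two fields touched here (the full definition lives in drivers/nvme/target/nvmet.h), and the cdw11 read does not appear in the matches above but is how the requested value reaches the handler.

/* Sketch only, not the verbatim kernel source. */
struct nvmet_ctrl {
	u32			kato;		/* keep-alive timeout, in seconds */
	struct delayed_work	ka_work;	/* keep-alive expiry work */
	/* ... remaining fields omitted ... */
};

static u16 nvmet_set_feat_kato(struct nvmet_req *req)
{
	u32 val32 = le32_to_cpu(req->cmd->common.cdw11);

	/* Quiesce the timer before changing its period. */
	nvmet_stop_keep_alive_timer(req->sq->ctrl);

	/* The host passes milliseconds; the target stores whole seconds. */
	req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
	nvmet_start_keep_alive_timer(req->sq->ctrl);

	/* Report back the value actually applied. */
	nvmet_set_result(req, req->sq->ctrl->kato);

	return 0;
}

The get-features counterpart at line 885 converts the stored seconds back to milliseconds before reporting, and the keep-alive command handler at 983-996 rearms ctrl->ka_work with the same ctrl->kato period.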