Lines matching refs:ctrl (drivers/nvme/host/multipath.c). Each entry below is the source line number followed by the matched line; the search elides all surrounding context.

97 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
99 queue_work(nvme_wq, &ns->ctrl->ana_work);
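
The matches at 97-99 are from the I/O failover path: an ANA error on a request marks the path and defers the state refresh to the controller's ANA worker. A minimal sketch of the surrounding context, reconstructed from the upstream nvme_failover_req() (the set_bit() between the two matched lines is an assumption based on that function's mainline shape):

	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		/* flag this path as awaiting an ANA state refresh ... */
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		/* ... and let the controller's worker re-read the ANA log */
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}
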
150 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
154 down_read(&ctrl->namespaces_rwsem);
155 list_for_each_entry(ns, &ctrl->namespaces, list) {
159 if (ctrl->state == NVME_CTRL_LIVE)
162 up_read(&ctrl->namespaces_rwsem);
193 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
197 down_read(&ctrl->namespaces_rwsem);
198 list_for_each_entry(ns, &ctrl->namespaces, list) {
202 up_read(&ctrl->namespaces_rwsem);
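
Lines 150-162 and 193-202 show the locking convention used throughout: every walk of a controller's namespace list holds ctrl->namespaces_rwsem for reading. A sketch of nvme_kick_requeue_lists() built around the matched lines (the requeue scheduling and uevent in the loop body are reconstructed from mainline and are an approximation):

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (!ns->head->disk)
			continue;
		/* restart I/O parked while no usable path existed */
		kblockd_schedule_work(&ns->head->requeue_work);
		if (ctrl->state == NVME_CTRL_LIVE)
			disk_uevent(ns->head->disk, KOBJ_CHANGE);
	}
	up_read(&ctrl->namespaces_rwsem);
}

nvme_mpath_clear_ctrl_paths() at 193-202 follows the same down_read()/list_for_each_entry()/up_read() shape, clearing each namespace's cached current path before kicking the requeue work.
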
231 if (ns->ctrl->state != NVME_CTRL_LIVE &&
232 ns->ctrl->state != NVME_CTRL_DELETING)
250 distance = node_distance(node, ns->ctrl->numa_node);
334 return ns->ctrl->state == NVME_CTRL_LIVE &&
359 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
361 switch (ns->ctrl->state) {
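
Lines 231-232, 334, and 359-361 are the controller-state gates in path selection: a path is skipped unless its controller is LIVE (DELETING is tolerated at 231-232 so in-flight I/O can still complete), and an optimized path additionally requires a LIVE controller at 334. The switch at 361 decides whether any path could still become usable; a hedged sketch of that check (the non-LIVE case labels are assumptions based on the upstream nvme_available_path()):

static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
			continue;
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* this path may (re)connect: keep I/O queued */
			return true;
		default:
			break;
		}
	}
	return false;
}

The node_distance() call at 250 serves the NUMA I/O policy: among usable paths, prefer the one whose controller is closest to the node issuing the I/O.
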
516 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
530 if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
531 !nvme_is_unique_nsid(ctrl, head) || !multipath)
534 head->disk = blk_alloc_disk(ctrl->numa_node);
540 ctrl->subsys->instance, head->instance);
551 if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
552 ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
561 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
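
Lines 516-561 allocate the multipath disk node that stacks on top of the per-controller paths. A condensed sketch, assuming the mainline flow (the error handling and queue-flag setup between the matched lines are reconstructed):

int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	bool vwc = false;

	/* only shared namespaces get a subsystem-level nvmeXnY node */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) ||
	    !nvme_is_unique_nsid(ctrl, head) || !multipath)
		return 0;

	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);

	/* advertise polling only if the tag set has poll queues */
	if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
	    ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
		blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);

	/* propagate the volatile write cache setting up the stack */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(head->disk->queue, vwc, vwc);
	return 0;
}

Note the disk is named after the subsystem instance, not the controller, since it represents the namespace across every controller in the subsystem.
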
605 static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
606 int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
609 void *base = ctrl->ana_log_buf;
613 lockdep_assert_held(&ctrl->ana_lock);
615 for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
620 if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
628 if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
636 if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
639 error = cb(ctrl, desc, data);
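
Lines 605-639 are the core ANA log walker: it steps through variable-length group descriptors, bounds-checking each against the buffer it read, and hands every descriptor to a callback. A sketch assembled around the matched lines (the nnsids handling between them is an assumption from the mainline function):

static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl,
			  struct nvme_ana_group_desc *, void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		/* the fixed part of the descriptor must fit in the log */
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;

		offset += sizeof(*desc);
		/* ... and so must its variable-length NSID array */
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}
	return 0;
}

The lockdep assertion at 613 encodes an invariant visible elsewhere in this listing: every caller (714, 764, 854) takes ctrl->ana_lock first.
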
664 * nvme_mpath_init_identify() and the ctrl will never complete
670 ns->ctrl->state == NVME_CTRL_LIVE)
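
Lines 664 and 670 are from the per-namespace state update; the comment fragment at 664 documents a deadlock guard. Marking a path live can trigger I/O back through this controller, which must not happen before initialization finishes, so the state is only exposed once the controller is LIVE; nvme_mpath_update() reprocesses the log later for the init-time case. A sketch of the check (the assignments preceding it are assumptions from the mainline nvme_update_ns_ana_state()):

	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	/*
	 * Expose the path only on a live controller; doing so earlier may
	 * deadlock if called from nvme_mpath_init_identify() and the ctrl
	 * will never complete initialization.
	 */
	if (desc->state == NVME_ANA_OPTIMIZED &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
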
674 static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
681 dev_dbg(ctrl->device, "ANA group %d: %s.\n",
691 down_read(&ctrl->namespaces_rwsem);
692 list_for_each_entry(ns, &ctrl->namespaces, list) {
705 up_read(&ctrl->namespaces_rwsem);
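
Lines 674-705 apply a single group descriptor to the controller's namespaces under the usual read lock. Both the namespace list and the descriptor's NSID array are kept sorted, so one merge-style pass matches them up; a sketch (the two-cursor loop is reconstructed from mainline and should be read as an approximation):

static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned int *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	/* count groups in transition so the caller can arm the ANATT timer */
	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;
	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned int nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}
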
709 static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
714 mutex_lock(&ctrl->ana_lock);
715 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
716 ctrl->ana_log_buf, ctrl->ana_log_size, 0);
718 dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
722 error = nvme_parse_ana_log(ctrl, &nr_change_groups,
739 mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
741 del_timer_sync(&ctrl->anatt_timer);
743 mutex_unlock(&ctrl->ana_lock);
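
Lines 709-743 fetch and parse the log under ctrl->ana_lock, then manage the ANATT watchdog: while any group reports the change state the timer stays armed, otherwise it is stopped. A sketch of the flow (the goto-based error handling is an assumption):

static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
			NVME_CSI_NVM, ctrl->ana_log_buf,
			ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * Double the advertised ANA transition time for slack; a target
	 * stuck in the change state will eventually trip the watchdog.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer,
				ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

nvme_ana_work() at 749-754 is the asynchronous entry point into this path, and it bails out early unless the controller is LIVE.
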
749 struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
751 if (ctrl->state != NVME_CTRL_LIVE)
754 nvme_read_ana_log(ctrl);
757 void nvme_mpath_update(struct nvme_ctrl *ctrl)
761 if (!ctrl->ana_log_buf)
764 mutex_lock(&ctrl->ana_lock);
765 nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
766 mutex_unlock(&ctrl->ana_lock);
771 struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);
773 dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
774 nvme_reset_ctrl(ctrl);
777 void nvme_mpath_stop(struct nvme_ctrl *ctrl)
779 if (!nvme_ctrl_use_ana(ctrl))
781 del_timer_sync(&ctrl->anatt_timer);
782 cancel_work_sync(&ctrl->ana_work);
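
Lines 771-782 close the loop on the watchdog armed at 739: if the timer fires, the target exceeded twice its advertised transition time and the controller is reset to recover. Reconstructed nearly verbatim from the matches (only braces and comments are added):

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	/* the target never left the ANA change state in time */
	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	/* kill the watchdog first, then wait out any running ANA work */
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}
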
834 static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
848 if (nvme_ctrl_use_ana(ns->ctrl)) {
854 mutex_lock(&ns->ctrl->ana_lock);
856 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
857 mutex_unlock(&ns->ctrl->ana_lock);
864 queue_work(nvme_wq, &ns->ctrl->ana_work);
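
Lines 834-864 run at namespace scan time: the new namespace looks up its group's descriptor in the cached log via the nvme_lookup_ana_group_desc() callback, and if the group is not in the cache yet it falls back to queueing ana_work for a fresh read. A sketch of that fragment (the desc initializer and fallback branch are assumptions from the mainline nvme_mpath_add_disk(); id is the Identify Namespace data):

	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group descriptor: apply it directly */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* not in the cached log: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	}
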
901 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
903 mutex_init(&ctrl->ana_lock);
904 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
905 INIT_WORK(&ctrl->ana_work, nvme_ana_work);
908 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
910 size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
915 if (!multipath || !ctrl->subsys ||
916 !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
919 if (!ctrl->max_namespaces ||
920 ctrl->max_namespaces > le32_to_cpu(id->nn)) {
921 dev_err(ctrl->device,
922 "Invalid MNAN value %u\n", ctrl->max_namespaces);
926 ctrl->anacap = id->anacap;
927 ctrl->anatt = id->anatt;
928 ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
929 ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);
932 ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
933 ctrl->max_namespaces * sizeof(__le32);
935 dev_err(ctrl->device,
938 dev_err(ctrl->device, "disabling ANA support.\n");
941 if (ana_log_size > ctrl->ana_log_size) {
942 nvme_mpath_stop(ctrl);
943 nvme_mpath_uninit(ctrl);
944 ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
945 if (!ctrl->ana_log_buf)
948 ctrl->ana_log_size = ana_log_size;
949 error = nvme_read_ana_log(ctrl);
955 nvme_mpath_uninit(ctrl);
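
Lines 908-949 validate the Identify Controller fields and size the ANA log buffer for the worst case: the response header, one descriptor per allowed group (NANAGRPID), and one NSID entry per allowed namespace (MNAN). That worst case must fit in a single transfer (MDTS), or ANA support is disabled. A sketch following the matches (the control flow around them is an approximation of mainline):

int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	/* MNAN must be non-zero and no larger than the namespace count */
	if (!ctrl->max_namespaces ||
	    ctrl->max_namespaces > le32_to_cpu(id->nn)) {
		dev_err(ctrl->device,
			"Invalid MNAN value %u\n", ctrl->max_namespaces);
		return -EINVAL;
	}

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	/* worst case: every group descriptor plus every NSID entry */
	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		/* quiesce ANA handling while the buffer is grown */
		nvme_mpath_stop(ctrl);
		nvme_mpath_uninit(ctrl);
		ctrl->ana_log_buf = kvmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

nvme_mpath_uninit() at 959-963 is the matching teardown: it frees the log buffer and zeroes the size so a later init can reallocate cleanly.
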
959 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
961 kvfree(ctrl->ana_log_buf);
962 ctrl->ana_log_buf = NULL;
963 ctrl->ana_log_size = 0;