Lines matching references to ns — identifier cross-reference; the fragments below are consistent with the Linux kernel's NVMe multipath driver (drivers/nvme/host/multipath.c). Each line shows the source line number followed by the matching code.

85 struct nvme_ns *ns = req->q->queuedata;
90 nvme_mpath_clear_current_path(ns);
97 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
98 set_bit(NVME_NS_ANA_PENDING, &ns->flags);
99 queue_work(nvme_wq, &ns->ctrl->ana_work);
102 spin_lock_irqsave(&ns->head->requeue_lock, flags);
104 bio_set_dev(bio, ns->head->disk->part0);
118 blk_steal_bios(&ns->head->requeue_list, req);
119 spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
122 kblockd_schedule_work(&ns->head->requeue_work);
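The fragment above (lines 85-122) is the failover path, nvme_failover_req(): the cached current path is cleared, ANA work is queued for ANA-related errors so the log gets re-read, and then, under head->requeue_lock, each bio is re-pointed at the multipath node's disk and stolen onto head->requeue_list before head->requeue_work is kicked. A minimal userspace sketch of the steal-and-requeue flow; struct bio and struct ns_head here are simplified stand-ins, not the kernel types:

#include <stdio.h>
#include <stdlib.h>

struct bio { int id; struct bio *next; };
struct ns_head { struct bio *requeue_list; };

/* Mirrors the blk_steal_bios() idea: move a failed request's bios onto
 * the shared head list so another path can reissue them. Done under
 * head->requeue_lock in the driver. */
static void steal_bios(struct ns_head *head, struct bio **req_bios)
{
	while (*req_bios) {
		struct bio *b = *req_bios;

		*req_bios = b->next;
		b->next = head->requeue_list;	/* re-pointed at head->disk in the driver */
		head->requeue_list = b;
	}
}

/* Mirrors kicking head->requeue_work: reissue everything queued. */
static void kick_requeue(struct ns_head *head)
{
	while (head->requeue_list) {
		struct bio *b = head->requeue_list;

		head->requeue_list = b->next;
		printf("reissuing bio %d via another path\n", b->id);
		free(b);
	}
}

int main(void)
{
	struct ns_head head = { 0 };
	struct bio *req = NULL;

	for (int i = 0; i < 3; i++) {
		struct bio *b = malloc(sizeof(*b));
		b->id = i;
		b->next = req;
		req = b;
	}
	steal_bios(&head, &req);
	kick_requeue(&head);
	return 0;
}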
127 struct nvme_ns *ns = rq->q->queuedata;
128 struct gendisk *disk = ns->head->disk;
141 struct nvme_ns *ns = rq->q->queuedata;
145 bdev_end_io_acct(ns->head->disk->part0, req_op(rq),
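Lines 127-145 are the I/O accounting hooks nvme_mpath_start_request() and nvme_mpath_end_request(): statistics are charged to ns->head->disk->part0, the shared multipath gendisk, so iostat on the head node reflects I/O regardless of which path carried it. A sketch of the pattern with bdev_start_io_acct()/bdev_end_io_acct() mocked as plain functions; "nvme0n1" plays the role of head->disk->part0:

#include <stdio.h>
#include <time.h>

/* Mocked accounting: the kernel charges the shared multipath gendisk,
 * not the per-path disk. */
static unsigned long start_io_acct(const char *disk, const char *op)
{
	printf("account %s start on %s\n", op, disk);
	return (unsigned long)clock();
}

static void end_io_acct(const char *disk, const char *op, unsigned long start)
{
	printf("account %s done on %s after %lu ticks\n",
	       op, disk, (unsigned long)clock() - start);
}

int main(void)
{
	unsigned long t = start_io_acct("nvme0n1", "read");

	end_io_acct("nvme0n1", "read", t);
	return 0;
}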
152 struct nvme_ns *ns;
155 list_for_each_entry(ns, &ctrl->namespaces, list) {
156 if (!ns->head->disk)
158 kblockd_schedule_work(&ns->head->requeue_work);
160 disk_uevent(ns->head->disk, KOBJ_CHANGE);
174 bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
176 struct nvme_ns_head *head = ns->head;
184 if (ns == rcu_access_pointer(head->current_path[node])) {
195 struct nvme_ns *ns;
198 list_for_each_entry(ns, &ctrl->namespaces, list) {
199 nvme_mpath_clear_current_path(ns);
200 kblockd_schedule_work(&ns->head->requeue_work);
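Lines 152-200 cover nvme_kick_requeue_lists(), nvme_mpath_clear_current_path(), and nvme_mpath_clear_ctrl_paths(): head->current_path[] caches one RCU-published path pointer per NUMA node, and when a path (or a whole controller) goes away, every node slot still pointing at it must be dropped so the next submission re-runs path selection, after which the requeue work is kicked. A sketch of the per-node cache invalidation, with plain pointers standing in for the RCU accessors:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_NODES 4	/* stand-in for nr_node_ids */

struct path { const char *name; };
struct head { struct path *current_path[NR_NODES]; };

/* Mirrors nvme_mpath_clear_current_path(): drop every per-node cached
 * pointer that still refers to the dead path. The kernel uses
 * rcu_access_pointer()/rcu_assign_pointer() for this. */
static bool clear_current_path(struct head *h, struct path *dead)
{
	bool changed = false;

	for (int node = 0; node < NR_NODES; node++) {
		if (h->current_path[node] == dead) {
			h->current_path[node] = NULL;
			changed = true;
		}
	}
	return changed;
}

int main(void)
{
	struct path a = { "pathA" };
	struct head h = { { &a, NULL, &a, NULL } };

	printf("cleared: %d\n", clear_current_path(&h, &a));
	return 0;
}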
205 void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
207 struct nvme_ns_head *head = ns->head;
213 list_for_each_entry_rcu(ns, &head->list, siblings) {
214 if (capacity != get_capacity(ns->disk))
215 clear_bit(NVME_NS_READY, &ns->flags);
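Lines 205-215 are nvme_mpath_revalidate_paths(): after a resize, any sibling path whose gendisk capacity disagrees with the head's loses its NVME_NS_READY bit, which the nvme_path_is_disabled() check below then treats as unusable. A sketch of the capacity sweep:

#include <stdbool.h>
#include <stdio.h>

struct path { long capacity; bool ready; };

/* Mirrors the loop in nvme_mpath_revalidate_paths(): paths whose size
 * disagrees with the head's size are marked not-ready until rescanned. */
static void revalidate(struct path *paths, int n, long head_capacity)
{
	for (int i = 0; i < n; i++)
		if (paths[i].capacity != head_capacity)
			paths[i].ready = false;
}

int main(void)
{
	struct path p[2] = { { 100, true }, { 80, true } };

	revalidate(p, 2, 100);
	printf("path0 ready=%d path1 ready=%d\n", p[0].ready, p[1].ready);
	return 0;
}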
224 static bool nvme_path_is_disabled(struct nvme_ns *ns)
231 if (ns->ctrl->state != NVME_CTRL_LIVE &&
232 ns->ctrl->state != NVME_CTRL_DELETING)
234 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
235 !test_bit(NVME_NS_READY, &ns->flags))
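Lines 224-235 are the nvme_path_is_disabled() predicate: a path is usable only if its controller is LIVE or DELETING (a deleting but still-connected controller can drain I/O), no ANA update is pending, and NVME_NS_READY is set. A compact restatement:

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_DELETING, CTRL_RESETTING };

struct path {
	enum ctrl_state state;
	bool ana_pending;	/* NVME_NS_ANA_PENDING */
	bool ready;		/* NVME_NS_READY */
};

/* Same shape as nvme_path_is_disabled(): DELETING still counts as
 * usable so in-flight I/O can complete through a connected controller. */
static bool path_is_disabled(const struct path *p)
{
	if (p->state != CTRL_LIVE && p->state != CTRL_DELETING)
		return true;
	return p->ana_pending || !p->ready;
}

int main(void)
{
	struct path p = { CTRL_RESETTING, false, true };

	printf("disabled=%d\n", path_is_disabled(&p));
	return 0;
}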
243 struct nvme_ns *found = NULL, *fallback = NULL, *ns;
245 list_for_each_entry_rcu(ns, &head->list, siblings) {
246 if (nvme_path_is_disabled(ns))
250 distance = node_distance(node, ns->ctrl->numa_node);
254 switch (ns->ana_state) {
258 found = ns;
264 fallback = ns;
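Lines 243-264 are the heart of __nvme_find_path() under the NUMA iopolicy: among enabled paths, minimize node_distance() from the submitting node, preferring ANA-optimized paths and keeping the nearest non-optimized one as a fallback. A userspace sketch of the two-tier scan; node_distance() here is a made-up stand-in for the kernel's NUMA distance table:

#include <limits.h>
#include <stdio.h>

enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_INACCESSIBLE };

struct path { const char *name; int numa_node; enum ana_state ana; };

/* Hypothetical distance table standing in for node_distance(). */
static int node_distance(int a, int b) { return a == b ? 10 : 20; }

/* Mirrors __nvme_find_path(): nearest optimized path wins; the nearest
 * non-optimized path is remembered as a fallback. */
static struct path *find_path(struct path *p, int n, int node)
{
	struct path *found = NULL, *fallback = NULL;
	int best_found = INT_MAX, best_fb = INT_MAX;

	for (int i = 0; i < n; i++) {
		int d = node_distance(node, p[i].numa_node);

		if (p[i].ana == ANA_OPTIMIZED && d < best_found) {
			found = &p[i];
			best_found = d;
		} else if (p[i].ana == ANA_NONOPTIMIZED && d < best_fb) {
			fallback = &p[i];
			best_fb = d;
		}
	}
	return found ? found : fallback;
}

int main(void)
{
	struct path p[] = {
		{ "far-optimized", 1, ANA_OPTIMIZED },
		{ "near-nonopt",   0, ANA_NONOPTIMIZED },
	};

	/* Optimized beats closer-but-non-optimized. */
	printf("chose %s\n", find_path(p, 2, 0)->name);
	return 0;
}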
280 struct nvme_ns *ns)
282 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
284 if (ns)
285 return ns;
292 struct nvme_ns *ns, *found = NULL;
300 for (ns = nvme_next_ns(head, old);
301 ns && ns != old;
302 ns = nvme_next_ns(head, ns)) {
303 if (nvme_path_is_disabled(ns))
306 if (ns->ana_state == NVME_ANA_OPTIMIZED) {
307 found = ns;
310 if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
311 found = ns;
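Lines 280-311 implement the round-robin iopolicy: nvme_next_ns() advances circularly through head->list, and nvme_round_robin_path() walks at most one lap starting after the previously used path, returning the first optimized path and remembering a non-optimized one in case none is optimized. A simplified sketch over an array with wraparound (the kernel additionally falls back to the previous path itself when nothing better turns up):

#include <stdio.h>

enum ana { OPT, NONOPT, DOWN };
struct path { const char *name; enum ana ana; };

/* Circular successor, like nvme_next_ns() wrapping to the list head. */
static int next(int i, int n) { return (i + 1) % n; }

/* One lap starting after old: first optimized path wins immediately,
 * a non-optimized path is kept as fallback (nvme_round_robin_path()). */
static struct path *rr_path(struct path *p, int n, int old)
{
	struct path *found = NULL;

	for (int i = next(old, n); i != old; i = next(i, n)) {
		if (p[i].ana == DOWN)
			continue;
		if (p[i].ana == OPT)
			return &p[i];
		if (p[i].ana == NONOPT)
			found = &p[i];
	}
	return found;
}

int main(void)
{
	struct path p[] = { { "p0", OPT }, { "p1", DOWN }, { "p2", OPT } };

	printf("after p0 comes %s\n", rr_path(p, 3, 0)->name);
	return 0;
}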
332 static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
334 return ns->ctrl->state == NVME_CTRL_LIVE &&
335 ns->ana_state == NVME_ANA_OPTIMIZED;
341 struct nvme_ns *ns;
343 ns = srcu_dereference(head->current_path[node], &head->srcu);
344 if (unlikely(!ns))
348 return nvme_round_robin_path(head, node, ns);
349 if (unlikely(!nvme_path_is_optimized(ns)))
351 return ns;
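Lines 332-351 show nvme_find_path(): the srcu-protected per-node cache is consulted first and trusted only while nvme_path_is_optimized() still holds (controller LIVE, ANA state optimized); otherwise full selection runs again. A sketch of the cache-then-reselect shape, with plain pointers in place of srcu_dereference():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct path { const char *name; bool live; bool optimized; };
struct head { struct path *cache; struct path *all; int n; };

static bool path_is_optimized(const struct path *p)
{
	return p->live && p->optimized;	/* nvme_path_is_optimized() */
}

/* Slow-path stand-in for __nvme_find_path(): first optimized path. */
static struct path *reselect(struct head *h)
{
	for (int i = 0; i < h->n; i++)
		if (path_is_optimized(&h->all[i]))
			return h->cache = &h->all[i];
	return NULL;
}

/* Same shape as nvme_find_path(): trust the cached pointer only while
 * it is still a live, optimized path. */
static struct path *find_path(struct head *h)
{
	if (h->cache && path_is_optimized(h->cache))
		return h->cache;
	return reselect(h);
}

int main(void)
{
	struct path all[] = { { "p0", false, true }, { "p1", true, true } };
	struct head h = { &all[0], all, 2 };

	printf("selected %s\n", find_path(&h)->name);	/* cache stale -> p1 */
	return 0;
}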
356 struct nvme_ns *ns;
358 list_for_each_entry_rcu(ns, &head->list, siblings) {
359 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
361 switch (ns->ctrl->state) {
378 struct nvme_ns *ns;
391 ns = nvme_find_path(head);
392 if (likely(ns)) {
393 bio_set_dev(bio, ns->disk->part0);
395 trace_block_bio_remap(bio, disk_devt(ns->head->disk),
430 struct nvme_ns *ns;
434 ns = nvme_find_path(head);
435 if (ns)
436 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data);
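Lines 356-436 cover nvme_available_path(), nvme_ns_head_submit_bio(), and nvme_ns_head_report_zones(): submission picks a path with nvme_find_path(), re-points the bio at that path's per-controller disk with bio_set_dev(), and traces the remap from the head device; with no usable path, the bio is parked on the requeue list as long as some controller might still reconnect, and failed otherwise. report_zones uses the same pick-a-path-then-delegate pattern. A sketch of the dispatch-or-queue decision; the device names are illustrative only:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bio { int id; const char *dev; };

/* Stand-ins for nvme_find_path() and nvme_available_path(). */
static const char *find_path(bool up) { return up ? "nvme1c1n1" : NULL; }
static bool available_path(bool reconnecting) { return reconnecting; }

/* Same decision tree as nvme_ns_head_submit_bio(): remap and submit,
 * else requeue while a reconnect is plausible, else fail the bio. */
static void submit(struct bio *b, bool up, bool reconnecting)
{
	const char *path = find_path(up);

	if (path) {
		b->dev = path;	/* bio_set_dev() */
		printf("bio %d remapped to %s\n", b->id, path);
	} else if (available_path(reconnecting)) {
		printf("bio %d queued on head requeue list\n", b->id);
	} else {
		printf("bio %d failed: no path\n", b->id);
	}
}

int main(void)
{
	struct bio b = { 7, "nvme0n1" };

	submit(&b, false, true);
	return 0;
}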
567 static void nvme_mpath_set_live(struct nvme_ns *ns)
569 struct nvme_ns_head *head = ns->head;
584 clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); /* DISK_LIVE is a head->flags bit; passing &ns->flags here was a bug */
591 if (nvme_path_is_optimized(ns)) {
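Lines 567-591 are nvme_mpath_set_live(): the first path to go live registers the shared head gendisk, guarded by test_and_set_bit(NVME_NSHEAD_DISK_LIVE, ...) so two paths racing through device_add_disk() cannot both register it, with the bit rolled back if registration fails. A single-threaded sketch of the once-only guard (the kernel uses atomic bitops):

#include <stdbool.h>
#include <stdio.h>

struct head { bool disk_live; };	/* NVME_NSHEAD_DISK_LIVE */

/* Atomic in the kernel (test_and_set_bit); plain here since the sketch
 * is single-threaded. */
static bool test_and_set(bool *b) { bool old = *b; *b = true; return old; }

static int add_disk(struct head *h) { (void)h; puts("device_add_disk"); return 0; }

/* Same shape as nvme_mpath_set_live(): only the first live path
 * registers the shared head gendisk; failure rolls the flag back. */
static void set_live(struct head *h)
{
	if (!test_and_set(&h->disk_live)) {
		if (add_disk(h) != 0)
			h->disk_live = false;	/* clear_bit on head->flags */
	}
}

int main(void)
{
	struct head h = { false };

	set_live(&h);	/* registers the disk */
	set_live(&h);	/* second path: no-op */
	return 0;
}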
655 struct nvme_ns *ns)
657 ns->ana_grpid = le32_to_cpu(desc->grpid);
658 ns->ana_state = desc->state;
659 clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
669 if (nvme_state_is_live(ns->ana_state) &&
670 ns->ctrl->state == NVME_CTRL_LIVE)
671 nvme_mpath_set_live(ns);
679 struct nvme_ns *ns;
692 list_for_each_entry(ns, &ctrl->namespaces, list) {
696 if (ns->head->ns_id < nsid)
698 if (ns->head->ns_id == nsid)
699 nvme_update_ns_ana_state(desc, ns);
702 if (ns->head->ns_id > nsid)
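Lines 655-702 cover nvme_update_ns_ana_state() and the matching loop in nvme_update_ana_state(): each ANA log descriptor carries a group id, a state, and a sorted NSID list, which is merged against the controller's NSID-sorted namespace list (hence the <, ==, > comparisons); a namespace whose new state is live on a LIVE controller is promoted via nvme_mpath_set_live(). A sketch of the sorted merge:

#include <stdio.h>

/* One ANA log descriptor: group state plus a sorted NSID list. */
struct ana_desc { int state; const int *nsids; int nr_nsids; };

/* Namespaces sorted by NSID, as ctrl->namespaces is in the driver. */
struct ns { int nsid; int ana_state; };

/* Same merge as nvme_update_ana_state(): advance both sorted lists,
 * applying the descriptor state on an NSID match. */
static void update_ana_state(const struct ana_desc *d, struct ns *all, int n)
{
	int i = 0;

	for (int k = 0; k < d->nr_nsids; k++) {
		int nsid = d->nsids[k];

		while (i < n && all[i].nsid < nsid)
			i++;			/* ns->head->ns_id < nsid */
		if (i < n && all[i].nsid == nsid)
			all[i].ana_state = d->state;
		/* all[i].nsid > nsid: descriptor NSID not attached here */
	}
}

int main(void)
{
	struct ns all[] = { { 1, 0 }, { 3, 0 }, { 5, 0 } };
	const int nsids[] = { 3, 5 };
	struct ana_desc d = { 1 /* optimized */, nsids, 2 };

	update_ana_state(&d, all, 3);
	printf("ns3 state=%d ns5 state=%d\n", all[1].ana_state, all[2].ana_state);
	return 0;
}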
828 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
830 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
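Lines 828-830 are the ana_state sysfs attribute: the namespace's numeric ANA state indexes a static name table. A sketch of the table; the state values follow the NVMe ANA definitions (NVME_ANA_* in the kernel headers):

#include <stdio.h>

enum { ANA_OPTIMIZED = 0x1, ANA_NONOPTIMIZED = 0x2, ANA_INACCESSIBLE = 0x3,
       ANA_PERSISTENT_LOSS = 0x4, ANA_CHANGE = 0xf };

static const char *ana_state_names[] = {
	[0]                   = "invalid state",
	[ANA_OPTIMIZED]       = "optimized",
	[ANA_NONOPTIMIZED]    = "non-optimized",
	[ANA_INACCESSIBLE]    = "inaccessible",
	[ANA_PERSISTENT_LOSS] = "persistent-loss",
	[ANA_CHANGE]          = "change",
};

int main(void)
{
	printf("%s\n", ana_state_names[ANA_OPTIMIZED]);	/* what sysfs emits */
	return 0;
}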
846 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
848 if (nvme_ctrl_use_ana(ns->ctrl)) {
854 mutex_lock(&ns->ctrl->ana_lock);
855 ns->ana_grpid = le32_to_cpu(anagrpid);
856 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
857 mutex_unlock(&ns->ctrl->ana_lock);
860 nvme_update_ns_ana_state(&desc, ns);
863 set_bit(NVME_NS_ANA_PENDING, &ns->flags);
864 queue_work(nvme_wq, &ns->ctrl->ana_work);
867 ns->ana_state = NVME_ANA_OPTIMIZED;
868 nvme_mpath_set_live(ns);
871 if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
873 ns->head->disk->queue);
875 if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
876 ns->head->disk->nr_zones = ns->disk->nr_zones;
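Lines 846-876 close with nvme_mpath_add_disk(): on an ANA-capable controller the group descriptor is looked up under ctrl->ana_lock and applied through nvme_update_ns_ana_state(), or NVME_NS_ANA_PENDING is set and ana_work queued if the lookup fails; without ANA the path defaults to NVME_ANA_OPTIMIZED and goes live immediately, after which stable-writes and zone properties are propagated to the head disk. A sketch of the two-branch bring-up; lookup_ana_group() is a hypothetical stand-in for nvme_parse_ana_log() with nvme_lookup_ana_group_desc:

#include <stdbool.h>
#include <stdio.h>

enum { ANA_OPTIMIZED = 1 };

struct path { int ana_grpid; int ana_state; bool ana_pending; bool live; };

/* Hypothetical stand-in for finding the group descriptor in the log. */
static bool lookup_ana_group(int grpid, int *state)
{
	if (grpid == 7) {
		*state = ANA_OPTIMIZED;
		return true;
	}
	return false;
}

/* Same branch structure as nvme_mpath_add_disk(). */
static void mpath_add_disk(struct path *p, bool ctrl_uses_ana, int grpid)
{
	int state;

	if (ctrl_uses_ana) {
		p->ana_grpid = grpid;
		if (lookup_ana_group(grpid, &state)) {
			p->ana_state = state;	/* nvme_update_ns_ana_state() */
			p->live = true;
		} else {
			p->ana_pending = true;	/* queue ana_work to re-read log */
		}
	} else {
		p->ana_state = ANA_OPTIMIZED;	/* non-ANA controllers */
		p->live = true;			/* nvme_mpath_set_live() */
	}
}

int main(void)
{
	struct path p = { 0 };

	mpath_add_disk(&p, true, 7);
	printf("grp=%d state=%d live=%d\n", p.ana_grpid, p.ana_state, p.live);
	return 0;
}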