// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->instance, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}
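
/*
 * For example, with subsystem instance 0, controller instance 1 and
 * namespace head instance 2, the formats above yield:
 *
 *   multipath off:               nvme1n2    (controller-based name)
 *   multipath on, shared head:   nvme0c1n2  (hidden per-path node)
 *   multipath on, no head disk:  nvme0n2    (subsystem-based name)
 */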

void nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status & 0x7ff;
	unsigned long flags;

	nvme_mpath_clear_current_path(ns);

	/*
	 * If we got back an ANA error, we know the controller is alive but not
	 * ready to serve this namespace.  Kick off a re-read of the ANA
	 * information page, and just try any other available path for now.
	 */
	if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
		set_bit(NVME_NS_ANA_PENDING, &ns->flags);
		queue_work(nvme_wq, &ns->ctrl->ana_work);
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

	blk_mq_end_request(req, 0);
	kblockd_schedule_work(&ns->head->requeue_work);
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

bool nvme_mpath_clear_current_path(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;
	bool changed = false;
	int node;

	if (!head)
		goto out;

	for_each_node(node) {
		if (ns == rcu_access_pointer(head->current_path[node])) {
			rcu_assign_pointer(head->current_path[node], NULL);
			changed = true;
		}
	}
out:
	return changed;
}

void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		nvme_mpath_clear_current_path(ns);
		kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static bool nvme_path_is_disabled(struct nvme_ns *ns)
{
	/*
	 * We don't treat NVME_CTRL_DELETING as a disabled path as I/O should
	 * still be able to complete assuming that the controller is connected.
	 * Otherwise it will fail immediately and return to the requeue list.
	 */
	if (ns->ctrl->state != NVME_CTRL_LIVE &&
	    ns->ctrl->state != NVME_CTRL_DELETING)
		return true;
	if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
	    test_bit(NVME_NS_REMOVING, &ns->flags))
		return true;
	return false;
}
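
/*
 * Select the best path for the given NUMA node and cache it in
 * head->current_path[node].  An optimized path is always preferred over a
 * non-optimized one; with the "numa" I/O policy ties are broken by the
 * smallest NUMA distance between the submitting node and the controller,
 * otherwise every enabled path is treated as equally close.
 */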
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
{
	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
	struct nvme_ns *found = NULL, *fallback = NULL, *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
			distance = node_distance(node, ns->ctrl->numa_node);
		else
			distance = LOCAL_DISTANCE;

		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			if (distance < found_distance) {
				found_distance = distance;
				found = ns;
			}
			break;
		case NVME_ANA_NONOPTIMIZED:
			if (distance < fallback_distance) {
				fallback_distance = distance;
				fallback = ns;
			}
			break;
		default:
			break;
		}
	}

	if (!found)
		found = fallback;
	if (found)
		rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head,
		struct nvme_ns *ns)
{
	ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns,
			siblings);
	if (ns)
		return ns;
	return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings);
}
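
/*
 * Round-robin selection: starting from the path used last time ("old"),
 * walk the sibling list once and return the next enabled path, preferring
 * an optimized path over a non-optimized one.  If no better path is found
 * the current path may be reused, as described in the comment below.
 */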
static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
		int node, struct nvme_ns *old)
{
	struct nvme_ns *ns, *found = NULL;

	if (list_is_singular(&head->list)) {
		if (nvme_path_is_disabled(old))
			return NULL;
		return old;
	}

	for (ns = nvme_next_ns(head, old);
	     ns && ns != old;
	     ns = nvme_next_ns(head, ns)) {
		if (nvme_path_is_disabled(ns))
			continue;

		if (ns->ana_state == NVME_ANA_OPTIMIZED) {
			found = ns;
			goto out;
		}
		if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
			found = ns;
	}

	/*
	 * The loop above skips the current path for round-robin semantics.
	 * Fall back to the current path if either:
	 *  - no other optimized path found and current is optimized,
	 *  - no other usable path found and current is usable.
	 */
	if (!nvme_path_is_disabled(old) &&
	    (old->ana_state == NVME_ANA_OPTIMIZED ||
	     (!found && old->ana_state == NVME_ANA_NONOPTIMIZED)))
		return old;

	if (!found)
		return NULL;
out:
	rcu_assign_pointer(head->current_path[node], found);
	return found;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}
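
/*
 * Fast path for bio submission: reuse the cached path for this NUMA node if
 * it is still live and optimized (or, for round-robin, rotate from it), and
 * only fall back to a full path search when the cache is empty or the
 * cached path is no longer optimal.
 */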
inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	int node = numa_node_id();
	struct nvme_ns *ns;

	ns = srcu_dereference(head->current_path[node], &head->srcu);
	if (unlikely(!ns))
		return __nvme_find_path(head, node);

	if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
		return nvme_round_robin_path(head, node, ns);
	if (unlikely(!nvme_path_is_optimized(ns)))
		return __nvme_find_path(head, node);
	return ns;
}

static bool nvme_available_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		switch (ns->ctrl->state) {
		case NVME_CTRL_LIVE:
		case NVME_CTRL_RESETTING:
		case NVME_CTRL_CONNECTING:
			/* fallthru */
			return true;
		default:
			break;
		}
	}
	return false;
}
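
/*
 * I/O entry point for the multipath node: pick a path under SRCU protection
 * and remap the bio to it.  If no path is usable right now but one may
 * become available (a controller is live, resetting or reconnecting), park
 * the bio on the requeue list; otherwise fail it with an I/O error.
 */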
blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
{
	struct nvme_ns_head *head = bio->bi_disk->private_data;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	/*
	 * The namespace might be going away and the bio might be moved to a
	 * different queue via blk_steal_bios(), so we need to use the bio_split
	 * pool from the original queue to allocate the bvecs from.
	 */
	blk_queue_split(&bio);

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = submit_bio_noacct(bio);
	} else if (nvme_available_path(head)) {
		dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no available path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		submit_bio_noacct(bio);
	}
}

int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing
	 * information could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
		return 0;

	q = blk_alloc_queue(ctrl->numa_node);
	if (!q)
		goto out;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	if (!head->disk)
		return;

	if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	mutex_lock(&head->lock);
	if (nvme_path_is_optimized(ns)) {
		int node, srcu_idx;

		srcu_idx = srcu_read_lock(&head->srcu);
		for_each_node(node)
			__nvme_find_path(head, node);
		srcu_read_unlock(&head->srcu, srcu_idx);
	}
	mutex_unlock(&head->lock);

	synchronize_srcu(&head->srcu);
	kblockd_schedule_work(&head->requeue_work);
}
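
/*
 * Walk the ANA log page and invoke @cb for each group descriptor.  The log
 * buffer layout is a struct nvme_ana_rsp_hdr followed by ngrps descriptors,
 * where each struct nvme_ana_group_desc is immediately followed by its
 * nnsids 32-bit namespace IDs:
 *
 *   [ rsp_hdr ][ desc 0 ][ nsid ... ][ desc 1 ][ nsid ... ] ...
 *
 * Each descriptor and its trailing NSID list are bounds checked against
 * ana_log_size before being handed to the callback.
 */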
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
			void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids;
		size_t nsid_buf_size;

		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;

		nr_nsids = le32_to_cpu(desc->nnsids);
		nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
	/*
	 * nvme_mpath_set_live() will trigger I/O to the multipath path device
	 * and in turn to this path device.  However we cannot accept this I/O
	 * if the controller is not live.  This may deadlock if called from
	 * nvme_mpath_init_identify() and the ctrl will never complete
	 * initialization, preventing I/O from completing.  For this case we
	 * will reprocess the ANA log page in nvme_mpath_update() once the
	 * controller is ready.
	 */
	if (nvme_state_is_live(ns->ana_state) &&
	    ns->ctrl->state == NVME_CTRL_LIVE)
		nvme_mpath_set_live(ns);
}
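
/*
 * Apply one ANA group descriptor to all matching namespaces.  Both the
 * controller's namespace list and the NSID array in the descriptor are kept
 * in ascending NSID order, so the loop below is a single linear merge of
 * the two sorted sequences rather than a nested lookup.
 */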
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_dbg(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  We'll still eventually
	 * time out once all groups are in change state, so this isn't a big
	 * deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_read_ana_log(ctrl);
}

void nvme_mpath_update(struct nvme_ctrl *ctrl)
{
	u32 nr_change_groups = 0;

	if (!ctrl->ana_log_buf)
		return;

	mutex_lock(&ctrl->ana_lock);
	nvme_parse_ana_log(ctrl, &nr_change_groups, nvme_update_ana_state);
	mutex_unlock(&ctrl->ana_lock);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

#define SUBSYS_ATTR_RW(_name, _mode, _show, _store)  \
	struct device_attribute subsys_attr_##_name =	\
		__ATTR(_name, _mode, _show, _store)

static const char *nvme_iopolicy_names[] = {
	[NVME_IOPOLICY_NUMA]	= "numa",
	[NVME_IOPOLICY_RR]	= "round-robin",
};

static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);

	return sysfs_emit(buf, "%s\n",
			  nvme_iopolicy_names[READ_ONCE(subsys->iopolicy)]);
}

static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct nvme_subsystem *subsys =
		container_of(dev, struct nvme_subsystem, dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
			WRITE_ONCE(subsys->iopolicy, i);
			return count;
		}
	}

	return -EINVAL;
}
SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
		      nvme_subsys_iopolicy_show, nvme_subsys_iopolicy_store);
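
/*
 * The attribute above is exposed on the subsystem device, typically as
 * /sys/class/nvme-subsystem/nvme-subsysX/iopolicy, e.g.:
 *
 *   cat /sys/class/nvme-subsystem/nvme-subsys0/iopolicy          -> "numa"
 *   echo round-robin > /sys/class/nvme-subsystem/nvme-subsys0/iopolicy
 */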

static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sysfs_emit(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
	}

	if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
				   ns->head->disk->queue);
#ifdef CONFIG_BLK_DEV_ZONED
	if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
		ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
#endif
}

void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
		/*
		 * If device_add_disk wasn't called, prevent the disk release
		 * path from putting a bogus reference on the request queue.
		 */
		head->disk->queue = NULL;
	}
	put_disk(head->disk);
}

void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
{
	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
}
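
/*
 * Size the ANA log buffer for the worst case reported by Identify
 * Controller: one response header, one group descriptor per ANA group
 * (NANAGRPID) and one 32-bit NSID entry for each of up to max_namespaces
 * namespaces.  If that exceeds the controller's transfer limit (MDTS) the
 * log can never be fetched in a single command, so ANA support is disabled.
 */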
int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	size_t max_transfer_size = ctrl->max_hw_sectors << SECTOR_SHIFT;
	size_t ana_log_size;
	int error = 0;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys ||
	    !(ctrl->subsys->cmic & NVME_CTRL_CMIC_ANA))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc) +
		ctrl->max_namespaces * sizeof(__le32);
	if (ana_log_size > max_transfer_size) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%zd).\n",
			ana_log_size, max_transfer_size);
		dev_err(ctrl->device, "disabling ANA support.\n");
		goto out_uninit;
	}
	if (ana_log_size > ctrl->ana_log_size) {
		nvme_mpath_stop(ctrl);
		kfree(ctrl->ana_log_buf);
		ctrl->ana_log_buf = kmalloc(ana_log_size, GFP_KERNEL);
		if (!ctrl->ana_log_buf)
			return -ENOMEM;
	}
	ctrl->ana_log_size = ana_log_size;
	error = nvme_read_ana_log(ctrl);
	if (error)
		goto out_uninit;
	return 0;

out_uninit:
	nvme_mpath_uninit(ctrl);
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}