xref: /kernel/linux/linux-5.10/drivers/nvdimm/label.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 */
5#include <linux/device.h>
6#include <linux/ndctl.h>
7#include <linux/uuid.h>
8#include <linux/slab.h>
9#include <linux/io.h>
10#include <linux/nd.h>
11#include "nd-core.h"
12#include "label.h"
13#include "nd.h"
14
15static guid_t nvdimm_btt_guid;
16static guid_t nvdimm_btt2_guid;
17static guid_t nvdimm_pfn_guid;
18static guid_t nvdimm_dax_guid;
19
20static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";
21
22static u32 best_seq(u32 a, u32 b)
23{
24	a &= NSINDEX_SEQ_MASK;
25	b &= NSINDEX_SEQ_MASK;
26
27	if (a == 0 || a == b)
28		return b;
29	else if (b == 0)
30		return a;
31	else if (nd_inc_seq(a) == b)
32		return b;
33	else
34		return a;
35}
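/*
 * Illustrative example: NSINDEX_SEQ_MASK keeps two bits and nd_inc_seq()
 * cycles valid sequence numbers 1 -> 2 -> 3 -> 1 (0 is reserved as
 * invalid), so best_seq(1, 2) returns 2 because 2 is 1's successor, while
 * best_seq(1, 3) returns 1 because 1 is 3's successor, i.e. the more
 * recently written index wins.
 */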
36
37unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
38{
39	return ndd->nslabel_size;
40}
41
42static size_t __sizeof_namespace_index(u32 nslot)
43{
44	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
45			NSINDEX_ALIGN);
46}
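/*
 * Illustrative example: the index block is its fixed header plus one bit
 * per label slot, rounded up to NSINDEX_ALIGN (256 bytes).  nslot = 1024
 * needs a 128-byte free bitmap, so the index block still fits in a single
 * 256-byte chunk; larger slot counts consume additional 256-byte chunks.
 */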
47
48static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
49		size_t index_size)
50{
51	return (ndd->nsarea.config_size - index_size * 2) /
52			sizeof_namespace_label(ndd);
53}
54
55int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
56{
57	u32 tmp_nslot, n;
58
59	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
60	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;
61
62	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
63}
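/*
 * Worked example (hypothetical 128KiB label area, 256-byte labels):
 * tmp_nslot = 131072 / 256 = 512, the matching index block rounds up to
 * 256 bytes (n = 1), and the usable slot count becomes
 * (131072 - 2 * 256) / 256 = 510 labels.
 */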
64
65size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
66{
67	u32 nslot, space, size;
68
69	/*
70	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
71	 * enough to hold 2 index blocks and 2 labels.  The minimum index
72	 * block size is 256 bytes. The label size is 128 for namespaces
73	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
74	 */
75	nslot = nvdimm_num_label_slots(ndd);
76	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
77	size = __sizeof_namespace_index(nslot) * 2;
78	if (size <= space && nslot >= 2)
79		return size / 2;
80
81	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
82			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
83	return 0;
84}
85
86static int __nd_label_validate(struct nvdimm_drvdata *ndd)
87{
88	/*
89	 * The on-media label format consists of two index blocks followed
90	 * by an array of labels.  None of these structures are ever
91	 * updated in place.  A sequence number tracks the current
92	 * active index and the next one to write, while labels are
93	 * written to free slots.
94	 *
95	 *     +------------+
96	 *     |            |
97	 *     |  nsindex0  |
98	 *     |            |
99	 *     +------------+
100	 *     |            |
101	 *     |  nsindex1  |
102	 *     |            |
103	 *     +------------+
104	 *     |   label0   |
105	 *     +------------+
106	 *     |   label1   |
107	 *     +------------+
108	 *     |            |
109	 *      ....nslot...
110	 *     |            |
111	 *     +------------+
112	 *     |   labelN   |
113	 *     +------------+
114	 */
115	struct nd_namespace_index *nsindex[] = {
116		to_namespace_index(ndd, 0),
117		to_namespace_index(ndd, 1),
118	};
119	const int num_index = ARRAY_SIZE(nsindex);
120	struct device *dev = ndd->dev;
121	bool valid[2] = { 0 };
122	int i, num_valid = 0;
123	u32 seq;
124
125	for (i = 0; i < num_index; i++) {
126		u32 nslot;
127		u8 sig[NSINDEX_SIG_LEN];
128		u64 sum_save, sum, size;
129		unsigned int version, labelsize;
130
131		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
132		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
133			dev_dbg(dev, "nsindex%d signature invalid\n", i);
134			continue;
135		}
136
137		/* label sizes larger than 128 arrived with v1.2 */
138		version = __le16_to_cpu(nsindex[i]->major) * 100
139			+ __le16_to_cpu(nsindex[i]->minor);
140		if (version >= 102)
141			labelsize = 1 << (7 + nsindex[i]->labelsize);
142		else
143			labelsize = 128;
144
145		if (labelsize != sizeof_namespace_label(ndd)) {
146			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
147					i, nsindex[i]->labelsize);
148			continue;
149		}
150
151		sum_save = __le64_to_cpu(nsindex[i]->checksum);
152		nsindex[i]->checksum = __cpu_to_le64(0);
153		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
154		nsindex[i]->checksum = __cpu_to_le64(sum_save);
155		if (sum != sum_save) {
156			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
157			continue;
158		}
159
160		seq = __le32_to_cpu(nsindex[i]->seq);
161		if ((seq & NSINDEX_SEQ_MASK) == 0) {
162			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
163			continue;
164		}
165
166		/* sanity check the index against expected values */
167		if (__le64_to_cpu(nsindex[i]->myoff)
168				!= i * sizeof_namespace_index(ndd)) {
169			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
170					i, (unsigned long long)
171					__le64_to_cpu(nsindex[i]->myoff));
172			continue;
173		}
174		if (__le64_to_cpu(nsindex[i]->otheroff)
175				!= (!i) * sizeof_namespace_index(ndd)) {
176			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
177					i, (unsigned long long)
178					__le64_to_cpu(nsindex[i]->otheroff));
179			continue;
180		}
181		if (__le64_to_cpu(nsindex[i]->labeloff)
182				!= 2 * sizeof_namespace_index(ndd)) {
183			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
184					i, (unsigned long long)
185					__le64_to_cpu(nsindex[i]->labeloff));
186			continue;
187		}
188
189		size = __le64_to_cpu(nsindex[i]->mysize);
190		if (size > sizeof_namespace_index(ndd)
191				|| size < sizeof(struct nd_namespace_index)) {
192			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
193			continue;
194		}
195
196		nslot = __le32_to_cpu(nsindex[i]->nslot);
197		if (nslot * sizeof_namespace_label(ndd)
198				+ 2 * sizeof_namespace_index(ndd)
199				> ndd->nsarea.config_size) {
200			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
201					i, nslot, ndd->nsarea.config_size);
202			continue;
203		}
204		valid[i] = true;
205		num_valid++;
206	}
207
208	switch (num_valid) {
209	case 0:
210		break;
211	case 1:
212		for (i = 0; i < num_index; i++)
213			if (valid[i])
214				return i;
215		/* can't have num_valid > 0 but valid[] = { false, false } */
216		WARN_ON(1);
217		break;
218	default:
219		/* pick the best index... */
220		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
221				__le32_to_cpu(nsindex[1]->seq));
222		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
223			return 1;
224		else
225			return 0;
226		break;
227	}
228
229	return -1;
230}
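/*
 * Note: __nd_label_validate() returns the array position (0 or 1) of the
 * best valid namespace index block, or -1 when neither block passes the
 * signature, size, checksum and offset checks above.
 */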
231
232static int nd_label_validate(struct nvdimm_drvdata *ndd)
233{
234	/*
235	 * In order to probe for and validate namespace index blocks we
236	 * need to know the size of the labels, and we can't trust the
237	 * size of the labels until we validate the index blocks.
238	 * Resolve this dependency loop by probing for known label
239	 * sizes, but default to v1.2 256-byte namespace labels if
240	 * discovery fails.
241	 */
242	int label_size[] = { 128, 256 };
243	int i, rc;
244
245	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
246		ndd->nslabel_size = label_size[i];
247		rc = __nd_label_validate(ndd);
248		if (rc >= 0)
249			return rc;
250	}
251
252	return -1;
253}
254
255static void nd_label_copy(struct nvdimm_drvdata *ndd,
256			  struct nd_namespace_index *dst,
257			  struct nd_namespace_index *src)
258{
259	/* just exit if either destination or source is NULL */
260	if (!dst || !src)
261		return;
262
263	memcpy(dst, src, sizeof_namespace_index(ndd));
264}
265
266static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
267{
268	void *base = to_namespace_index(ndd, 0);
269
270	return base + 2 * sizeof_namespace_index(ndd);
271}
272
273static int to_slot(struct nvdimm_drvdata *ndd,
274		struct nd_namespace_label *nd_label)
275{
276	unsigned long label, base;
277
278	label = (unsigned long) nd_label;
279	base = (unsigned long) nd_label_base(ndd);
280
281	return (label - base) / sizeof_namespace_label(ndd);
282}
283
284static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
285{
286	unsigned long label, base;
287
288	base = (unsigned long) nd_label_base(ndd);
289	label = base + sizeof_namespace_label(ndd) * slot;
290
291	return (struct nd_namespace_label *) label;
292}
293
294#define for_each_clear_bit_le(bit, addr, size) \
295	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
296	     (bit) < (size);                                    \
297	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
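/*
 * Note: in the on-media free bitmap a set bit means the slot is free, so
 * iterating the *clear* bits with for_each_clear_bit_le() visits every
 * allocated (potentially active) label slot.
 */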
298
299/**
300 * preamble_index - common variable initialization for nd_label_* routines
301 * @ndd: dimm container for the relevant label set
302 * @idx: namespace_index index
303 * @nsindex_out: on return set to the currently active namespace index
304 * @free: on return set to the free label bitmap in the index
305 * @nslot: on return set to the number of slots in the label space
306 */
307static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
308		struct nd_namespace_index **nsindex_out,
309		unsigned long **free, u32 *nslot)
310{
311	struct nd_namespace_index *nsindex;
312
313	nsindex = to_namespace_index(ndd, idx);
314	if (nsindex == NULL)
315		return false;
316
317	*free = (unsigned long *) nsindex->free;
318	*nslot = __le32_to_cpu(nsindex->nslot);
319	*nsindex_out = nsindex;
320
321	return true;
322}
323
324char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
325{
326	if (!label_id || !uuid)
327		return NULL;
328	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
329			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
330	return label_id->id;
331}
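/*
 * Illustrative example: nd_label_gen_id() produces identifiers of the form
 * "pmem-<uuid>" or "blk-<uuid>" (big-endian UUID formatting via %pUb),
 * which are later matched against DPA resource names, e.g. in
 * nd_label_reserve_dpa() and __pmem_label_update() below.
 */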
332
333static bool preamble_current(struct nvdimm_drvdata *ndd,
334		struct nd_namespace_index **nsindex,
335		unsigned long **free, u32 *nslot)
336{
337	return preamble_index(ndd, ndd->ns_current, nsindex,
338			free, nslot);
339}
340
341static bool preamble_next(struct nvdimm_drvdata *ndd,
342		struct nd_namespace_index **nsindex,
343		unsigned long **free, u32 *nslot)
344{
345	return preamble_index(ndd, ndd->ns_next, nsindex,
346			free, nslot);
347}
348
349static bool slot_valid(struct nvdimm_drvdata *ndd,
350		struct nd_namespace_label *nd_label, u32 slot)
351{
352	/* check that the label was written to the slot we expect */
353	if (slot != __le32_to_cpu(nd_label->slot))
354		return false;
355
356	/* check checksum */
357	if (namespace_label_has(ndd, checksum)) {
358		u64 sum, sum_save;
359
360		sum_save = __le64_to_cpu(nd_label->checksum);
361		nd_label->checksum = __cpu_to_le64(0);
362		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
363		nd_label->checksum = __cpu_to_le64(sum_save);
364		if (sum != sum_save) {
365			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
366				slot, sum);
367			return false;
368		}
369	}
370
371	return true;
372}
373
374int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
375{
376	struct nd_namespace_index *nsindex;
377	unsigned long *free;
378	u32 nslot, slot;
379
380	if (!preamble_current(ndd, &nsindex, &free, &nslot))
381		return 0; /* no label, nothing to reserve */
382
383	for_each_clear_bit_le(slot, free, nslot) {
384		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
385		struct nd_namespace_label *nd_label;
386		struct nd_region *nd_region = NULL;
387		u8 label_uuid[NSLABEL_UUID_LEN];
388		struct nd_label_id label_id;
389		struct resource *res;
390		u32 flags;
391
392		nd_label = to_label(ndd, slot);
393
394		if (!slot_valid(ndd, nd_label, slot))
395			continue;
396
397		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
398		flags = __le32_to_cpu(nd_label->flags);
399		if (test_bit(NDD_NOBLK, &nvdimm->flags))
400			flags &= ~NSLABEL_FLAG_LOCAL;
401		nd_label_gen_id(&label_id, label_uuid, flags);
402		res = nvdimm_allocate_dpa(ndd, &label_id,
403				__le64_to_cpu(nd_label->dpa),
404				__le64_to_cpu(nd_label->rawsize));
405		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
406		if (!res)
407			return -EBUSY;
408	}
409
410	return 0;
411}
412
413int nd_label_data_init(struct nvdimm_drvdata *ndd)
414{
415	size_t config_size, read_size, max_xfer, offset;
416	struct nd_namespace_index *nsindex;
417	unsigned int i;
418	int rc = 0;
419	u32 nslot;
420
421	if (ndd->data)
422		return 0;
423
424	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
425		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
426			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
427		return -ENXIO;
428	}
429
430	/*
431	 * We need to determine the maximum index area as this is the section
432	 * we must read and validate before we can start processing labels.
433	 *
434	 * If the area is too small to contain the two indexes and two labels,
435	 * then we abort.
436	 *
437	 * Start at a label size of 128 as this should result in the largest
438	 * possible namespace index size.
439	 */
440	ndd->nslabel_size = 128;
441	read_size = sizeof_namespace_index(ndd) * 2;
442	if (!read_size)
443		return -ENXIO;
444
445	/* Allocate config data */
446	config_size = ndd->nsarea.config_size;
447	ndd->data = kvzalloc(config_size, GFP_KERNEL);
448	if (!ndd->data)
449		return -ENOMEM;
450
451	/*
452	 * We want to guarantee as few reads as possible while conserving
453	 * memory. To do that we figure out how much unused space will be left
454	 * in the last read, divide that by the total number of reads it is
455	 * going to take given our maximum transfer size, and then reduce our
456	 * maximum transfer size based on that result.
457	 */
458	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
459	if (read_size < max_xfer) {
460		/* trim waste */
461		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
462			    DIV_ROUND_UP(config_size, max_xfer);
463		/* make certain we read indexes in exactly 1 read */
464		if (max_xfer < read_size)
465			max_xfer = read_size;
466	}
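	/*
	 * Worked example (hypothetical numbers): with config_size = 131072
	 * and max_xfer = 5000, 27 reads are needed and the last one would
	 * overshoot by 3928 bytes; 3928 / 27 = 145, so max_xfer is trimmed
	 * to 4855, which still covers the area in 27 reads with only 13
	 * bytes of slack.  The check above then keeps max_xfer >= read_size
	 * so both index blocks always arrive in the first transfer.
	 */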
467
468	/* Make our initial read size a multiple of max_xfer size */
469	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
470			config_size);
471
472	/* Read the index data */
473	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
474	if (rc)
475		goto out_err;
476
477	/* Validate index data; if not valid, assume all labels are invalid */
478	ndd->ns_current = nd_label_validate(ndd);
479	if (ndd->ns_current < 0)
480		return 0;
481
482	/* Record our index values */
483	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
484
485	/* Copy "current" index on top of the "next" index */
486	nsindex = to_current_namespace_index(ndd);
487	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
488
489	/* Determine starting offset for label data */
490	offset = __le64_to_cpu(nsindex->labeloff);
491	nslot = __le32_to_cpu(nsindex->nslot);
492
493	/* Loop through the free list pulling in any active labels */
494	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
495		size_t label_read_size;
496
497		/* zero out the unused labels */
498		if (test_bit_le(i, nsindex->free)) {
499			memset(ndd->data + offset, 0, ndd->nslabel_size);
500			continue;
501		}
502
503		/* if we already read past here then just continue */
504		if (offset + ndd->nslabel_size <= read_size)
505			continue;
506
507		/* if we haven't read in a while, reset our read_size offset */
508		if (read_size < offset)
509			read_size = offset;
510
511		/* determine how much more will be read after this next call. */
512		label_read_size = offset + ndd->nslabel_size - read_size;
513		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
514				  max_xfer;
515
516		/* truncate last read if needed */
517		if (read_size + label_read_size > config_size)
518			label_read_size = config_size - read_size;
519
520		/* Read the label data */
521		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
522					    read_size, label_read_size);
523		if (rc)
524			goto out_err;
525
526		/* push read_size to next read offset */
527		read_size += label_read_size;
528	}
529
530	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
531out_err:
532	return rc;
533}
534
535int nd_label_active_count(struct nvdimm_drvdata *ndd)
536{
537	struct nd_namespace_index *nsindex;
538	unsigned long *free;
539	u32 nslot, slot;
540	int count = 0;
541
542	if (!preamble_current(ndd, &nsindex, &free, &nslot))
543		return 0;
544
545	for_each_clear_bit_le(slot, free, nslot) {
546		struct nd_namespace_label *nd_label;
547
548		nd_label = to_label(ndd, slot);
549
550		if (!slot_valid(ndd, nd_label, slot)) {
551			u32 label_slot = __le32_to_cpu(nd_label->slot);
552			u64 size = __le64_to_cpu(nd_label->rawsize);
553			u64 dpa = __le64_to_cpu(nd_label->dpa);
554
555			dev_dbg(ndd->dev,
556				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
557					slot, label_slot, dpa, size);
558			continue;
559		}
560		count++;
561	}
562	return count;
563}
564
565struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
566{
567	struct nd_namespace_index *nsindex;
568	unsigned long *free;
569	u32 nslot, slot;
570
571	if (!preamble_current(ndd, &nsindex, &free, &nslot))
572		return NULL;
573
574	for_each_clear_bit_le(slot, free, nslot) {
575		struct nd_namespace_label *nd_label;
576
577		nd_label = to_label(ndd, slot);
578		if (!slot_valid(ndd, nd_label, slot))
579			continue;
580
581		if (n-- == 0)
582			return to_label(ndd, slot);
583	}
584
585	return NULL;
586}
587
588u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
589{
590	struct nd_namespace_index *nsindex;
591	unsigned long *free;
592	u32 nslot, slot;
593
594	if (!preamble_next(ndd, &nsindex, &free, &nslot))
595		return UINT_MAX;
596
597	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
598
599	slot = find_next_bit_le(free, nslot, 0);
600	if (slot == nslot)
601		return UINT_MAX;
602
603	clear_bit_le(slot, free);
604
605	return slot;
606}
607
608bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
609{
610	struct nd_namespace_index *nsindex;
611	unsigned long *free;
612	u32 nslot;
613
614	if (!preamble_next(ndd, &nsindex, &free, &nslot))
615		return false;
616
617	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
618
619	if (slot < nslot)
620		return !test_and_set_bit_le(slot, free);
621	return false;
622}
623
624u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
625{
626	struct nd_namespace_index *nsindex;
627	unsigned long *free;
628	u32 nslot;
629
630	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
631
632	if (!preamble_next(ndd, &nsindex, &free, &nslot))
633		return nvdimm_num_label_slots(ndd);
634
635	return bitmap_weight(free, nslot);
636}
637
638static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
639		unsigned long flags)
640{
641	struct nd_namespace_index *nsindex;
642	unsigned long offset;
643	u64 checksum;
644	u32 nslot;
645	int rc;
646
647	nsindex = to_namespace_index(ndd, index);
648	if (flags & ND_NSINDEX_INIT)
649		nslot = nvdimm_num_label_slots(ndd);
650	else
651		nslot = __le32_to_cpu(nsindex->nslot);
652
653	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
654	memset(&nsindex->flags, 0, 3);
655	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
656	nsindex->seq = __cpu_to_le32(seq);
657	offset = (unsigned long) nsindex
658		- (unsigned long) to_namespace_index(ndd, 0);
659	nsindex->myoff = __cpu_to_le64(offset);
660	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
661	offset = (unsigned long) to_namespace_index(ndd,
662			nd_label_next_nsindex(index))
663		- (unsigned long) to_namespace_index(ndd, 0);
664	nsindex->otheroff = __cpu_to_le64(offset);
665	offset = (unsigned long) nd_label_base(ndd)
666		- (unsigned long) to_namespace_index(ndd, 0);
667	nsindex->labeloff = __cpu_to_le64(offset);
668	nsindex->nslot = __cpu_to_le32(nslot);
669	nsindex->major = __cpu_to_le16(1);
670	if (sizeof_namespace_label(ndd) < 256)
671		nsindex->minor = __cpu_to_le16(1);
672	else
673		nsindex->minor = __cpu_to_le16(2);
674	nsindex->checksum = __cpu_to_le64(0);
675	if (flags & ND_NSINDEX_INIT) {
676		unsigned long *free = (unsigned long *) nsindex->free;
677		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
678		int last_bits, i;
679
680		memset(nsindex->free, 0xff, nfree / 8);
681		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
682			clear_bit_le(nslot + i, free);
683	}
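	/*
	 * Illustrative example: the ND_NSINDEX_INIT path above rounds the
	 * free bitmap up to whole longs, e.g. nslot = 1020 with 64-bit longs
	 * gives nfree = 1024: 128 bytes are set to 0xff and the trailing
	 * four bits (slots 1020-1023) are cleared so they never appear free.
	 */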
684	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
685	nsindex->checksum = __cpu_to_le64(checksum);
686	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
687			nsindex, sizeof_namespace_index(ndd));
688	if (rc < 0)
689		return rc;
690
691	if (flags & ND_NSINDEX_INIT)
692		return 0;
693
694	/* copy the index we just wrote to the new 'next' */
695	WARN_ON(index != ndd->ns_next);
696	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
697	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
698	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
699	WARN_ON(ndd->ns_current == ndd->ns_next);
700
701	return 0;
702}
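/*
 * Note: outside of ND_NSINDEX_INIT, nd_label_write_index() is always
 * called on ndd->ns_next with an incremented sequence number; once the
 * write succeeds the current/next roles swap, so the block just written
 * becomes the active index and the old one becomes the next staging
 * target.
 */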
703
704static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
705		struct nd_namespace_label *nd_label)
706{
707	return (unsigned long) nd_label
708		- (unsigned long) to_namespace_index(ndd, 0);
709}
710
711enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
712{
713	if (guid_equal(guid, &nvdimm_btt_guid))
714		return NVDIMM_CCLASS_BTT;
715	else if (guid_equal(guid, &nvdimm_btt2_guid))
716		return NVDIMM_CCLASS_BTT2;
717	else if (guid_equal(guid, &nvdimm_pfn_guid))
718		return NVDIMM_CCLASS_PFN;
719	else if (guid_equal(guid, &nvdimm_dax_guid))
720		return NVDIMM_CCLASS_DAX;
721	else if (guid_equal(guid, &guid_null))
722		return NVDIMM_CCLASS_NONE;
723
724	return NVDIMM_CCLASS_UNKNOWN;
725}
726
727static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
728	guid_t *target)
729{
730	if (claim_class == NVDIMM_CCLASS_BTT)
731		return &nvdimm_btt_guid;
732	else if (claim_class == NVDIMM_CCLASS_BTT2)
733		return &nvdimm_btt2_guid;
734	else if (claim_class == NVDIMM_CCLASS_PFN)
735		return &nvdimm_pfn_guid;
736	else if (claim_class == NVDIMM_CCLASS_DAX)
737		return &nvdimm_dax_guid;
738	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
739		/*
740		 * If we're modifying a namespace for which we don't
741		 * know the claim_class, don't touch the existing guid.
742		 */
743		return target;
744	} else
745		return &guid_null;
746}
747
748static void reap_victim(struct nd_mapping *nd_mapping,
749		struct nd_label_ent *victim)
750{
751	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
752	u32 slot = to_slot(ndd, victim->label);
753
754	dev_dbg(ndd->dev, "free: %d\n", slot);
755	nd_label_free_slot(ndd, slot);
756	victim->label = NULL;
757}
758
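/*
 * Note: a pmem label update never modifies a live label in place.
 * __pmem_label_update() writes the new label into a free slot, marks
 * superseded slots for reuse (reap_victim()), and only then commits the
 * change by writing the staging index with a bumped sequence number.
 */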
759static int __pmem_label_update(struct nd_region *nd_region,
760		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
761		int pos, unsigned long flags)
762{
763	struct nd_namespace_common *ndns = &nspm->nsio.common;
764	struct nd_interleave_set *nd_set = nd_region->nd_set;
765	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
766	struct nd_namespace_label *nd_label;
767	struct nd_namespace_index *nsindex;
768	struct nd_label_ent *label_ent;
769	struct nd_label_id label_id;
770	struct resource *res;
771	unsigned long *free;
772	u32 nslot, slot;
773	size_t offset;
774	u64 cookie;
775	int rc;
776
777	if (!preamble_next(ndd, &nsindex, &free, &nslot))
778		return -ENXIO;
779
780	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
781	nd_label_gen_id(&label_id, nspm->uuid, 0);
782	for_each_dpa_resource(ndd, res)
783		if (strcmp(res->name, label_id.id) == 0)
784			break;
785
786	if (!res) {
787		WARN_ON_ONCE(1);
788		return -ENXIO;
789	}
790
791	/* allocate and write the label to the staging (next) index */
792	slot = nd_label_alloc_slot(ndd);
793	if (slot == UINT_MAX)
794		return -ENXIO;
795	dev_dbg(ndd->dev, "allocated: %d\n", slot);
796
797	nd_label = to_label(ndd, slot);
798	memset(nd_label, 0, sizeof_namespace_label(ndd));
799	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
800	if (nspm->alt_name)
801		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
802	nd_label->flags = __cpu_to_le32(flags);
803	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
804	nd_label->position = __cpu_to_le16(pos);
805	nd_label->isetcookie = __cpu_to_le64(cookie);
806	nd_label->rawsize = __cpu_to_le64(resource_size(res));
807	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
808	nd_label->dpa = __cpu_to_le64(res->start);
809	nd_label->slot = __cpu_to_le32(slot);
810	if (namespace_label_has(ndd, type_guid))
811		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
812	if (namespace_label_has(ndd, abstraction_guid))
813		guid_copy(&nd_label->abstraction_guid,
814				to_abstraction_guid(ndns->claim_class,
815					&nd_label->abstraction_guid));
816	if (namespace_label_has(ndd, checksum)) {
817		u64 sum;
818
819		nd_label->checksum = __cpu_to_le64(0);
820		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
821		nd_label->checksum = __cpu_to_le64(sum);
822	}
823	nd_dbg_dpa(nd_region, ndd, res, "\n");
824
825	/* update label */
826	offset = nd_label_offset(ndd, nd_label);
827	rc = nvdimm_set_config_data(ndd, offset, nd_label,
828			sizeof_namespace_label(ndd));
829	if (rc < 0)
830		return rc;
831
832	/* Garbage collect the previous label */
833	mutex_lock(&nd_mapping->lock);
834	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
835		if (!label_ent->label)
836			continue;
837		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
838				|| memcmp(nspm->uuid, label_ent->label->uuid,
839					NSLABEL_UUID_LEN) == 0)
840			reap_victim(nd_mapping, label_ent);
841	}
842
843	/* update index */
844	rc = nd_label_write_index(ndd, ndd->ns_next,
845			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
846	if (rc == 0) {
847		list_for_each_entry(label_ent, &nd_mapping->labels, list)
848			if (!label_ent->label) {
849				label_ent->label = nd_label;
850				nd_label = NULL;
851				break;
852			}
853		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
854				"failed to track label: %d\n",
855				to_slot(ndd, nd_label));
856		if (nd_label)
857			rc = -ENXIO;
858	}
859	mutex_unlock(&nd_mapping->lock);
860
861	return rc;
862}
863
864static bool is_old_resource(struct resource *res, struct resource **list, int n)
865{
866	int i;
867
868	if (res->flags & DPA_RESOURCE_ADJUSTED)
869		return false;
870	for (i = 0; i < n; i++)
871		if (res == list[i])
872			return true;
873	return false;
874}
875
876static struct resource *to_resource(struct nvdimm_drvdata *ndd,
877		struct nd_namespace_label *nd_label)
878{
879	struct resource *res;
880
881	for_each_dpa_resource(ndd, res) {
882		if (res->start != __le64_to_cpu(nd_label->dpa))
883			continue;
884		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
885			continue;
886		return res;
887	}
888
889	return NULL;
890}
891
892/*
893 * 1/ Account all the labels that can be freed after this update
894 * 2/ Allocate and write the label to the staging (next) index
895 * 3/ Record the resources in the namespace device
896 */
897static int __blk_label_update(struct nd_region *nd_region,
898		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
899		int num_labels)
900{
901	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
902	struct nd_interleave_set *nd_set = nd_region->nd_set;
903	struct nd_namespace_common *ndns = &nsblk->common;
904	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
905	struct nd_namespace_label *nd_label;
906	struct nd_label_ent *label_ent, *e;
907	struct nd_namespace_index *nsindex;
908	unsigned long *free, *victim_map = NULL;
909	struct resource *res, **old_res_list;
910	struct nd_label_id label_id;
911	u8 uuid[NSLABEL_UUID_LEN];
912	int min_dpa_idx = 0;
913	LIST_HEAD(list);
914	u32 nslot, slot;
915
916	if (!preamble_next(ndd, &nsindex, &free, &nslot))
917		return -ENXIO;
918
919	old_res_list = nsblk->res;
920	nfree = nd_label_nfree(ndd);
921	old_num_resources = nsblk->num_resources;
922	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
923
924	/*
925	 * We need to loop over the old resources a few times, which seems a
926	 * bit inefficient, but we need to know that we have the label
927	 * space before we start mutating the tracking structures.
928	 * Otherwise the recovery method of last resort for userspace is
929	 * to disable and re-enable the parent region.
930	 */
931	alloc = 0;
932	for_each_dpa_resource(ndd, res) {
933		if (strcmp(res->name, label_id.id) != 0)
934			continue;
935		if (!is_old_resource(res, old_res_list, old_num_resources))
936			alloc++;
937	}
938
939	victims = 0;
940	if (old_num_resources) {
941		/* convert old local-label-map to dimm-slot victim-map */
942		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
943		if (!victim_map)
944			return -ENOMEM;
945
946		/* mark unused labels for garbage collection */
947		for_each_clear_bit_le(slot, free, nslot) {
948			nd_label = to_label(ndd, slot);
949			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
950			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
951				continue;
952			res = to_resource(ndd, nd_label);
953			if (res && is_old_resource(res, old_res_list,
954						old_num_resources))
955				continue;
956			slot = to_slot(ndd, nd_label);
957			set_bit(slot, victim_map);
958			victims++;
959		}
960	}
961
962	/* don't allow updates that consume the last label */
963	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
964		dev_info(&nsblk->common.dev, "insufficient label space\n");
965		bitmap_free(victim_map);
966		return -ENOSPC;
967	}
968	/* from here on we need to abort on error */
969
971	/* assign all resources to the namespace before writing the labels */
972	nsblk->res = NULL;
973	nsblk->num_resources = 0;
974	for_each_dpa_resource(ndd, res) {
975		if (strcmp(res->name, label_id.id) != 0)
976			continue;
977		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
978			rc = -ENOMEM;
979			goto abort;
980		}
981	}
982
983	/* release slots associated with any invalidated UUIDs */
984	mutex_lock(&nd_mapping->lock);
985	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
986		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
987			reap_victim(nd_mapping, label_ent);
988			list_move(&label_ent->list, &list);
989		}
990	mutex_unlock(&nd_mapping->lock);
991
992	/*
993	 * Find the resource associated with the first label in the set
994	 * per the v1.2 namespace specification.
995	 */
996	for (i = 0; i < nsblk->num_resources; i++) {
997		struct resource *min = nsblk->res[min_dpa_idx];
998
999		res = nsblk->res[i];
1000		if (res->start < min->start)
1001			min_dpa_idx = i;
1002	}
1003
1004	for (i = 0; i < nsblk->num_resources; i++) {
1005		size_t offset;
1006
1007		res = nsblk->res[i];
1008		if (is_old_resource(res, old_res_list, old_num_resources))
1009			continue; /* carry-over */
1010		slot = nd_label_alloc_slot(ndd);
1011		if (slot == UINT_MAX) {
1012			rc = -ENXIO;
1013			goto abort;
1014		}
1015		dev_dbg(ndd->dev, "allocated: %d\n", slot);
1016
1017		nd_label = to_label(ndd, slot);
1018		memset(nd_label, 0, sizeof_namespace_label(ndd));
1019		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
1020		if (nsblk->alt_name)
1021			memcpy(nd_label->name, nsblk->alt_name,
1022					NSLABEL_NAME_LEN);
1023		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
1024
1025		/*
1026		 * Use the presence of the type_guid as a flag to
1027		 * determine isetcookie usage and nlabel + position
1028		 * policy for blk-aperture namespaces.
1029		 */
1030		if (namespace_label_has(ndd, type_guid)) {
1031			if (i == min_dpa_idx) {
1032				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
1033				nd_label->position = __cpu_to_le16(0);
1034			} else {
1035				nd_label->nlabel = __cpu_to_le16(0xffff);
1036				nd_label->position = __cpu_to_le16(0xffff);
1037			}
1038			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
1039		} else {
1040			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
1041			nd_label->position = __cpu_to_le16(0); /* N/A */
1042			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
1043		}
1044
1045		nd_label->dpa = __cpu_to_le64(res->start);
1046		nd_label->rawsize = __cpu_to_le64(resource_size(res));
1047		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
1048		nd_label->slot = __cpu_to_le32(slot);
1049		if (namespace_label_has(ndd, type_guid))
1050			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
1051		if (namespace_label_has(ndd, abstraction_guid))
1052			guid_copy(&nd_label->abstraction_guid,
1053					to_abstraction_guid(ndns->claim_class,
1054						&nd_label->abstraction_guid));
1055
1056		if (namespace_label_has(ndd, checksum)) {
1057			u64 sum;
1058
1059			nd_label->checksum = __cpu_to_le64(0);
1060			sum = nd_fletcher64(nd_label,
1061					sizeof_namespace_label(ndd), 1);
1062			nd_label->checksum = __cpu_to_le64(sum);
1063		}
1064
1065		/* update label */
1066		offset = nd_label_offset(ndd, nd_label);
1067		rc = nvdimm_set_config_data(ndd, offset, nd_label,
1068				sizeof_namespace_label(ndd));
1069		if (rc < 0)
1070			goto abort;
1071	}
1072
1073	/* free up now unused slots in the new index */
1074	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
1075		dev_dbg(ndd->dev, "free: %d\n", slot);
1076		nd_label_free_slot(ndd, slot);
1077	}
1078
1079	/* update index */
1080	rc = nd_label_write_index(ndd, ndd->ns_next,
1081			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
1082	if (rc)
1083		goto abort;
1084
1085	/*
1086	 * Now that the on-dimm labels are up to date, fix up the tracking
1087	 * entries in nd_mapping->labels
1088	 */
1089	nlabel = 0;
1090	mutex_lock(&nd_mapping->lock);
1091	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1092		nd_label = label_ent->label;
1093		if (!nd_label)
1094			continue;
1095		nlabel++;
1096		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1097		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
1098			continue;
1099		nlabel--;
1100		list_move(&label_ent->list, &list);
1101		label_ent->label = NULL;
1102	}
1103	list_splice_tail_init(&list, &nd_mapping->labels);
1104	mutex_unlock(&nd_mapping->lock);
1105
1106	if (nlabel + nsblk->num_resources > num_labels) {
1107		/*
1108		 * Bug, we can't end up with more resources than
1109		 * available labels
1110		 */
1111		WARN_ON_ONCE(1);
1112		rc = -ENXIO;
1113		goto out;
1114	}
1115
1116	mutex_lock(&nd_mapping->lock);
1117	label_ent = list_first_entry_or_null(&nd_mapping->labels,
1118			typeof(*label_ent), list);
1119	if (!label_ent) {
1120		WARN_ON(1);
1121		mutex_unlock(&nd_mapping->lock);
1122		rc = -ENXIO;
1123		goto out;
1124	}
1125	for_each_clear_bit_le(slot, free, nslot) {
1126		nd_label = to_label(ndd, slot);
1127		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1128		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
1129			continue;
1130		res = to_resource(ndd, nd_label);
1131		res->flags &= ~DPA_RESOURCE_ADJUSTED;
1132		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
1133		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
1134			if (label_ent->label)
1135				continue;
1136			label_ent->label = nd_label;
1137			nd_label = NULL;
1138			break;
1139		}
1140		if (nd_label)
1141			dev_WARN(&nsblk->common.dev,
1142					"failed to track label slot%d\n", slot);
1143	}
1144	mutex_unlock(&nd_mapping->lock);
1145
1146 out:
1147	kfree(old_res_list);
1148	bitmap_free(victim_map);
1149	return rc;
1150
1151 abort:
1152	/*
1153	 * 1/ repair the allocated label bitmap in the index
1154	 * 2/ restore the resource list
1155	 */
1156	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
1157	kfree(nsblk->res);
1158	nsblk->res = old_res_list;
1159	nsblk->num_resources = old_num_resources;
1160	old_res_list = NULL;
1161	goto out;
1162}
1163
1164static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
1165{
1166	int i, old_num_labels = 0;
1167	struct nd_label_ent *label_ent;
1168	struct nd_namespace_index *nsindex;
1169	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1170
1171	mutex_lock(&nd_mapping->lock);
1172	list_for_each_entry(label_ent, &nd_mapping->labels, list)
1173		old_num_labels++;
1174	mutex_unlock(&nd_mapping->lock);
1175
1176	/*
1177	 * We need to preserve all the old labels for the mapping so
1178	 * they can be garbage collected after writing the new labels.
1179	 */
1180	for (i = old_num_labels; i < num_labels; i++) {
1181		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
1182		if (!label_ent)
1183			return -ENOMEM;
1184		mutex_lock(&nd_mapping->lock);
1185		list_add_tail(&label_ent->list, &nd_mapping->labels);
1186		mutex_unlock(&nd_mapping->lock);
1187	}
1188
1189	if (ndd->ns_current == -1 || ndd->ns_next == -1)
1190		/* pass */;
1191	else
1192		return max(num_labels, old_num_labels);
1193
1194	nsindex = to_namespace_index(ndd, 0);
1195	memset(nsindex, 0, ndd->nsarea.config_size);
1196	for (i = 0; i < 2; i++) {
1197		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1198
1199		if (rc)
1200			return rc;
1201	}
1202	ndd->ns_next = 1;
1203	ndd->ns_current = 0;
1204
1205	return max(num_labels, old_num_labels);
1206}
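/*
 * Note: when no valid index exists, init_labels() above seeds index block
 * 0 with sequence number 3 and index block 1 with sequence number 2 (the
 * "3 - i" argument), so block 0 starts out as the current index and block
 * 1 as the next staging target, matching ns_current = 0 and ns_next = 1.
 */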
1207
1208static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
1209{
1210	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1211	struct nd_label_ent *label_ent, *e;
1212	struct nd_namespace_index *nsindex;
1213	u8 label_uuid[NSLABEL_UUID_LEN];
1214	unsigned long *free;
1215	LIST_HEAD(list);
1216	u32 nslot, slot;
1217	int active = 0;
1218
1219	if (!uuid)
1220		return 0;
1221
1222	/* no index || no labels == nothing to delete */
1223	if (!preamble_next(ndd, &nsindex, &free, &nslot))
1224		return 0;
1225
1226	mutex_lock(&nd_mapping->lock);
1227	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1228		struct nd_namespace_label *nd_label = label_ent->label;
1229
1230		if (!nd_label)
1231			continue;
1232		active++;
1233		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
1234		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
1235			continue;
1236		active--;
1237		slot = to_slot(ndd, nd_label);
1238		nd_label_free_slot(ndd, slot);
1239		dev_dbg(ndd->dev, "free: %d\n", slot);
1240		list_move_tail(&label_ent->list, &list);
1241		label_ent->label = NULL;
1242	}
1243	list_splice_tail_init(&list, &nd_mapping->labels);
1244
1245	if (active == 0) {
1246		nd_mapping_free_labels(nd_mapping);
1247		dev_dbg(ndd->dev, "no more active labels\n");
1248	}
1249	mutex_unlock(&nd_mapping->lock);
1250
1251	return nd_label_write_index(ndd, ndd->ns_next,
1252			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
1253}
1254
1255int nd_pmem_namespace_label_update(struct nd_region *nd_region,
1256		struct nd_namespace_pmem *nspm, resource_size_t size)
1257{
1258	int i, rc;
1259
1260	for (i = 0; i < nd_region->ndr_mappings; i++) {
1261		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1262		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1263		struct resource *res;
1264		int count = 0;
1265
1266		if (size == 0) {
1267			rc = del_labels(nd_mapping, nspm->uuid);
1268			if (rc)
1269				return rc;
1270			continue;
1271		}
1272
1273		for_each_dpa_resource(ndd, res)
1274			if (strncmp(res->name, "pmem", 4) == 0)
1275				count++;
1276		WARN_ON_ONCE(!count);
1277
1278		rc = init_labels(nd_mapping, count);
1279		if (rc < 0)
1280			return rc;
1281
1282		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
1283				NSLABEL_FLAG_UPDATING);
1284		if (rc)
1285			return rc;
1286	}
1287
1288	if (size == 0)
1289		return 0;
1290
1291	/* Clear the UPDATING flag per UEFI 2.7 expectations */
1292	for (i = 0; i < nd_region->ndr_mappings; i++) {
1293		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1294
1295		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
1296		if (rc)
1297			return rc;
1298	}
1299
1300	return 0;
1301}
1302
1303int nd_blk_namespace_label_update(struct nd_region *nd_region,
1304		struct nd_namespace_blk *nsblk, resource_size_t size)
1305{
1306	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1307	struct resource *res;
1308	int count = 0;
1309
1310	if (size == 0)
1311		return del_labels(nd_mapping, nsblk->uuid);
1312
1313	for_each_dpa_resource(to_ndd(nd_mapping), res)
1314		count++;
1315
1316	count = init_labels(nd_mapping, count);
1317	if (count < 0)
1318		return count;
1319
1320	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
1321}
1322
1323int __init nd_label_init(void)
1324{
1325	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
1326	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
1327	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
1328	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));
1329
1330	return 0;
1331}
1332