Lines matching references to ndd
37 unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
39 return ndd->nslabel_size;
48 static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
51 return (ndd->nsarea.config_size - index_size * 2) /
52 sizeof_namespace_label(ndd);
55 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
59 tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
62 return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
65 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
75 nslot = nvdimm_num_label_slots(ndd);
76 space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
81 dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
82 ndd->nsarea.config_size, sizeof_namespace_label(ndd));
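The fragments above (sizeof_namespace_label, __nvdimm_num_label_slots, sizeof_namespace_index) carve the per-DIMM config area into two index blocks followed by an array of equally sized label slots. Below is a minimal standalone sketch of that arithmetic, with plain fields standing in for ndd->nsarea.config_size, ndd->nslabel_size and sizeof_namespace_index(); the example values are illustrative, not taken from the listing.

    #include <stdio.h>

    struct label_area {
            unsigned int config_size;   /* stands in for ndd->nsarea.config_size */
            unsigned int label_size;    /* stands in for ndd->nslabel_size (128 or 256) */
            unsigned int index_size;    /* stands in for sizeof_namespace_index(ndd) */
    };

    /* Mirrors __nvdimm_num_label_slots(): whatever remains after the two
     * index blocks is divided into label slots. */
    static unsigned int num_label_slots(const struct label_area *la)
    {
            return (la->config_size - la->index_size * 2) / la->label_size;
    }

    int main(void)
    {
            struct label_area la = {
                    .config_size = 128 * 1024,      /* illustrative only */
                    .label_size = 256,
                    .index_size = 256,
            };

            printf("label slots: %u\n", num_label_slots(&la));
            return 0;
    }

sizeof_namespace_index() runs the same arithmetic from the other direction, and the dev_err() above fires when config_size cannot hold two index blocks plus labels of the current size.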
86 static int __nd_label_validate(struct nvdimm_drvdata *ndd)
116 to_namespace_index(ndd, 0),
117 to_namespace_index(ndd, 1),
120 struct device *dev = ndd->dev;
145 if (labelsize != sizeof_namespace_label(ndd)) {
153 sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
168 != i * sizeof_namespace_index(ndd)) {
175 != (!i) * sizeof_namespace_index(ndd)) {
182 != 2 * sizeof_namespace_index(ndd)) {
190 if (size > sizeof_namespace_index(ndd)
197 if (nslot * sizeof_namespace_label(ndd)
198 + 2 * sizeof_namespace_index(ndd)
199 > ndd->nsarea.config_size) {
201 i, nslot, ndd->nsarea.config_size);
232 static int nd_label_validate(struct nvdimm_drvdata *ndd)
246 ndd->nslabel_size = label_size[i];
247 rc = __nd_label_validate(ndd);
255 static void nd_label_copy(struct nvdimm_drvdata *ndd,
263 memcpy(dst, src, sizeof_namespace_index(ndd));
266 static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
268 void *base = to_namespace_index(ndd, 0);
270 return base + 2 * sizeof_namespace_index(ndd);
273 static int to_slot(struct nvdimm_drvdata *ndd,
279 base = (unsigned long) nd_label_base(ndd);
281 return (label - base) / sizeof_namespace_label(ndd);
284 static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
288 base = (unsigned long) nd_label_base(ndd);
289 label = base + sizeof_namespace_label(ndd) * slot;
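nd_label_base(), to_slot() and to_label() translate between a slot number and a label address: the label array starts right after the two index blocks, and slot N lives at base + N * label size. A small sketch of that mapping follows; `area` is a hypothetical pointer to the in-memory copy of the config data (ndd->data in the listing).

    #include <stddef.h>

    /* Labels begin immediately after the two index blocks (nd_label_base). */
    static unsigned char *label_base(unsigned char *area, size_t index_size)
    {
            return area + 2 * index_size;
    }

    /* to_label(): slot number -> label address */
    static unsigned char *slot_to_label(unsigned char *area, size_t index_size,
                    size_t label_size, unsigned int slot)
    {
            return label_base(area, index_size) + (size_t)slot * label_size;
    }

    /* to_slot(): label address -> slot number */
    static unsigned int label_to_slot(unsigned char *area, size_t index_size,
                    size_t label_size, const unsigned char *label)
    {
            return (label - label_base(area, index_size)) / label_size;
    }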
301 * @ndd: dimm container for the relevant label set
307 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
313 nsindex = to_namespace_index(ndd, idx);
333 static bool preamble_current(struct nvdimm_drvdata *ndd,
337 return preamble_index(ndd, ndd->ns_current, nsindex,
341 static bool preamble_next(struct nvdimm_drvdata *ndd,
345 return preamble_index(ndd, ndd->ns_next, nsindex,
349 static bool slot_valid(struct nvdimm_drvdata *ndd,
357 if (namespace_label_has(ndd, checksum)) {
362 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
365 dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
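slot_valid() and __nd_label_validate() both feed the whole label or index block to nd_fletcher64(), with the stored checksum field zeroed while the sum is computed. The helper itself is outside this listing; the sketch below is one common reading of a Fletcher-64 over little-endian 32-bit words and should be treated as an assumption about its behaviour.

    #include <stdint.h>
    #include <stddef.h>

    /* Assumed shape of nd_fletcher64(): accumulate 32-bit words into the low
     * half, fold the running low half into the high half, return both. */
    static uint64_t fletcher64(const void *addr, size_t len)
    {
            const uint32_t *buf = addr;
            uint64_t lo = 0, hi = 0;
            size_t i;

            for (i = 0; i < len / sizeof(uint32_t); i++) {
                    lo += buf[i];           /* assumes a little-endian host */
                    hi += lo;
            }
            return hi << 32 | lo;
    }

Validation saves the stored checksum, zeroes the field, recomputes the sum over the block and compares; the index and label write paths further down run the same computation before the data is persisted.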
374 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
380 if (!preamble_current(ndd, &nsindex, &free, &nslot))
384 struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
392 nd_label = to_label(ndd, slot);
394 if (!slot_valid(ndd, nd_label, slot))
402 res = nvdimm_allocate_dpa(ndd, &label_id,
405 nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
413 int nd_label_data_init(struct nvdimm_drvdata *ndd)
421 if (ndd->data)
424 if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
425 dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
426 ndd->nsarea.max_xfer, ndd->nsarea.config_size);
440 ndd->nslabel_size = 128;
441 read_size = sizeof_namespace_index(ndd) * 2;
446 config_size = ndd->nsarea.config_size;
447 ndd->data = kvzalloc(config_size, GFP_KERNEL);
448 if (!ndd->data)
458 max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
473 rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
478 ndd->ns_current = nd_label_validate(ndd);
479 if (ndd->ns_current < 0)
483 ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
486 nsindex = to_current_namespace_index(ndd);
487 nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
494 for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
499 memset(ndd->data + offset, 0, ndd->nslabel_size);
504 if (offset + ndd->nslabel_size <= read_size)
512 label_read_size = offset + ndd->nslabel_size - read_size;
521 rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
530 dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
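nd_label_data_init() first guesses the smallest label size (128 bytes), reads just the two index blocks, validates them, and then pulls in only the labels that valid slots actually reference, never asking the DIMM for more than nsarea.max_xfer bytes per transfer. The sketch below keeps only the chunking part and reads the whole area in one pass; read_config is a hypothetical callback standing in for nvdimm_get_config_data().

    #include <stddef.h>

    typedef int (*read_config_fn)(void *dst, size_t offset, size_t len);

    /* Read 'config_size' bytes into 'buf' in pieces no larger than
     * 'max_xfer', mirroring the min_t() clamp in the listing above. */
    static int read_label_area(void *buf, size_t config_size, size_t max_xfer,
                    read_config_fn read_config)
    {
            size_t offset;

            if (max_xfer == 0)
                    return -1;      /* the listing bails out earlier in this case */
            if (max_xfer > config_size)
                    max_xfer = config_size;

            for (offset = 0; offset < config_size; offset += max_xfer) {
                    size_t len = config_size - offset;
                    int rc;

                    if (len > max_xfer)
                            len = max_xfer;
                    rc = read_config((char *)buf + offset, offset, len);
                    if (rc)
                            return rc;
            }
            return 0;
    }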
535 int nd_label_active_count(struct nvdimm_drvdata *ndd)
542 if (!preamble_current(ndd, &nsindex, &free, &nslot))
548 nd_label = to_label(ndd, slot);
550 if (!slot_valid(ndd, nd_label, slot)) {
555 dev_dbg(ndd->dev,
565 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
571 if (!preamble_current(ndd, &nsindex, &free, &nslot))
577 nd_label = to_label(ndd, slot);
578 if (!slot_valid(ndd, nd_label, slot))
582 return to_label(ndd, slot);
588 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
594 if (!preamble_next(ndd, &nsindex, &free, &nslot))
597 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
608 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
614 if (!preamble_next(ndd, &nsindex, &free, &nslot))
617 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
624 u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
630 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
632 if (!preamble_next(ndd, &nsindex, &free, &nslot))
633 return nvdimm_num_label_slots(ndd);
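nd_label_alloc_slot() and nd_label_free_slot() manage slots through the free-slot bitmap that preamble_next() returns (the `free` pointer and `nslot` count above); a set bit appears to mean the slot is free. The kernel uses little-endian bitmap helpers for this, so the exact bit layout in the sketch below, written with plain C bit operations, is an assumption.

    #include <stdint.h>
    #include <limits.h>

    #define BITS_PER_WORD   (sizeof(unsigned long) * CHAR_BIT)
    #define SLOT_INVALID    ((uint32_t)~0u)

    /* Find a set bit (a free slot) in the free map and clear it to mark
     * the slot in use. 'free_map' mirrors the 'free' pointer above. */
    static uint32_t alloc_slot(unsigned long *free_map, uint32_t nslot)
    {
            uint32_t slot;

            for (slot = 0; slot < nslot; slot++) {
                    unsigned long *word = &free_map[slot / BITS_PER_WORD];
                    unsigned long mask = 1UL << (slot % BITS_PER_WORD);

                    if (*word & mask) {
                            *word &= ~mask;
                            return slot;
                    }
            }
            return SLOT_INVALID;
    }

    /* Setting the bit again releases the slot. */
    static void free_slot(unsigned long *free_map, uint32_t slot)
    {
            free_map[slot / BITS_PER_WORD] |= 1UL << (slot % BITS_PER_WORD);
    }

nd_label_nfree() above falls back to nvdimm_num_label_slots() when preamble_next() fails, i.e. with no valid index yet every slot counts as free.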
638 static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
647 nsindex = to_namespace_index(ndd, index);
649 nslot = nvdimm_num_label_slots(ndd);
655 nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
658 - (unsigned long) to_namespace_index(ndd, 0);
660 nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
661 offset = (unsigned long) to_namespace_index(ndd,
663 - (unsigned long) to_namespace_index(ndd, 0);
665 offset = (unsigned long) nd_label_base(ndd)
666 - (unsigned long) to_namespace_index(ndd, 0);
670 if (sizeof_namespace_label(ndd) < 256)
684 checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
686 rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
687 nsindex, sizeof_namespace_index(ndd));
695 WARN_ON(index != ndd->ns_next);
696 nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
697 ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
698 ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
699 WARN_ON(ndd->ns_current == ndd->ns_next);
704 static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
708 - (unsigned long) to_namespace_index(ndd, 0);
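nd_label_write_index() always rewrites the "next" of the two index blocks with a bumped sequence number and a fresh Fletcher-64, then swaps the current/next roles, while nd_label_offset() supplies the byte offset at which a label is persisted. How the sequence number advances is not visible in this listing; a common scheme for a 2-bit sequence that reserves 0 as invalid is sketched here as an assumption.

    /* 2-bit sequence with 0 reserved as "uninitialized": 1 -> 2 -> 3 -> 1 -> ... */
    static unsigned int inc_seq(unsigned int seq)
    {
            static const unsigned int next[] = { 0, 2, 3, 1 };

            return next[seq & 3];
    }

On the read side, nd_label_validate() compares both index blocks and fills ndd->ns_current from the surviving copy, presumably preferring the newer (wrap-aware) sequence number after a crash mid-update.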
751 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
752 u32 slot = to_slot(ndd, victim->label);
754 dev_dbg(ndd->dev, "free: %d\n", slot);
755 nd_label_free_slot(ndd, slot);
765 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
777 if (!preamble_next(ndd, &nsindex, &free, &nslot))
782 for_each_dpa_resource(ndd, res)
792 slot = nd_label_alloc_slot(ndd);
795 dev_dbg(ndd->dev, "allocated: %d\n", slot);
797 nd_label = to_label(ndd, slot);
798 memset(nd_label, 0, sizeof_namespace_label(ndd));
810 if (namespace_label_has(ndd, type_guid))
812 if (namespace_label_has(ndd, abstraction_guid))
816 if (namespace_label_has(ndd, checksum)) {
820 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
823 nd_dbg_dpa(nd_region, ndd, res, "\n");
826 offset = nd_label_offset(ndd, nd_label);
827 rc = nvdimm_set_config_data(ndd, offset, nd_label,
828 sizeof_namespace_label(ndd));
844 rc = nd_label_write_index(ndd, ndd->ns_next,
855 to_slot(ndd, nd_label));
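Taken together, the __pmem_label_update() fragments describe a two-phase commit: a slot is claimed from the free bitmap, the label is filled in and checksummed, the label itself is written to its slot with nvdimm_set_config_data(), and only then is the next index block published via nd_label_write_index(ndd, ndd->ns_next, ...). Until that final index write lands, the current index still describes the old label set, so an interrupted update should simply be ignored the next time the index blocks are validated.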
876 static struct resource *to_resource(struct nvdimm_drvdata *ndd,
881 for_each_dpa_resource(ndd, res) {
904 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
916 if (!preamble_next(ndd, &nsindex, &free, &nslot))
920 nfree = nd_label_nfree(ndd);
932 for_each_dpa_resource(ndd, res) {
948 nd_label = to_label(ndd, slot);
952 res = to_resource(ndd, nd_label);
956 slot = to_slot(ndd, nd_label);
974 for_each_dpa_resource(ndd, res) {
977 if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
1010 slot = nd_label_alloc_slot(ndd);
1015 dev_dbg(ndd->dev, "allocated: %d\n", slot);
1017 nd_label = to_label(ndd, slot);
1018 memset(nd_label, 0, sizeof_namespace_label(ndd));
1030 if (namespace_label_has(ndd, type_guid)) {
1049 if (namespace_label_has(ndd, type_guid))
1051 if (namespace_label_has(ndd, abstraction_guid))
1056 if (namespace_label_has(ndd, checksum)) {
1061 sizeof_namespace_label(ndd), 1);
1066 offset = nd_label_offset(ndd, nd_label);
1067 rc = nvdimm_set_config_data(ndd, offset, nd_label,
1068 sizeof_namespace_label(ndd));
1075 dev_dbg(ndd->dev, "free: %d\n", slot);
1076 nd_label_free_slot(ndd, slot);
1080 rc = nd_label_write_index(ndd, ndd->ns_next,
1126 nd_label = to_label(ndd, slot);
1130 res = to_resource(ndd, nd_label);
1156 nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
1169 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1189 if (ndd->ns_current == -1 || ndd->ns_next == -1)
1194 nsindex = to_namespace_index(ndd, 0);
1195 memset(nsindex, 0, ndd->nsarea.config_size);
1197 int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1202 ndd->ns_next = 1;
1203 ndd->ns_current = 0;
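The init path above handles a DIMM with no valid index at all (ns_current == -1 || ns_next == -1): the in-memory config area is zeroed and both index blocks are written with seq = 3 - i, so index 0 carries the higher sequence number (3 vs 2), which under the 1 -> 2 -> 3 -> 1 scheme sketched earlier would make it the newer copy. That matches the ndd->ns_next = 1 / ndd->ns_current = 0 assignments that follow.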
1210 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1223 if (!preamble_next(ndd, &nsindex, &free, &nslot))
1237 slot = to_slot(ndd, nd_label);
1238 nd_label_free_slot(ndd, slot);
1239 dev_dbg(ndd->dev, "free: %d\n", slot);
1247 dev_dbg(ndd->dev, "no more active labels\n");
1251 return nd_label_write_index(ndd, ndd->ns_next,
1262 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1273 for_each_dpa_resource(ndd, res)