Lines matching refs: ndd
45 unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
47 return ndd->nslabel_size;
56 static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
59 return (ndd->nsarea.config_size - index_size * 2) /
60 sizeof_namespace_label(ndd);
63 int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
67 tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
70 return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
73 size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
83 nslot = nvdimm_num_label_slots(ndd);
84 space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
89 dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
90 ndd->nsarea.config_size, sizeof_namespace_label(ndd));
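The three routines above size the label storage area: nsarea.config_size must hold two index blocks (each padded to NSINDEX_ALIGN) plus nslot labels, and the slot count is derived from that constraint. A minimal user-space model of the arithmetic, with an illustrative index-header size and an example config_size (the real values come from the DIMM and from struct nd_namespace_index), could look like this:

#include <stdio.h>
#include <stdint.h>

#define NSINDEX_ALIGN	256	/* index block alignment/padding */

/* One index block: a fixed header plus a free-slot bitmap (one bit per
 * label slot), padded up to NSINDEX_ALIGN. The 72-byte header size is
 * only illustrative here. */
static size_t index_block_size(uint32_t nslot)
{
	size_t raw = 72 + (nslot + 7) / 8;

	return (raw + NSINDEX_ALIGN - 1) / NSINDEX_ALIGN * NSINDEX_ALIGN;
}

int main(void)
{
	uint32_t config_size = 128 * 1024;	/* example label area size */
	uint32_t label_size = 256;		/* 256-byte (v1.2) labels */

	/* First estimate ignores the index blocks, then the count is redone
	 * with their space subtracted, mirroring nvdimm_num_label_slots()
	 * calling __nvdimm_num_label_slots(). */
	uint32_t tmp_nslot = config_size / label_size;
	size_t index_size = index_block_size(tmp_nslot);
	uint32_t nslot = (config_size - 2 * index_size) / label_size;

	printf("index block %zu bytes, %u label slots\n",
			index_size, (unsigned) nslot);
	return 0;
}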
94 static int __nd_label_validate(struct nvdimm_drvdata *ndd)
124 to_namespace_index(ndd, 0),
125 to_namespace_index(ndd, 1),
128 struct device *dev = ndd->dev;
153 if (labelsize != sizeof_namespace_label(ndd)) {
161 sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
176 != i * sizeof_namespace_index(ndd)) {
183 != (!i) * sizeof_namespace_index(ndd)) {
190 != 2 * sizeof_namespace_index(ndd)) {
198 if (size > sizeof_namespace_index(ndd)
205 if (nslot * sizeof_namespace_label(ndd)
206 + 2 * sizeof_namespace_index(ndd)
207 > ndd->nsarea.config_size) {
209 i, nslot, ndd->nsarea.config_size);
240 static int nd_label_validate(struct nvdimm_drvdata *ndd)
254 ndd->nslabel_size = label_size[i];
255 rc = __nd_label_validate(ndd);
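__nd_label_validate() and nd_label_validate() resolve a circular dependency: index-block geometry depends on the label size, but the recorded label size can only be trusted once an index block checks out (signature, Fletcher-64 checksum, myoff/otheroff/labeloff, and overall fit in config_size). The outer loop simply retries validation with each known label size. A user-space sketch of that probe pattern, with validate() standing in for __nd_label_validate() and an illustrative candidate list:

#include <stddef.h>

/* Retry validation with each candidate label size; the first size that
 * yields a valid index block determines which block (0 or 1) is current.
 * The candidate values and their order here are illustrative. */
static int probe_label_size(int (*validate)(size_t label_size))
{
	static const size_t candidates[] = { 128, 256 };
	size_t i;

	for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
		int rc = validate(candidates[i]);

		if (rc >= 0)
			return rc;	/* 0 or 1: the valid/current index block */
	}
	return -1;			/* no candidate produced a valid index */
}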
263 static void nd_label_copy(struct nvdimm_drvdata *ndd,
271 memcpy(dst, src, sizeof_namespace_index(ndd));
274 static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
276 void *base = to_namespace_index(ndd, 0);
278 return base + 2 * sizeof_namespace_index(ndd);
281 static int to_slot(struct nvdimm_drvdata *ndd,
287 base = (unsigned long) nd_label_base(ndd);
289 return (label - base) / sizeof_namespace_label(ndd);
292 static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
296 base = (unsigned long) nd_label_base(ndd);
297 label = base + sizeof_namespace_label(ndd) * slot;
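nd_label_base(), to_slot() and to_label() are plain pointer arithmetic: labels are packed back to back immediately after the two index blocks. A small self-contained model of the mapping (the struct below is a simplified stand-in for the drvdata's cached view of the area):

#include <stdint.h>
#include <stddef.h>

struct label_area {
	uint8_t *base;		/* start of the area (index block 0) */
	size_t index_size;	/* sizeof_namespace_index() */
	size_t label_size;	/* sizeof_namespace_label() */
};

/* Labels start right after the two index blocks. */
static uint8_t *label_base(const struct label_area *la)
{
	return la->base + 2 * la->index_size;
}

static uint8_t *to_label_slot(const struct label_area *la, uint32_t slot)
{
	return label_base(la) + (size_t)slot * la->label_size;
}

static uint32_t to_slot_num(const struct label_area *la, const uint8_t *label)
{
	return (uint32_t)((label - label_base(la)) / la->label_size);
}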
309 * @ndd: dimm container for the relevant label set
315 static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
321 nsindex = to_namespace_index(ndd, idx);
341 static bool preamble_current(struct nvdimm_drvdata *ndd,
345 return preamble_index(ndd, ndd->ns_current, nsindex,
349 static bool preamble_next(struct nvdimm_drvdata *ndd,
353 return preamble_index(ndd, ndd->ns_next, nsindex,
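preamble_current() and preamble_next() are the common entry point for nearly everything below: they hand back the chosen index block together with its free-slot bitmap and slot count, or report that no valid index exists. A simplified model of that out-parameter pattern (the struct is a stand-in for struct nd_namespace_index):

#include <stdbool.h>
#include <stdint.h>

struct ns_index {
	uint32_t nslot;		/* number of label slots */
	unsigned long free[];	/* one bit per slot; set bit = slot is free */
};

/* Hand the caller the index block, its free bitmap and slot count in
 * one call, or bail out when there is no validated index to work with. */
static bool preamble(struct ns_index *idx, struct ns_index **out,
		     unsigned long **free, uint32_t *nslot)
{
	if (!idx)
		return false;

	*free = idx->free;
	*nslot = idx->nslot;
	*out = idx;
	return true;
}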
357 static bool nsl_validate_checksum(struct nvdimm_drvdata *ndd,
362 if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
365 sum_save = nsl_get_checksum(ndd, nd_label);
366 nsl_set_checksum(ndd, nd_label, 0);
367 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
368 nsl_set_checksum(ndd, nd_label, sum_save);
372 static void nsl_calculate_checksum(struct nvdimm_drvdata *ndd,
377 if (!ndd->cxl && !efi_namespace_label_has(ndd, checksum))
379 nsl_set_checksum(ndd, nd_label, 0);
380 sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
381 nsl_set_checksum(ndd, nd_label, sum);
384 static bool slot_valid(struct nvdimm_drvdata *ndd,
390 if (slot != nsl_get_slot(ndd, nd_label))
392 valid = nsl_validate_checksum(ndd, nd_label);
394 dev_dbg(ndd->dev, "fail checksum. slot: %d\n", slot);
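The checksum helpers follow the usual checksum-in-place dance: the checksum field lives inside the region being summed, so it is saved and zeroed before nd_fletcher64() runs over the whole label, then restored (when validating) or replaced with the fresh sum (when writing). slot_valid() additionally insists that the slot number stored in the label matches the slot it was read from. A self-contained sketch of the pattern, with a simplified fletcher64() and an illustrative label layout standing in for the kernel's nd_fletcher64() and label structs:

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

struct label {
	uint8_t body[240];	/* illustrative layout */
	uint64_t checksum;	/* covered by the sum over the whole label */
};

/* Simplified Fletcher-64 over 32-bit words; stand-in for nd_fletcher64(). */
static uint64_t fletcher64(const void *addr, size_t len)
{
	const uint32_t *buf = addr;
	uint32_t lo = 0;
	uint64_t hi = 0;
	size_t i;

	for (i = 0; i < len / sizeof(uint32_t); i++) {
		lo += buf[i];
		hi += lo;
	}
	return hi << 32 | lo;
}

static bool label_checksum_valid(struct label *l)
{
	uint64_t saved = l->checksum;
	uint64_t sum;

	l->checksum = 0;		/* the field must not bias the sum */
	sum = fletcher64(l, sizeof(*l));
	l->checksum = saved;		/* restore before comparing */

	return sum == saved;
}

static void label_checksum_update(struct label *l)
{
	l->checksum = 0;
	l->checksum = fletcher64(l, sizeof(*l));
}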
398 int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
404 if (!preamble_current(ndd, &nsindex, &free, &nslot))
415 nd_label = to_label(ndd, slot);
417 if (!slot_valid(ndd, nd_label, slot))
420 nsl_get_uuid(ndd, nd_label, &label_uuid);
421 flags = nsl_get_flags(ndd, nd_label);
423 res = nvdimm_allocate_dpa(ndd, &label_id,
424 nsl_get_dpa(ndd, nd_label),
425 nsl_get_rawsize(ndd, nd_label));
426 nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
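nd_label_reserve_dpa() walks the active labels and reserves each label's [dpa, dpa + rawsize) range so later allocations cannot collide with it. "Active" follows the bitmap convention: a set bit marks a free slot, so in-use labels sit at the clear bits (the kernel walks them with its little-endian bitmap iterators; the model below ignores that bit-order detail):

#include <stdint.h>
#include <stdbool.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Clear bit = slot holds an active label (set bit = free). */
static bool slot_is_active(const unsigned long *free, uint32_t slot)
{
	return !(free[slot / BITS_PER_LONG] & (1UL << (slot % BITS_PER_LONG)));
}

static uint32_t walk_active(const unsigned long *free, uint32_t nslot,
			    void (*visit)(uint32_t slot))
{
	uint32_t slot, count = 0;

	for (slot = 0; slot < nslot; slot++) {
		if (!slot_is_active(free, slot))
			continue;
		/* the kernel also re-validates each label via slot_valid() */
		visit(slot);
		count++;
	}
	return count;
}

The same walk underlies nd_label_active_count() and nd_label_active() further down in the listing.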
434 int nd_label_data_init(struct nvdimm_drvdata *ndd)
442 if (ndd->data)
445 if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
446 dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
447 ndd->nsarea.max_xfer, ndd->nsarea.config_size);
461 ndd->nslabel_size = 128;
462 read_size = sizeof_namespace_index(ndd) * 2;
467 config_size = ndd->nsarea.config_size;
468 ndd->data = kvzalloc(config_size, GFP_KERNEL);
469 if (!ndd->data)
479 max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
494 rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
499 ndd->ns_current = nd_label_validate(ndd);
500 if (ndd->ns_current < 0)
504 ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
507 nsindex = to_current_namespace_index(ndd);
508 nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
515 for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
520 memset(ndd->data + offset, 0, ndd->nslabel_size);
525 if (offset + ndd->nslabel_size <= read_size)
533 label_read_size = offset + ndd->nslabel_size - read_size;
542 rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
551 dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
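nd_label_data_init() allocates ndd->data to mirror the whole config area, then fills it in pieces: the two index blocks are read first (with nslabel_size provisionally set to 128 so an index size can be computed), and label slots are fetched later only as needed, with every transfer clamped to nsarea.max_xfer. A small model of the bounded-transfer read, where read_chunk() stands in for nvdimm_get_config_data(ndd, buf, offset, len):

#include <stdint.h>
#include <stddef.h>

/* Fetch 'size' bytes of config data into 'buf', never asking the DIMM
 * for more than 'max_xfer' bytes per call. Assumes max_xfer > 0 (the
 * kernel rejects a zero max_xfer before getting here). */
static int read_config(int (*read_chunk)(void *buf, size_t offset, size_t len),
		       uint8_t *buf, size_t size, size_t max_xfer)
{
	size_t offset;

	for (offset = 0; offset < size; offset += max_xfer) {
		size_t len = size - offset < max_xfer ? size - offset : max_xfer;
		int rc = read_chunk(buf + offset, offset, len);

		if (rc)
			return rc;	/* propagate the transport error */
	}
	return 0;
}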
556 int nd_label_active_count(struct nvdimm_drvdata *ndd)
563 if (!preamble_current(ndd, &nsindex, &free, &nslot))
569 nd_label = to_label(ndd, slot);
571 if (!slot_valid(ndd, nd_label, slot)) {
572 u32 label_slot = nsl_get_slot(ndd, nd_label);
573 u64 size = nsl_get_rawsize(ndd, nd_label);
574 u64 dpa = nsl_get_dpa(ndd, nd_label);
576 dev_dbg(ndd->dev,
586 struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
592 if (!preamble_current(ndd, &nsindex, &free, &nslot))
598 nd_label = to_label(ndd, slot);
599 if (!slot_valid(ndd, nd_label, slot))
603 return to_label(ndd, slot);
609 u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
615 if (!preamble_next(ndd, &nsindex, &free, &nslot))
618 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
629 bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
635 if (!preamble_next(ndd, &nsindex, &free, &nslot))
638 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
645 u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
651 WARN_ON(!is_nvdimm_bus_locked(ndd->dev));
653 if (!preamble_next(ndd, &nsindex, &free, &nslot))
654 return nvdimm_num_label_slots(ndd);
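Slot allocation is just bitmap manipulation on the next index block: nd_label_alloc_slot() claims the first set (free) bit, nd_label_free_slot() sets it again, and nd_label_nfree() counts the free bits, falling back to the full slot count when no valid index exists yet. A user-space model, again ignoring the little-endian bit-order helpers the kernel uses:

#include <stdint.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* Find the first set (free) bit, clear it, and return its slot number;
 * UINT32_MAX means the label area is full. */
static uint32_t slot_alloc(unsigned long *free, uint32_t nslot)
{
	uint32_t slot;

	for (slot = 0; slot < nslot; slot++) {
		unsigned long *word = &free[slot / BITS_PER_LONG];
		unsigned long mask = 1UL << (slot % BITS_PER_LONG);

		if (*word & mask) {
			*word &= ~mask;		/* claim the slot */
			return slot;
		}
	}
	return UINT32_MAX;
}

/* Return a slot to the pool by setting its bit again. */
static void slot_free(unsigned long *free, uint32_t slot)
{
	free[slot / BITS_PER_LONG] |= 1UL << (slot % BITS_PER_LONG);
}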
659 static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
668 nsindex = to_namespace_index(ndd, index);
670 nslot = nvdimm_num_label_slots(ndd);
676 nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
679 - (unsigned long) to_namespace_index(ndd, 0);
681 nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
682 offset = (unsigned long) to_namespace_index(ndd,
684 - (unsigned long) to_namespace_index(ndd, 0);
686 offset = (unsigned long) nd_label_base(ndd)
687 - (unsigned long) to_namespace_index(ndd, 0);
691 if (sizeof_namespace_label(ndd) < 256)
705 checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
707 rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
708 nsindex, sizeof_namespace_index(ndd));
716 WARN_ON(index != ndd->ns_next);
717 nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
718 ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
719 ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
720 WARN_ON(ndd->ns_current == ndd->ns_next);
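nd_label_write_index() fills in an index block for the given slot count (myoff/otheroff/labeloff, mysize, the labelsize >> 8 encoding, the free bitmap and sequence number), checksums it with nd_fletcher64(), writes it at its own offset, and then, outside of initialization, rotates the roles: the block just written becomes current and the other becomes next. The sequence number is the 2-bit cyclic counter from the label format, advancing 1 -> 2 -> 3 -> 1. A model of the rotation; seq_next() sketches the sequence advance (the kernel's helper for this does not reference ndd and so is not visible in this listing):

#include <stdint.h>

/* Advance the 2-bit cyclic sequence number: 1 -> 2 -> 3 -> 1
 * (0 is the invalid/unwritten value). */
static uint32_t seq_next(uint32_t seq)
{
	static const uint32_t next[] = { 0, 2, 3, 1 };

	return next[seq & 3];
}

struct index_state {
	int current;	/* block holding the live label set (0 or 1) */
	int next;	/* block that receives the next update */
};

/* After the write to 'next' succeeds, the two blocks swap roles, which
 * is what the ns_current/ns_next updates in the listing do. */
static void commit_index(struct index_state *s)
{
	s->current = s->next;
	s->next = s->current ^ 1;
}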
725 static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
729 - (unsigned long) to_namespace_index(ndd, 0);
811 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
812 u32 slot = to_slot(ndd, victim->label);
814 dev_dbg(ndd->dev, "free: %d\n", slot);
815 nd_label_free_slot(ndd, slot);
819 static void nsl_set_type_guid(struct nvdimm_drvdata *ndd,
822 if (efi_namespace_label_has(ndd, type_guid))
826 bool nsl_validate_type_guid(struct nvdimm_drvdata *ndd,
829 if (ndd->cxl || !efi_namespace_label_has(ndd, type_guid))
832 dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n", guid,
839 static void nsl_set_claim_class(struct nvdimm_drvdata *ndd,
843 if (ndd->cxl) {
852 if (!efi_namespace_label_has(ndd, abstraction_guid))
859 enum nvdimm_claim_class nsl_get_claim_class(struct nvdimm_drvdata *ndd,
862 if (ndd->cxl) {
868 if (!efi_namespace_label_has(ndd, abstraction_guid))
879 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
891 if (!preamble_next(ndd, &nsindex, &free, &nslot))
896 for_each_dpa_resource(ndd, res)
906 slot = nd_label_alloc_slot(ndd);
909 dev_dbg(ndd->dev, "allocated: %d\n", slot);
911 nd_label = to_label(ndd, slot);
912 memset(nd_label, 0, sizeof_namespace_label(ndd));
913 nsl_set_uuid(ndd, nd_label, nspm->uuid);
914 nsl_set_name(ndd, nd_label, nspm->alt_name);
915 nsl_set_flags(ndd, nd_label, flags);
916 nsl_set_nlabel(ndd, nd_label, nd_region->ndr_mappings);
917 nsl_set_nrange(ndd, nd_label, 1);
918 nsl_set_position(ndd, nd_label, pos);
919 nsl_set_isetcookie(ndd, nd_label, cookie);
920 nsl_set_rawsize(ndd, nd_label, resource_size(res));
921 nsl_set_lbasize(ndd, nd_label, nspm->lbasize);
922 nsl_set_dpa(ndd, nd_label, res->start);
923 nsl_set_slot(ndd, nd_label, slot);
924 nsl_set_type_guid(ndd, nd_label, &nd_set->type_guid);
925 nsl_set_claim_class(ndd, nd_label, ndns->claim_class);
926 nsl_calculate_checksum(ndd, nd_label);
927 nd_dbg_dpa(nd_region, ndd, res, "\n");
930 offset = nd_label_offset(ndd, nd_label);
931 rc = nvdimm_set_config_data(ndd, offset, nd_label,
932 sizeof_namespace_label(ndd));
942 nsl_uuid_equal(ndd, label_ent->label, nspm->uuid))
947 rc = nd_label_write_index(ndd, ndd->ns_next,
958 to_slot(ndd, nd_label));
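Putting the pieces together, __pmem_label_update() orders its writes so a crash never loses the old label set: the new label is checksummed and written into a freshly allocated slot first, the victim slot is released in the in-memory bitmap of the next index, and a single write of that index block (the commit point) makes the new label visible and the old slot free at the same time. A sketch of that orchestration with callbacks standing in for the kernel helpers named in the listing; the ND_LABEL_REAP bookkeeping on nd_mapping->labels is omitted:

#include <stdint.h>

struct label_ops {
	uint32_t (*alloc_slot)(void);			/* nd_label_alloc_slot() */
	void (*fill_and_checksum)(uint32_t slot);	/* nsl_set_*() + nsl_calculate_checksum() */
	int (*write_label)(uint32_t slot);		/* nvdimm_set_config_data() at the label offset */
	void (*free_slot)(uint32_t slot);		/* nd_label_free_slot() */
	int (*write_index)(void);			/* nd_label_write_index(ns_next, ...) */
};

static int label_update(const struct label_ops *ops, uint32_t victim_slot)
{
	uint32_t slot = ops->alloc_slot();	/* stages a cleared bit in the next index's bitmap */
	int rc;

	if (slot == UINT32_MAX)
		return -1;			/* label area is full */

	ops->fill_and_checksum(slot);
	rc = ops->write_label(slot);		/* new label lands in space the current index ignores */
	if (rc)
		return rc;

	if (victim_slot != UINT32_MAX)
		ops->free_slot(victim_slot);	/* staged in the same in-memory bitmap */

	return ops->write_index();		/* one index write commits alloc, free and new label */
}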
972 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
992 if (ndd->ns_current == -1 || ndd->ns_next == -1)
997 nsindex = to_namespace_index(ndd, 0);
998 memset(nsindex, 0, ndd->nsarea.config_size);
1000 int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
1005 ndd->ns_next = 1;
1006 ndd->ns_current = 0;
1013 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1025 if (!preamble_next(ndd, &nsindex, &free, &nslot))
1035 if (!nsl_uuid_equal(ndd, nd_label, uuid))
1038 slot = to_slot(ndd, nd_label);
1039 nd_label_free_slot(ndd, slot);
1040 dev_dbg(ndd->dev, "free: %d\n", slot);
1048 dev_dbg(ndd->dev, "no more active labels\n");
1052 return nd_label_write_index(ndd, ndd->ns_next,
1063 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1074 for_each_dpa_resource(ndd, res)