Lines matching references to nd_mapping
295 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
298 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
347 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
350 rc = scan_free(nd_region, nd_mapping, label_id, n);
359 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
362 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
367 res = nvdimm_allocate_dpa(ndd, label_id, nd_mapping->start, n);
433 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
436 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
437 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
446 valid.start = nd_mapping->start;
458 /* ignore resources outside this nd_mapping */
461 if (res->end < nd_mapping->start)
465 if (!first++ && res->start > nd_mapping->start) {
466 valid.start = nd_mapping->start;
570 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
575 struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
577 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
622 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
625 if (nd_mapping->nvdimm != nvdimm)
628 n = nd_pmem_available_dpa(nd_region, nd_mapping);
631 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
643 struct nd_mapping *nd_mapping)
645 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
672 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
676 rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
684 rc = merge_dpa(nd_region, nd_mapping, label_id);
704 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
705 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
719 offset = (res->start - nd_mapping->start)
748 struct nd_mapping *nd_mapping;
785 nd_mapping = &nd_region->mapping[i];
786 ndd = to_ndd(nd_mapping);
906 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
907 struct nvdimm *nvdimm = nd_mapping->nvdimm;
976 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
985 if (list_empty(&nd_mapping->labels))
992 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
993 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1002 mutex_lock(&nd_mapping->lock);
1003 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1016 mutex_unlock(&nd_mapping->lock);
1153 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1154 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1174 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1175 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1561 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1563 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1567 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1611 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1612 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1617 lockdep_assert_held(&nd_mapping->lock);
1618 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1636 hw_start = nd_mapping->start;
1637 hw_end = hw_start + nd_mapping->size;
1651 list_move(&label_ent->list, &nd_mapping->labels);
1663 struct nd_mapping *nd_mapping,
1666 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1745 nd_mapping = &nd_region->mapping[i];
1746 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1755 ndd = to_ndd(nd_mapping);
1889 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1890 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1932 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1933 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1934 resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;
1937 list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
1945 if (nsl_get_dpa(ndd, nd_label) < nd_mapping->start ||
1961 dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
1985 nd_mapping_free_labels(nd_mapping);
2006 nd_mapping = &nd_region->mapping[i];
2007 if (list_empty(&nd_mapping->labels)) {
2013 list_for_each_safe(l, e, &nd_mapping->labels) {
2018 nd_mapping_free_labels(nd_mapping);
2019 list_splice_init(&list, &nd_mapping->labels);
2039 struct nd_mapping *nd_mapping;
2048 nd_mapping = &nd_region->mapping[i];
2049 mutex_lock_nested(&nd_mapping->lock, i);
2057 nd_mapping = &nd_region->mapping[reverse];
2058 mutex_unlock(&nd_mapping->lock);
2070 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2071 struct nvdimm_drvdata *ndd = nd_mapping->ndd;
2072 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2074 mutex_lock(&nd_mapping->lock);
2075 nd_mapping_free_labels(nd_mapping);
2076 mutex_unlock(&nd_mapping->lock);
2079 nd_mapping->ndd = NULL;
2090 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2091 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
2092 struct nvdimm *nvdimm = nd_mapping->nvdimm;
2109 dev_name(&nd_mapping->nvdimm->dev),
2115 nd_mapping->ndd = ndd;
2132 mutex_lock(&nd_mapping->lock);
2133 list_add_tail(&label_ent->list, &nd_mapping->labels);
2134 mutex_unlock(&nd_mapping->lock);
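
The matches above (apparently from the libnvdimm namespace code, e.g. drivers/nvdimm/namespace_devs.c) repeat one pattern: index into nd_region->mapping[i], resolve the per-DIMM driver data with to_ndd(), and walk the mapping's label list under nd_mapping->lock. The helper below is a minimal sketch of that pattern, not a function from the source: the name count_region_labels() and its label-counting purpose are hypothetical, while the fields and calls it uses (ndr_mappings, mapping[], to_ndd(), labels, lock, struct nd_label_ent) are the ones the matches reference.

    /*
     * Hypothetical helper (not in the listing above) showing the recurring
     * nd_mapping access pattern: iterate the region's mappings, convert each
     * to its nvdimm_drvdata, and inspect the label list under the mapping lock.
     */
    static int count_region_labels(struct nd_region *nd_region)
    {
    	int count = 0;
    	int i;

    	for (i = 0; i < nd_region->ndr_mappings; i++) {
    		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
    		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
    		struct nd_label_ent *label_ent;

    		/* a disabled DIMM has no label data to walk */
    		if (!ndd)
    			continue;

    		mutex_lock(&nd_mapping->lock);
    		list_for_each_entry(label_ent, &nd_mapping->labels, list)
    			if (label_ent->label)
    				count++;
    		mutex_unlock(&nd_mapping->lock);
    	}

    	return count;
    }

When a path needs every mapping's lock held at once, the listing (around lines 2048-2058) shows mutex_lock_nested(&nd_mapping->lock, i) taken in mapping order with the index as the lockdep subclass, and the locks released in reverse order.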