Lines matching refs: nd_region (see the usage sketch after the listing)
27 struct nd_region *nd_region = to_nd_region(dev->parent);
30 ida_simple_remove(&nd_region->ns_ida, nspm->id);
90 struct nd_region *nd_region = to_nd_region(dev->parent);
97 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
142 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
159 sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
162 sprintf(name, "pmem%d%s", nd_region->id,
186 struct nd_region *nd_region = to_nd_region(dev->parent);
188 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
233 static int nd_namespace_label_update(struct nd_region *nd_region,
254 return nd_pmem_namespace_label_update(nd_region, nspm, size);
262 struct nd_region *nd_region = to_nd_region(dev->parent);
270 rc = nd_namespace_label_update(nd_region, dev);
294 static int scan_free(struct nd_region *nd_region,
314 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
323 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
332 * @nd_region: the set of dimms to reclaim @n bytes from
341 static int shrink_dpa_allocation(struct nd_region *nd_region,
346 for (i = 0; i < nd_region->ndr_mappings; i++) {
347 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
350 rc = scan_free(nd_region, nd_mapping, label_id, n);
359 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
371 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
378 * @nd_region: hosting region of the free space
392 static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
400 align = nd_region->align / nd_region->ndr_mappings;
432 static resource_size_t scan_allocate(struct nd_region *nd_region,
468 space_valid(nd_region, ndd, label_id, NULL, next, exist,
479 space_valid(nd_region, ndd, label_id, res, next, exist,
490 space_valid(nd_region, ndd, label_id, res, next, exist,
549 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
570 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
574 static int merge_dpa(struct nd_region *nd_region,
595 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
608 struct nd_region *nd_region;
615 nd_region = to_nd_region(dev);
616 if (nd_region->ndr_mappings == 0)
621 for (i = 0; i < nd_region->ndr_mappings; i++) {
622 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
628 n = nd_pmem_available_dpa(nd_region, nd_mapping);
631 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
632 dev_WARN_ONCE(&nd_region->dev, rem,
655 * @nd_region: the set of dimms to allocate @n more bytes from
666 static int grow_dpa_allocation(struct nd_region *nd_region,
671 for (i = 0; i < nd_region->ndr_mappings; i++) {
672 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
676 rem = scan_allocate(nd_region, nd_mapping, label_id, rem);
677 dev_WARN_ONCE(&nd_region->dev, rem,
684 rc = merge_dpa(nd_region, nd_mapping, label_id);
692 static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
704 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
720 * nd_region->ndr_mappings;
729 res->start = nd_region->ndr_start + offset;
746 struct nd_region *nd_region = to_nd_region(dev->parent);
771 if (nd_region->ndr_mappings == 0) {
776 div_u64_rem(val, nd_region->align, &remainder);
779 nd_region->align / SZ_1K);
784 for (i = 0; i < nd_region->ndr_mappings; i++) {
785 nd_mapping = &nd_region->mapping[i];
797 available = nd_region_allocatable_dpa(nd_region);
805 val = div_u64(val, nd_region->ndr_mappings);
806 allocated = div_u64(allocated, nd_region->ndr_mappings);
808 rc = shrink_dpa_allocation(nd_region, &label_id,
811 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
819 nd_namespace_pmem_set_resource(nd_region, nspm,
820 val * nd_region->ndr_mappings);
829 if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
838 struct nd_region *nd_region = to_nd_region(dev->parent);
851 rc = nd_namespace_label_update(nd_region, dev);
903 struct nd_region *nd_region = to_nd_region(dev->parent);
905 for (i = 0; i < nd_region->ndr_mappings; i++) {
906 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
950 * @nd_region: parent region so we can update all dimms in the set
955 static int namespace_update_uuid(struct nd_region *nd_region,
975 for (i = 0; i < nd_region->ndr_mappings; i++) {
976 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
991 for (i = 0; i < nd_region->ndr_mappings; i++) {
992 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1027 struct nd_region *nd_region = to_nd_region(dev->parent);
1047 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
1049 rc = nd_namespace_label_update(nd_region, dev);
1101 struct nd_region *nd_region = to_nd_region(dev->parent);
1121 rc = nd_namespace_label_update(nd_region, dev);
1134 struct nd_region *nd_region = to_nd_region(dev->parent);
1152 for (i = 0; i < nd_region->ndr_mappings; i++) {
1153 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1170 struct nd_region *nd_region = to_nd_region(dev->parent);
1173 for (i = 0; i < nd_region->ndr_mappings; i++) {
1174 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1271 struct nd_region *nd_region = to_nd_region(dev->parent);
1279 rc = nd_namespace_label_update(nd_region, dev);
1525 static struct device **create_namespace_io(struct nd_region *nd_region)
1543 dev->parent = &nd_region->dev;
1545 res->name = dev_name(&nd_region->dev);
1547 res->start = nd_region->ndr_start;
1548 res->end = res->start + nd_region->ndr_size - 1;
1554 static bool has_uuid_at_pos(struct nd_region *nd_region, const uuid_t *uuid,
1560 for (i = 0; i < nd_region->ndr_mappings; i++) {
1561 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1562 struct nd_interleave_set *nd_set = nd_region->nd_set;
1590 if (!nsl_validate_nlabel(nd_region, ndd, nd_label))
1603 static int select_pmem_id(struct nd_region *nd_region, const uuid_t *pmem_id)
1610 for (i = 0; i < nd_region->ndr_mappings; i++) {
1611 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1644 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1658 * @nd_region: region with mappings to validate
1662 static struct device *create_namespace_pmem(struct nd_region *nd_region,
1669 u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1670 u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1681 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1686 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1691 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1702 dev->parent = &nd_region->dev;
1704 res->name = dev_name(&nd_region->dev);
1707 for (i = 0; i < nd_region->ndr_mappings; i++) {
1709 if (has_uuid_at_pos(nd_region, &uuid, cookie, i))
1711 if (has_uuid_at_pos(nd_region, &uuid, altcookie, i))
1716 if (i < nd_region->ndr_mappings) {
1717 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
1721 * position (from 0 to nd_region->ndr_mappings - 1), or if we
1724 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
1736 rc = select_pmem_id(nd_region, &uuid);
1741 for (i = 0; i < nd_region->ndr_mappings; i++) {
1745 nd_mapping = &nd_region->mapping[i];
1774 nd_namespace_pmem_set_resource(nd_region, nspm, size);
1781 dev_dbg(&nd_region->dev, "invalid label(s)\n");
1784 dev_dbg(&nd_region->dev, "label not found\n");
1787 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
1793 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
1799 if (!is_memory(&nd_region->dev))
1808 dev->parent = &nd_region->dev;
1810 res->name = dev_name(&nd_region->dev);
1813 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1818 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
1819 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
1826 void nd_region_create_ns_seed(struct nd_region *nd_region)
1828 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1830 if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
1833 nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
1839 if (!nd_region->ns_seed)
1840 dev_err(&nd_region->dev, "failed to create namespace\n");
1842 device_initialize(nd_region->ns_seed);
1843 lockdep_set_class(&nd_region->ns_seed->mutex,
1845 nd_device_register(nd_region->ns_seed);
1849 void nd_region_create_dax_seed(struct nd_region *nd_region)
1851 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1852 nd_region->dax_seed = nd_dax_create(nd_region);
1857 if (!nd_region->dax_seed)
1858 dev_err(&nd_region->dev, "failed to create dax namespace\n");
1861 void nd_region_create_pfn_seed(struct nd_region *nd_region)
1863 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1864 nd_region->pfn_seed = nd_pfn_create(nd_region);
1869 if (!nd_region->pfn_seed)
1870 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1873 void nd_region_create_btt_seed(struct nd_region *nd_region)
1875 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1876 nd_region->btt_seed = nd_btt_create(nd_region);
1881 if (!nd_region->btt_seed)
1882 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1885 static int add_namespace_resource(struct nd_region *nd_region,
1889 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1903 dev_err(&nd_region->dev,
1927 static struct device **scan_labels(struct nd_region *nd_region)
1932 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1949 i = add_namespace_resource(nd_region, nd_label, devs, count);
1961 dev = create_namespace_pmem(nd_region, nd_mapping, nd_label);
1978 dev_dbg(&nd_region->dev, "discovered %d namespace%s\n", count,
1996 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
1997 dev->parent = &nd_region->dev;
1999 } else if (is_memory(&nd_region->dev)) {
2001 for (i = 0; i < nd_region->ndr_mappings; i++) {
2006 nd_mapping = &nd_region->mapping[i];
2037 static struct device **create_namespaces(struct nd_region *nd_region)
2043 if (nd_region->ndr_mappings == 0)
2047 for (i = 0; i < nd_region->ndr_mappings; i++) {
2048 nd_mapping = &nd_region->mapping[i];
2052 devs = scan_labels(nd_region);
2054 for (i = 0; i < nd_region->ndr_mappings; i++) {
2055 int reverse = nd_region->ndr_mappings - 1 - i;
2057 nd_mapping = &nd_region->mapping[reverse];
2066 struct nd_region *nd_region = region;
2069 for (i = 0; i < nd_region->ndr_mappings; i++) {
2070 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2085 static int init_active_labels(struct nd_region *nd_region)
2089 for (i = 0; i < nd_region->ndr_mappings; i++) {
2090 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2108 dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2141 if (i < nd_region->ndr_mappings)
2146 deactivate_labels(nd_region);
2150 return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
2151 nd_region);
2154 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2160 nvdimm_bus_lock(&nd_region->dev);
2161 rc = init_active_labels(nd_region);
2163 nvdimm_bus_unlock(&nd_region->dev);
2167 type = nd_region_to_nstype(nd_region);
2170 devs = create_namespace_io(nd_region);
2173 devs = create_namespaces(nd_region);
2178 nvdimm_bus_unlock(&nd_region->dev);
2191 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2199 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2205 nd_region->ns_seed = devs[0];
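
The matches above appear to come from the libnvdimm namespace code (drivers/nvdimm/namespace_devs.c in the kernel tree), and they repeat one access pattern: resolve the hosting nd_region from a namespace device's parent with to_nd_region(dev->parent), then walk the region's DIMM mappings via ndr_mappings and mapping[]. The sketch below condenses that pattern. It is a minimal illustration, not driver code: walk_region_mappings() is a hypothetical helper, the includes assume the libnvdimm-internal headers, and the caller is assumed to hold nvdimm_bus_lock(&nd_region->dev) the way nd_region_register_namespaces() does in the listing.

/*
 * Hypothetical helper, not part of the driver.  It only uses accessors
 * and fields visible in the matches above (to_nd_region, to_ndd,
 * ndr_mappings, mapping[]).  Caller is assumed to hold
 * nvdimm_bus_lock(&nd_region->dev).
 */
#include "nd-core.h"
#include "nd.h"

static int walk_region_mappings(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i;

	/* nothing to walk if the region has no DIMM mappings */
	if (nd_region->ndr_mappings == 0)
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* a NULL ndd means this DIMM is not enabled yet */
		if (!ndd)
			continue;

		/*
		 * Per-DIMM work goes here.  Note that namespace capacity
		 * is apportioned evenly across the mappings; the size-store
		 * path in the listing divides the requested size by
		 * nd_region->ndr_mappings before allocating DPA.
		 */
		dev_dbg(&nd_region->dev, "mapping %d is active\n", i);
	}

	return 0;
}

The same walk shape appears in shrink_dpa_allocation(), grow_dpa_allocation(), namespace_update_uuid(), and init_active_labels() in the matches above.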