Lines Matching refs:nd_region

26 struct nd_region *nd_region = to_nd_region(dev->parent);
29 ida_simple_remove(&nd_region->ns_ida, nspm->id);
38 struct nd_region *nd_region = to_nd_region(dev->parent);
41 ida_simple_remove(&nd_region->ns_ida, nsblk->id);
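
The matches at source lines 26-41 are the two namespace release callbacks; each looks up the parent region and hands the per-namespace id back to the region's ns_ida before freeing. A minimal sketch of the pmem variant, assuming the usual to_nd_namespace_pmem() container_of helper and kfree()-based teardown:

    static void nd_namespace_pmem_release(struct device *dev)
    {
            struct nd_region *nd_region = to_nd_region(dev->parent);
            struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

            /* return the "namespace<region>.<id>" suffix to the region's pool */
            if (nspm->id >= 0)
                    ida_simple_remove(&nd_region->ns_ida, nspm->id);
            kfree(nspm);
    }

The blk variant at line 41 follows the same shape with nsblk->id.
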
107 struct nd_region *nd_region = to_nd_region(dev->parent);
114 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
159 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
176 sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
179 sprintf(name, "pmem%d%s", nd_region->id,
185 sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
218 struct nd_region *nd_region = to_nd_region(dev->parent);
220 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
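
Source lines 218-220 are effectively the whole body of the nstype sysfs attribute: the namespace device defers to its parent region to report which namespace type it is. Filled out as a read-only attribute (the DEVICE_ATTR_RO() wiring is assumed, not shown in the matches):

    static ssize_t nstype_show(struct device *dev,
                    struct device_attribute *attr, char *buf)
    {
            struct nd_region *nd_region = to_nd_region(dev->parent);

            return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
    }
    static DEVICE_ATTR_RO(nstype);
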
271 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
272 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
289 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
290 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
346 static int nd_namespace_label_update(struct nd_region *nd_region,
367 return nd_pmem_namespace_label_update(nd_region, nspm, size);
377 return nd_blk_namespace_label_update(nd_region, nsblk, size);
385 struct nd_region *nd_region = to_nd_region(dev->parent);
393 rc = nd_namespace_label_update(nd_region, dev);
421 static int scan_free(struct nd_region *nd_region,
443 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
461 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
470 * @nd_region: the set of dimms to reclaim @n bytes from
479 static int shrink_dpa_allocation(struct nd_region *nd_region,
484 for (i = 0; i < nd_region->ndr_mappings; i++) {
485 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
488 rc = scan_free(nd_region, nd_mapping, label_id, n);
497 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
517 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
524 * @nd_region: hosting region of the free space
538 static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
547 align = nd_region->align / nd_region->ndr_mappings;
558 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
566 WARN_ON(!is_nd_blk(&nd_region->dev));
567 nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
594 static resource_size_t scan_allocate(struct nd_region *nd_region,
631 space_valid(nd_region, ndd, label_id, NULL, next, exist,
642 space_valid(nd_region, ndd, label_id, res, next, exist,
653 space_valid(nd_region, ndd, label_id, res, next, exist,
716 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
742 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
746 static int merge_dpa(struct nd_region *nd_region,
767 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
780 struct nd_region *nd_region;
787 nd_region = to_nd_region(dev);
788 if (nd_region->ndr_mappings == 0)
793 for (i = 0; i < nd_region->ndr_mappings; i++) {
794 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
800 n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
803 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
804 dev_WARN_ONCE(&nd_region->dev, rem,
840 * @nd_region: the set of dimms to allocate @n more bytes from
851 static int grow_dpa_allocation(struct nd_region *nd_region,
854 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
858 for (i = 0; i < nd_region->ndr_mappings; i++) {
859 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
875 rem = scan_allocate(nd_region, nd_mapping,
885 dev_WARN_ONCE(&nd_region->dev, rem,
892 rc = merge_dpa(nd_region, nd_mapping, label_id);
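
Lines 851-892 outline the growth path: for every mapping in the region, free DPA is claimed with scan_allocate() and the resulting resources are coalesced with merge_dpa() under the same label id. A condensed, hedged sketch of that loop (error handling simplified; the exact warning text and any extra allocation hints in the in-tree call are not shown by the matches):

    static int grow_dpa_allocation(struct nd_region *nd_region,
                    struct nd_label_id *label_id, resource_size_t n)
    {
            int i, rc;

            for (i = 0; i < nd_region->ndr_mappings; i++) {
                    struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                    resource_size_t rem;

                    /* carve @n bytes of free DPA out of this mapping */
                    rem = scan_allocate(nd_region, nd_mapping, label_id, n);
                    dev_WARN_ONCE(&nd_region->dev, rem,
                                    "allocation underrun: %#llx of %#llx bytes\n",
                                    (unsigned long long) n - rem,
                                    (unsigned long long) n);
                    if (rem)
                            return -ENXIO;

                    /* coalesce adjacent allocations that share this label_id */
                    rc = merge_dpa(nd_region, nd_mapping, label_id);
                    if (rc)
                            return rc;
            }

            return 0;
    }
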
900 static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
912 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
928 * nd_region->ndr_mappings;
937 res->start = nd_region->ndr_start + offset;
953 struct nd_region *nd_region = to_nd_region(dev->parent);
984 if (nd_region->ndr_mappings == 0) {
989 div_u64_rem(val, nd_region->align, &remainder);
992 nd_region->align / SZ_1K);
997 for (i = 0; i < nd_region->ndr_mappings; i++) {
998 nd_mapping = &nd_region->mapping[i];
1010 available = nd_region_allocatable_dpa(nd_region);
1018 val = div_u64(val, nd_region->ndr_mappings);
1019 allocated = div_u64(allocated, nd_region->ndr_mappings);
1021 rc = shrink_dpa_allocation(nd_region, &label_id,
1024 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
1032 nd_namespace_pmem_set_resource(nd_region, nspm,
1033 val * nd_region->ndr_mappings);
1042 if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
1051 struct nd_region *nd_region = to_nd_region(dev->parent);
1065 rc = nd_namespace_label_update(nd_region, dev);
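
Source lines 953-1065 trace the size_store() path: reject a request that is not a multiple of nd_region->align, divide it by the number of mappings to get the per-dimm share, shrink or grow the DPA allocation accordingly, and finally let nd_namespace_label_update() write the labels back. A hedged condensation of that sequence (the resize_namespace() wrapper name is invented for illustration; locking, claim checks, and the resource/label updates are left out):

    static int resize_namespace(struct device *dev, unsigned long long val)
    {
            struct nd_region *nd_region = to_nd_region(dev->parent);
            resource_size_t allocated = 0;
            struct nd_label_id label_id;
            u32 remainder;
            int rc;

            if (nd_region->ndr_mappings == 0)
                    return -ENXIO;

            /* the new size must be a multiple of the region alignment */
            div_u64_rem(val, nd_region->align, &remainder);
            if (remainder)
                    return -EINVAL;

            /*
             * ... derive label_id from the namespace uuid and sum the
             * bytes already allocated to it across the mappings ...
             */

            /* the request is striped evenly over every mapping in the set */
            val = div_u64(val, nd_region->ndr_mappings);
            allocated = div_u64(allocated, nd_region->ndr_mappings);

            if (val < allocated)
                    rc = shrink_dpa_allocation(nd_region, &label_id,
                                    allocated - val);
            else
                    rc = grow_dpa_allocation(nd_region, &label_id,
                                    val - allocated);

            return rc;
    }
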
1127 struct nd_region *nd_region = to_nd_region(dev->parent);
1129 for (i = 0; i < nd_region->ndr_mappings; i++) {
1130 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1178 * @nd_region: parent region so we can update all dimms in the set
1183 static int namespace_update_uuid(struct nd_region *nd_region,
1203 for (i = 0; i < nd_region->ndr_mappings; i++) {
1204 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1219 for (i = 0; i < nd_region->ndr_mappings; i++) {
1220 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1253 struct nd_region *nd_region = to_nd_region(dev->parent);
1277 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
1279 rc = nd_namespace_label_update(nd_region, dev);
1341 struct nd_region *nd_region = to_nd_region(dev->parent);
1366 rc = nd_namespace_label_update(nd_region, dev);
1379 struct nd_region *nd_region = to_nd_region(dev->parent);
1402 for (i = 0; i < nd_region->ndr_mappings; i++) {
1403 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1420 struct nd_region *nd_region = to_nd_region(dev->parent);
1423 for (i = 0; i < nd_region->ndr_mappings; i++) {
1424 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1521 struct nd_region *nd_region = to_nd_region(dev->parent);
1529 rc = nd_namespace_label_update(nd_region, dev);
1805 static struct device **create_namespace_io(struct nd_region *nd_region)
1823 dev->parent = &nd_region->dev;
1825 res->name = dev_name(&nd_region->dev);
1827 res->start = nd_region->ndr_start;
1828 res->end = res->start + nd_region->ndr_size - 1;
1834 static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1840 for (i = 0; i < nd_region->ndr_mappings; i++) {
1841 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1842 struct nd_interleave_set *nd_set = nd_region->nd_set;
1878 if (nlabel != nd_region->ndr_mappings)
1891 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1898 for (i = 0; i < nd_region->ndr_mappings; i++) {
1899 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1932 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1945 * @nd_region: region with mappings to validate
1949 static struct device *create_namespace_pmem(struct nd_region *nd_region,
1953 u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
1954 u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
1965 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1970 dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
1975 dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
1986 dev->parent = &nd_region->dev;
1988 res->name = dev_name(&nd_region->dev);
1991 for (i = 0; i < nd_region->ndr_mappings; i++) {
1992 if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
1994 if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
1999 if (i < nd_region->ndr_mappings) {
2000 struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
2004 * position (from 0 to nd_region->ndr_mappings - 1), or if we
2007 dev_err(&nd_region->dev, "%s missing label for %pUb\n",
2021 rc = select_pmem_id(nd_region, nd_label->uuid);
2026 for (i = 0; i < nd_region->ndr_mappings; i++) {
2030 nd_mapping = &nd_region->mapping[i];
2061 nd_namespace_pmem_set_resource(nd_region, nspm, size);
2068 dev_dbg(&nd_region->dev, "invalid label(s)\n");
2071 dev_dbg(&nd_region->dev, "label not found\n");
2074 dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
2080 struct resource *nsblk_add_resource(struct nd_region *nd_region,
2103 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
2108 if (!is_nd_blk(&nd_region->dev))
2117 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2122 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
2123 dev->parent = &nd_region->dev;
2128 static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
2134 if (!is_memory(&nd_region->dev))
2143 dev->parent = &nd_region->dev;
2145 res->name = dev_name(&nd_region->dev);
2148 nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
2153 dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
2154 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
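
Lines 2103-2154 are the blk and pmem seed-namespace constructors; the pmem one allocates the namespace object, parents it under the region, pulls a fresh id from the region's ns_ida, and names the device "namespace<region>.<id>". A trimmed sketch (device-type and sysfs-group setup are omitted; the nsio.common.dev embedding follows include/linux/nd.h):

    static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
    {
            struct nd_namespace_pmem *nspm;
            struct resource *res;
            struct device *dev;

            if (!is_memory(&nd_region->dev))
                    return NULL;

            nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
            if (!nspm)
                    return NULL;

            dev = &nspm->nsio.common.dev;
            dev->parent = &nd_region->dev;
            res = &nspm->nsio.res;
            res->name = dev_name(&nd_region->dev);
            res->flags = IORESOURCE_MEM;

            /* per-region namespace numbering comes from the region's IDA */
            nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
            if (nspm->id < 0) {
                    kfree(nspm);
                    return NULL;
            }
            dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
            nd_namespace_pmem_set_resource(nd_region, nspm, 0);

            return dev;
    }
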
2159 void nd_region_create_ns_seed(struct nd_region *nd_region)
2161 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2163 if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
2166 if (is_nd_blk(&nd_region->dev))
2167 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
2169 nd_region->ns_seed = nd_namespace_pmem_create(nd_region);
2175 if (!nd_region->ns_seed)
2176 dev_err(&nd_region->dev, "failed to create %s namespace\n",
2177 is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
2179 nd_device_register(nd_region->ns_seed);
2182 void nd_region_create_dax_seed(struct nd_region *nd_region)
2184 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2185 nd_region->dax_seed = nd_dax_create(nd_region);
2190 if (!nd_region->dax_seed)
2191 dev_err(&nd_region->dev, "failed to create dax namespace\n");
2194 void nd_region_create_pfn_seed(struct nd_region *nd_region)
2196 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2197 nd_region->pfn_seed = nd_pfn_create(nd_region);
2202 if (!nd_region->pfn_seed)
2203 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
2206 void nd_region_create_btt_seed(struct nd_region *nd_region)
2208 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
2209 nd_region->btt_seed = nd_btt_create(nd_region);
2214 if (!nd_region->btt_seed)
2215 dev_err(&nd_region->dev, "failed to create btt namespace\n");
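
Lines 2159-2215 are the seed constructors for namespaces, dax, pfn, and btt devices; each asserts that the nvdimm bus lock is held, asks its create helper for a new device, and logs on failure (the namespace variant additionally registers the new seed, line 2179). Reassembled from the matched lines, the btt variant reads approximately:

    void nd_region_create_btt_seed(struct nd_region *nd_region)
    {
            WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
            nd_region->btt_seed = nd_btt_create(nd_region);
            if (!nd_region->btt_seed)
                    dev_err(&nd_region->dev, "failed to create btt namespace\n");
    }
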
2218 static int add_namespace_resource(struct nd_region *nd_region,
2222 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2238 res = nsblk_add_resource(nd_region, ndd,
2243 nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
2245 dev_err(&nd_region->dev,
2256 static struct device *create_namespace_blk(struct nd_region *nd_region,
2260 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2261 struct nd_interleave_set *nd_set = nd_region->nd_set;
2289 dev->parent = &nd_region->dev;
2306 res = nsblk_add_resource(nd_region, ndd, nsblk,
2310 nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
2342 static struct device **scan_labels(struct nd_region *nd_region)
2347 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
2359 if (is_nd_blk(&nd_region->dev)
2370 i = add_namespace_resource(nd_region, nd_label, devs, count);
2382 if (is_nd_blk(&nd_region->dev))
2383 dev = create_namespace_blk(nd_region, nd_label, count);
2389 dev = create_namespace_pmem(nd_region, nsindex, nd_label);
2408 dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
2409 count, is_nd_blk(&nd_region->dev)
2419 if (is_nd_blk(&nd_region->dev)) {
2435 nd_namespace_pmem_set_resource(nd_region, nspm, 0);
2437 dev->parent = &nd_region->dev;
2439 } else if (is_memory(&nd_region->dev)) {
2441 for (i = 0; i < nd_region->ndr_mappings; i++) {
2446 nd_mapping = &nd_region->mapping[i];
2471 if (is_nd_blk(&nd_region->dev))
2480 static struct device **create_namespaces(struct nd_region *nd_region)
2486 if (nd_region->ndr_mappings == 0)
2490 for (i = 0; i < nd_region->ndr_mappings; i++) {
2491 nd_mapping = &nd_region->mapping[i];
2495 devs = scan_labels(nd_region);
2497 for (i = 0; i < nd_region->ndr_mappings; i++) {
2498 int reverse = nd_region->ndr_mappings - 1 - i;
2500 nd_mapping = &nd_region->mapping[reverse];
2509 struct nd_region *nd_region = region;
2512 for (i = 0; i < nd_region->ndr_mappings; i++) {
2513 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2528 static int init_active_labels(struct nd_region *nd_region)
2532 for (i = 0; i < nd_region->ndr_mappings; i++) {
2533 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
2551 dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
2590 if (i < nd_region->ndr_mappings)
2595 deactivate_labels(nd_region);
2599 return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
2600 nd_region);
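
Lines 2509-2600 pair label activation with its undo: deactivate_labels() walks every mapping to drop the active label set, and init_active_labels() either calls it directly when a mapping fails to initialize or registers it as a devm action so the same cleanup runs when the region device goes away. A hedged sketch of that shape (the per-mapping bodies are elided):

    static void deactivate_labels(void *region)
    {
            struct nd_region *nd_region = region;
            int i;

            for (i = 0; i < nd_region->ndr_mappings; i++) {
                    /* ... free nd_region->mapping[i]'s active label list ... */
            }
    }

    static int init_active_labels(struct nd_region *nd_region)
    {
            int i;

            for (i = 0; i < nd_region->ndr_mappings; i++) {
                    /*
                     * ... read nd_region->mapping[i]'s active labels,
                     * breaking out of the loop on failure ...
                     */
            }

            /* a mapping that failed to initialize aborts the probe */
            if (i < nd_region->ndr_mappings) {
                    deactivate_labels(nd_region);
                    return -ENOMEM;
            }

            /* otherwise tie label teardown to the region device's lifetime */
            return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
                            nd_region);
    }
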
2603 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2609 nvdimm_bus_lock(&nd_region->dev);
2610 rc = init_active_labels(nd_region);
2612 nvdimm_bus_unlock(&nd_region->dev);
2616 type = nd_region_to_nstype(nd_region);
2619 devs = create_namespace_io(nd_region);
2623 devs = create_namespaces(nd_region);
2628 nvdimm_bus_unlock(&nd_region->dev);
2641 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2648 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2656 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2660 nd_region->ns_seed = devs[0];
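
Lines 2603-2660 are the probe-time entry point: under the nvdimm bus lock the active labels are loaded, then the device list is built according to the region's namespace type before each entry is named and registered, with devs[0] recorded as nd_region->ns_seed. A hedged sketch of just that dispatch (the pick_namespace_devs() wrapper name is invented; id assignment, registration, and the error/seed bookkeeping are elided):

    static struct device **pick_namespace_devs(struct nd_region *nd_region)
    {
            struct device **devs = NULL;

            nvdimm_bus_lock(&nd_region->dev);
            if (init_active_labels(nd_region) == 0) {
                    switch (nd_region_to_nstype(nd_region)) {
                    case ND_DEVICE_NAMESPACE_IO:
                            /* label-less region: one namespace spans it all */
                            devs = create_namespace_io(nd_region);
                            break;
                    case ND_DEVICE_NAMESPACE_PMEM:
                    case ND_DEVICE_NAMESPACE_BLK:
                            /* label-backed regions: scan the active labels */
                            devs = create_namespaces(nd_region);
                            break;
                    }
            }
            nvdimm_bus_unlock(&nd_region->dev);

            return devs;
    }
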