Lines matching refs: nd_region

63 static int nd_region_invalidate_memregion(struct nd_region *nd_region)
67 for (i = 0; i < nd_region->ndr_mappings; i++) {
68 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
83 &nd_region->dev,
87 dev_err(&nd_region->dev,
95 for (i = 0; i < nd_region->ndr_mappings; i++) {
96 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
105 int nd_region_activate(struct nd_region *nd_region)
109 struct device *dev = &nd_region->dev;
112 nvdimm_bus_lock(&nd_region->dev);
113 for (i = 0; i < nd_region->ndr_mappings; i++) {
114 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
118 nvdimm_bus_unlock(&nd_region->dev);
129 nvdimm_bus_unlock(&nd_region->dev);
131 rc = nd_region_invalidate_memregion(nd_region);
144 for (i = 0; i < nd_region->ndr_mappings; i++) {
145 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
147 int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);
157 for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
162 for (j = i + 1; j < nd_region->ndr_mappings; j++)
173 struct nd_region *nd_region = to_nd_region(dev);
176 for (i = 0; i < nd_region->ndr_mappings; i++) {
177 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
182 free_percpu(nd_region->lane);
183 if (!test_bit(ND_REGION_CXL, &nd_region->flags))
184 memregion_free(nd_region->id);
185 kfree(nd_region);
188 struct nd_region *to_nd_region(struct device *dev)
190 struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
193 return nd_region;
197 struct device *nd_region_dev(struct nd_region *nd_region)
199 if (!nd_region)
201 return &nd_region->dev;
205 void *nd_region_provider_data(struct nd_region *nd_region)
207 return nd_region->provider_data;
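
The matches above cover the basic accessors: to_nd_region() maps a generic struct device back to its region, nd_region_dev() goes the other way, and nd_region_provider_data() returns the pointer the bus provider supplied when the region was described. A minimal sketch of using the three together from a provider callback; the example_provider type and the callback itself are hypothetical, and the accessor declarations are assumed to come from the nvdimm headers (linux/libnvdimm.h plus the subsystem's nd.h for in-tree code).

#include <linux/device.h>
#include <linux/libnvdimm.h>

struct example_provider;	/* hypothetical provider-private state */

/* Hypothetical callback handed the region's struct device. */
static void example_region_notify(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct example_provider *priv = nd_region_provider_data(nd_region);

	dev_dbg(nd_region_dev(nd_region), "provider data %p\n", priv);
}
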
213 * @nd_region: region-device to interrogate
219 int nd_region_to_nstype(struct nd_region *nd_region)
221 if (is_memory(&nd_region->dev)) {
224 for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
225 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
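
nd_region_to_nstype() classifies a region by the namespace personality it can host: memory regions whose DIMMs carry labels report ND_DEVICE_NAMESPACE_PMEM, while label-less memory reports ND_DEVICE_NAMESPACE_IO. A hedged one-liner built on that, assuming the ND_DEVICE_* constants from linux/ndctl.h; the helper name is illustrative.

#include <linux/types.h>
#include <linux/ndctl.h>

/* Illustrative check: does this region back label-managed pmem namespaces? */
static bool example_region_is_labelled(struct nd_region *nd_region)
{
	return nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_PMEM;
}
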
241 static unsigned long long region_size(struct nd_region *nd_region)
243 if (is_memory(&nd_region->dev)) {
244 return nd_region->ndr_size;
245 } else if (nd_region->ndr_mappings == 1) {
246 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
257 struct nd_region *nd_region = to_nd_region(dev);
259 return sprintf(buf, "%llu\n", region_size(nd_region));
266 struct nd_region *nd_region = to_nd_region(dev);
272 return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
280 struct nd_region *nd_region = to_nd_region(dev);
286 rc = nvdimm_flush(nd_region, NULL);
297 struct nd_region *nd_region = to_nd_region(dev);
299 return sprintf(buf, "%d\n", nd_region->ndr_mappings);
306 struct nd_region *nd_region = to_nd_region(dev);
308 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
315 struct nd_region *nd_region = to_nd_region(dev);
316 struct nd_interleave_set *nd_set = nd_region->nd_set;
333 if (nd_region->ndr_mappings) {
334 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
342 nd_region_interleave_set_cookie(nd_region,
355 resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
360 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
363 for (i = 0; i < nd_region->ndr_mappings; i++) {
364 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
371 available += nd_pmem_available_dpa(nd_region, nd_mapping);
377 resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
382 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
383 for (i = 0; i < nd_region->ndr_mappings; i++) {
384 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
387 nd_region, nd_mapping));
389 return avail * nd_region->ndr_mappings;
395 struct nd_region *nd_region = to_nd_region(dev);
407 available = nd_region_available_dpa(nd_region);
418 struct nd_region *nd_region = to_nd_region(dev);
424 available = nd_region_allocatable_dpa(nd_region);
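
Both capacity helpers assert is_nvdimm_bus_locked() (the WARN_ON matches above), so callers bracket them with nvdimm_bus_lock()/nvdimm_bus_unlock() on the region device, as the attribute code in this file appears to do. A minimal sketch under that assumption; the lock helpers are internal to drivers/nvdimm, so this is in-tree-style code, and the wrapper name is illustrative.

#include <linux/types.h>

/* Illustrative wrapper: query remaining DPA with the bus lock held. */
static resource_size_t example_region_free_dpa(struct nd_region *nd_region)
{
	struct device *dev = nd_region_dev(nd_region);
	resource_size_t avail;

	nvdimm_bus_lock(dev);
	avail = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return avail;
}
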
452 struct nd_region *nd_region = to_nd_region(dev);
456 if (nd_region->ns_seed)
457 rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
468 struct nd_region *nd_region = to_nd_region(dev);
472 if (nd_region->btt_seed)
473 rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
485 struct nd_region *nd_region = to_nd_region(dev);
489 if (nd_region->pfn_seed)
490 rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
502 struct nd_region *nd_region = to_nd_region(dev);
506 if (nd_region->dax_seed)
507 rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
519 struct nd_region *nd_region = to_nd_region(dev);
521 return sprintf(buf, "%d\n", nd_region->ro);
535 struct nd_region *nd_region = to_nd_region(dev);
540 nd_region->ro = ro;
549 struct nd_region *nd_region = to_nd_region(dev);
551 return sprintf(buf, "%#lx\n", nd_region->align);
557 struct nd_region *nd_region = to_nd_region(dev);
573 mappings = max_t(u32, 1, nd_region->ndr_mappings);
576 || val > region_size(nd_region) || remainder)
585 nd_region->align = val;
595 struct nd_region *nd_region = to_nd_region(dev);
600 rc = badblocks_show(&nd_region->bb, buf, 0);
612 struct nd_region *nd_region = to_nd_region(dev);
614 return sprintf(buf, "%#llx\n", nd_region->ndr_start);
621 struct nd_region *nd_region = to_nd_region(dev);
623 if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
625 else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
656 struct nd_region *nd_region = to_nd_region(dev);
657 struct nd_interleave_set *nd_set = nd_region->nd_set;
658 int type = nd_region_to_nstype(nd_region);
673 int has_flush = nvdimm_has_flush(nd_region);
684 if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
708 struct nd_region *nd_region = to_nd_region(dev);
712 if (n >= nd_region->ndr_mappings)
714 nd_mapping = &nd_region->mapping[n];
770 struct nd_region *nd_region = to_nd_region(dev);
772 if (n < nd_region->ndr_mappings)
853 u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
856 struct nd_interleave_set *nd_set = nd_region->nd_set;
867 u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
869 struct nd_interleave_set *nd_set = nd_region->nd_set;
891 void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
894 if (nd_region->ns_seed == dev) {
895 nd_region_create_ns_seed(nd_region);
899 if (nd_region->btt_seed == dev)
900 nd_region_create_btt_seed(nd_region);
901 if (nd_region->ns_seed == &nd_btt->ndns->dev)
902 nd_region_create_ns_seed(nd_region);
906 if (nd_region->pfn_seed == dev)
907 nd_region_create_pfn_seed(nd_region);
908 if (nd_region->ns_seed == &nd_pfn->ndns->dev)
909 nd_region_create_ns_seed(nd_region);
913 if (nd_region->dax_seed == dev)
914 nd_region_create_dax_seed(nd_region);
915 if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
916 nd_region_create_ns_seed(nd_region);
923 * @nd_region: region id and number of lanes possible
938 unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
944 if (nd_region->num_lanes < nr_cpu_ids) {
947 lane = cpu % nd_region->num_lanes;
948 ndl_count = per_cpu_ptr(nd_region->lane, cpu);
949 ndl_lock = per_cpu_ptr(nd_region->lane, lane);
959 void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
961 if (nd_region->num_lanes < nr_cpu_ids) {
965 ndl_count = per_cpu_ptr(nd_region->lane, cpu);
966 ndl_lock = per_cpu_ptr(nd_region->lane, lane);
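
nd_region_acquire_lane() and nd_region_release_lane() hand out one of the region's num_lanes lanes so that I/O paths with per-lane resources (the BTT, for instance) stay serialized when there are more CPUs than lanes; with num_lanes >= nr_cpu_ids the helpers are effectively free. The usual bracketed pattern, with the I/O body elided and the function name illustrative:

/* Illustrative I/O path: everything between acquire and release may
 * safely use resources indexed by 'lane'. */
static int example_do_io(struct nd_region *nd_region)
{
	unsigned int lane = nd_region_acquire_lane(nd_region);

	/* ... submit I/O using the per-lane buffer/flush resources ... */

	nd_region_release_lane(nd_region, lane);
	return 0;
}
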
980 static unsigned long default_align(struct nd_region *nd_region)
987 if (nd_region->ndr_size < MEMREMAP_COMPAT_ALIGN_MAX)
990 mappings = max_t(u16, 1, nd_region->ndr_mappings);
1000 static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
1004 struct nd_region *nd_region;
1025 nd_region =
1026 kzalloc(struct_size(nd_region, mapping, ndr_desc->num_mappings),
1029 if (!nd_region)
1033 nd_region->id = ndr_desc->memregion;
1035 nd_region->id = memregion_alloc(GFP_KERNEL);
1036 if (nd_region->id < 0)
1040 nd_region->lane = alloc_percpu(struct nd_percpu_lane);
1041 if (!nd_region->lane)
1047 ndl = per_cpu_ptr(nd_region->lane, i);
1056 nd_region->mapping[i].nvdimm = nvdimm;
1057 nd_region->mapping[i].start = mapping->start;
1058 nd_region->mapping[i].size = mapping->size;
1059 nd_region->mapping[i].position = mapping->position;
1060 INIT_LIST_HEAD(&nd_region->mapping[i].labels);
1061 mutex_init(&nd_region->mapping[i].lock);
1065 nd_region->ndr_mappings = ndr_desc->num_mappings;
1066 nd_region->provider_data = ndr_desc->provider_data;
1067 nd_region->nd_set = ndr_desc->nd_set;
1068 nd_region->num_lanes = ndr_desc->num_lanes;
1069 nd_region->flags = ndr_desc->flags;
1070 nd_region->ro = ro;
1071 nd_region->numa_node = ndr_desc->numa_node;
1072 nd_region->target_node = ndr_desc->target_node;
1073 ida_init(&nd_region->ns_ida);
1074 ida_init(&nd_region->btt_ida);
1075 ida_init(&nd_region->pfn_ida);
1076 ida_init(&nd_region->dax_ida);
1077 dev = &nd_region->dev;
1078 dev_set_name(dev, "region%d", nd_region->id);
1083 nd_region->ndr_size = resource_size(ndr_desc->res);
1084 nd_region->ndr_start = ndr_desc->res->start;
1085 nd_region->align = default_align(nd_region);
1087 nd_region->flush = ndr_desc->flush;
1089 nd_region->flush = NULL;
1095 return nd_region;
1099 memregion_free(nd_region->id);
1101 kfree(nd_region);
1105 struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
1114 struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
1123 void nvdimm_region_delete(struct nd_region *nd_region)
1125 if (nd_region)
1126 nd_device_unregister(&nd_region->dev, ND_SYNC);
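
nd_region_create() is the common back end; bus providers call nvdimm_pmem_region_create() or nvdimm_volatile_region_create() with a filled-in struct nd_region_desc and tear the region down with nvdimm_region_delete(). A hedged sketch of the single-mapping case, using only descriptor fields that appear around nd_region_create() in the matches above (res, num_mappings, provider_data, numa_node and the per-mapping start/size/position); the bus, nvdimm and resource objects are assumed to exist, and the function name is illustrative.

#include <linux/libnvdimm.h>
#include <linux/numa.h>

/* Illustrative provider step: describe one DIMM mapping and register
 * a pmem region on an existing nvdimm_bus. */
static struct nd_region *example_register_region(struct nvdimm_bus *bus,
		struct nvdimm *nvdimm, struct resource *res, void *priv)
{
	struct nd_mapping_desc mapping = {
		.nvdimm = nvdimm,
		.start = res->start,
		.size = resource_size(res),
		.position = 0,
	};
	struct nd_region_desc ndr_desc = {
		.res = res,
		.mapping = &mapping,
		.num_mappings = 1,
		.provider_data = priv,
		.numa_node = NUMA_NO_NODE,
	};

	return nvdimm_pmem_region_create(bus, &ndr_desc);
}
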
1130 int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
1134 if (!nd_region->flush)
1135 rc = generic_nvdimm_flush(nd_region);
1137 if (nd_region->flush(nd_region, bio))
1145 * @nd_region: interleaved pmem region
1147 int generic_nvdimm_flush(struct nd_region *nd_region)
1149 struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
1167 for (i = 0; i < nd_region->ndr_mappings; i++)
1178 * @nd_region: interleaved pmem region
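
nvdimm_flush() dispatches to the provider's ->flush() callback when one was supplied in the region descriptor and otherwise falls back to generic_nvdimm_flush(), which fences and then writes the flush hint of each mapping in the set. A hedged sketch of the usual write-then-flush sequence in a region-backed driver; memcpy_flushcache() is used here only for illustration, and dst is assumed to point into the region's persistent mapping.

#include <linux/libnvdimm.h>
#include <linux/printk.h>
#include <linux/string.h>

/* Illustrative durability path: copy, then drain write-pending queues. */
static void example_write_and_flush(struct nd_region *nd_region,
		void *dst, const void *src, size_t len)
{
	memcpy_flushcache(dst, src, len);

	/* NULL bio: a synchronous flush not tied to a request. */
	if (nvdimm_flush(nd_region, NULL) < 0)
		pr_warn("example: region flush failed\n");
}
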
1184 int nvdimm_has_flush(struct nd_region *nd_region)
1189 if (nd_region->ndr_mappings == 0
1194 if (test_bit(ND_REGION_ASYNC, &nd_region->flags) && nd_region->flush)
1198 for (i = 0; i < nd_region->ndr_mappings; i++) {
1199 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1215 int nvdimm_has_cache(struct nd_region *nd_region)
1217 return is_nd_pmem(&nd_region->dev) &&
1218 !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
1222 bool is_nvdimm_sync(struct nd_region *nd_region)
1224 if (is_nd_volatile(&nd_region->dev))
1227 return is_nd_pmem(&nd_region->dev) &&
1228 !test_bit(ND_REGION_ASYNC, &nd_region->flags);
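
The three capability checks summarize a region's persistence model: nvdimm_has_flush() returns positive when explicit flush-hint / ->flush() support exists, zero when no flush-hint writes are needed, and negative when explicit flushing is not applicable; nvdimm_has_cache() is non-zero when the CPU cache sits outside the persistence domain and must be flushed by the driver; is_nvdimm_sync() is false for asynchronous (virtio-pmem style) regions. A small probe built only on those return conventions; the dev_dbg messages are illustrative.

#include <linux/device.h>

/* Illustrative capability dump for a region-backed driver. */
static void example_report_persistence(struct nd_region *nd_region)
{
	struct device *dev = nd_region_dev(nd_region);

	dev_dbg(dev, "has_flush: %d\n", nvdimm_has_flush(nd_region));
	dev_dbg(dev, "has_cache: %d\n", nvdimm_has_cache(nd_region));
	dev_dbg(dev, "sync: %d\n", is_nvdimm_sync(nd_region));
}
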
1233 struct nd_region *nd_region;
1239 struct nd_region *nd_region;
1246 nd_region = to_nd_region(dev);
1247 if (nd_region == ctx->nd_region)
1251 region_start = nd_region->ndr_start;
1252 region_end = region_start + nd_region->ndr_size;
1260 int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
1263 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
1265 .nd_region = nd_region,
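
nd_region_conflict() walks the sibling regions on the parent nvdimm_bus and reports whether [start, start + size) overlaps another region's span, skipping the region passed in. A hedged sketch that treats any non-zero return as a conflict; the helper is internal to drivers/nvdimm and the caller name is illustrative.

#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative guard before claiming a physical address range. */
static int example_claim_range(struct nd_region *nd_region,
		resource_size_t start, resource_size_t size)
{
	if (nd_region_conflict(nd_region, start, size))
		return -EBUSY;	/* intersects another region on this bus */

	/* ... go on to reserve/use the range ... */
	return 0;
}
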