Lines matching refs:dev_dax
139 static u64 dev_dax_size(struct dev_dax *dev_dax)
144 device_lock_assert(&dev_dax->dev);
146 for (i = 0; i < dev_dax->nr_range; i++)
147 size += range_len(&dev_dax->ranges[i].range);
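
The fragments above (source lines 139-147) show dev_dax_size() summing range_len() over every allocated range while holding the device lock. For an inclusive [start, end] range, range_len() is end - start + 1. A minimal runnable check of that arithmetic; struct range below is a stand-in, not the kernel definition:

    #include <stdio.h>

    struct range { unsigned long long start, end; };

    /* inclusive range: [start, end] covers end - start + 1 bytes */
    static unsigned long long range_len(const struct range *r)
    {
            return r->end - r->start + 1;
    }

    int main(void)
    {
            struct range r[] = { { 0, 0x1fffff }, { 0x400000, 0x5fffff } };
            unsigned long long size = 0;
            unsigned int i;

            for (i = 0; i < sizeof(r) / sizeof(r[0]); i++)
                    size += range_len(&r[i]);
            printf("size=%#llx\n", size);   /* two 2 MiB ranges -> 0x400000 */
            return 0;
    }
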
155 struct dev_dax *dev_dax = to_dev_dax(dev);
156 struct dax_region *dax_region = dev_dax->region;
159 if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
162 rc = dax_drv->probe(dev_dax);
180 struct dev_dax *dev_dax = to_dev_dax(dev);
182 return dax_drv->remove(dev_dax);
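
dax_bus_probe() and dax_bus_remove() (source lines 155-182) refuse zero-sized or ID-less devices and otherwise dispatch through the driver's function pointers. A runnable sketch of that dispatch pattern; my_driver, my_device, and the demo_* functions are hypothetical names, not kernel API:

    #include <stdio.h>

    struct my_device;

    /* hypothetical analogue of the dax driver ops dispatched above */
    struct my_driver {
            int (*probe)(struct my_device *dev);
            int (*remove)(struct my_device *dev);
    };

    struct my_device {
            const char *name;
            struct my_driver *drv;
    };

    static int demo_probe(struct my_device *dev)
    {
            printf("probe %s\n", dev->name);
            return 0;
    }

    static int demo_remove(struct my_device *dev)
    {
            printf("remove %s\n", dev->name);
            return 0;
    }

    int main(void)
    {
            struct my_driver drv = { .probe = demo_probe, .remove = demo_remove };
            struct my_device dev = { .name = "dax0.0", .drv = &drv };

            /* the bus core only ever calls through the ops, as at
             * source lines 162 and 182 */
            if (dev.drv->probe(&dev) == 0)
                    dev.drv->remove(&dev);
            return 0;
    }

In this version of the file remove() still returns an int, which is why line 182 can return its value directly.
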
336 struct dev_dax *dev_dax = devm_create_dev_dax(&data);
338 if (IS_ERR(dev_dax))
339 rc = PTR_ERR(dev_dax);
349 dax_region->seed = &dev_dax->dev;
350 dax_region->youngest = &dev_dax->dev;
360 void kill_dev_dax(struct dev_dax *dev_dax)
362 struct dax_device *dax_dev = dev_dax->dax_dev;
370 static void trim_dev_dax_range(struct dev_dax *dev_dax)
372 int i = dev_dax->nr_range - 1;
373 struct range *range = &dev_dax->ranges[i].range;
374 struct dax_region *dax_region = dev_dax->region;
377 dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
382 if (--dev_dax->nr_range == 0) {
383 kfree(dev_dax->ranges);
384 dev_dax->ranges = NULL;
388 static void free_dev_dax_ranges(struct dev_dax *dev_dax)
390 while (dev_dax->nr_range)
391 trim_dev_dax_range(dev_dax);
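
trim_dev_dax_range() (source lines 370-384) only ever removes the highest-indexed range, freeing the backing array once the count reaches zero, and free_dev_dax_ranges() simply loops it until empty. A runnable analogue of that tail-only trim; demo_dev, trim_last, and free_all are hypothetical names:

    #include <stdio.h>
    #include <stdlib.h>

    struct range { unsigned long long start, end; };

    struct demo_dev {
            int nr_range;
            struct range *ranges;
    };

    /* mirror of trim_dev_dax_range(): only the last range may go */
    static void trim_last(struct demo_dev *d)
    {
            int i = d->nr_range - 1;

            printf("delete range[%d]: %#llx:%#llx\n", i,
                   d->ranges[i].start, d->ranges[i].end);
            if (--d->nr_range == 0) {
                    free(d->ranges);
                    d->ranges = NULL;
            }
    }

    /* mirror of free_dev_dax_ranges() */
    static void free_all(struct demo_dev *d)
    {
            while (d->nr_range)
                    trim_last(d);
    }

    int main(void)
    {
            struct demo_dev d = { .nr_range = 2 };

            d.ranges = calloc(2, sizeof(*d.ranges));
            if (!d.ranges)
                    return 1;
            d.ranges[0] = (struct range){ 0, 0xfffff };
            d.ranges[1] = (struct range){ 0x100000, 0x1fffff };
            free_all(&d);
            return 0;
    }

Freeing the array only when the count hits zero is what lets the while loop at line 390 double as the whole-device teardown.
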
396 struct dev_dax *dev_dax = to_dev_dax(dev);
400 kill_dev_dax(dev_dax);
402 free_dev_dax_ranges(dev_dax);
421 static int __free_dev_dax_id(struct dev_dax *dev_dax)
423 struct device *dev = &dev_dax->dev;
425 int rc = dev_dax->id;
429 if (!dev_dax->dyn_id || dev_dax->id < 0)
431 dax_region = dev_dax->region;
432 ida_free(&dax_region->ida, dev_dax->id);
434 dev_dax->id = -1;
438 static int free_dev_dax_id(struct dev_dax *dev_dax)
440 struct device *dev = &dev_dax->dev;
444 rc = __free_dev_dax_id(dev_dax);
449 static int alloc_dev_dax_id(struct dev_dax *dev_dax)
451 struct dax_region *dax_region = dev_dax->region;
458 dev_dax->dyn_id = true;
459 dev_dax->id = id;
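
__free_dev_dax_id() and alloc_dev_dax_id() (source lines 421-459) pair an IDA allocation with the dyn_id flag so that only dynamically assigned device IDs are ever returned to the allocator; caller-supplied IDs pass through untouched. A runnable userspace analogue with a toy bitmask standing in for the kernel's IDA; every demo_* name is hypothetical:

    #include <stdio.h>

    /* toy stand-in for the kernel's IDA: lowest free bit wins */
    static unsigned long long ida_bits;

    static int demo_ida_alloc(void)
    {
            int id;

            for (id = 0; id < 64; id++)
                    if (!(ida_bits & (1ULL << id))) {
                            ida_bits |= 1ULL << id;
                            return id;
                    }
            return -1;
    }

    static void demo_ida_free(int id)
    {
            ida_bits &= ~(1ULL << id);
    }

    struct demo_dev {
            int id;
            int dyn_id;     /* allocated by us, or caller-supplied? */
    };

    /* mirror of __free_dev_dax_id(): only dynamic ids are returned */
    static int demo_free_id(struct demo_dev *d)
    {
            int rc = d->id;

            if (!d->dyn_id || d->id < 0)
                    return -1;
            demo_ida_free(d->id);
            d->id = -1;
            return rc;
    }

    int main(void)
    {
            struct demo_dev d = { .id = -1 };
            int id;

            d.id = demo_ida_alloc();
            d.dyn_id = 1;
            id = d.id;
            printf("got id %d, freed id %d\n", id, demo_free_id(&d));
            return 0;
    }
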
467 struct dev_dax *dev_dax;
481 dev_dax = to_dev_dax(victim);
482 if (victim->driver || dev_dax_size(dev_dax))
491 if (dev_dax->id > 0) {
492 do_del = __free_dev_dax_id(dev_dax) >= 0;
612 struct dev_dax *dev_dax = to_dev_dax(parent);
614 ida_free(&dev_dax->ida, mapping->id);
623 struct dev_dax *dev_dax = to_dev_dax(dev->parent);
624 struct dax_region *dax_region = dev_dax->region;
630 dev_dax->ranges[mapping->range_id].mapping = NULL;
640 struct dev_dax *dev_dax = to_dev_dax(dev->parent);
641 struct dax_region *dax_region = dev_dax->region;
649 return &dev_dax->ranges[mapping->range_id];
655 struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
656 struct dax_region *dax_region = dev_dax->region;
730 static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
732 struct dax_region *dax_region = dev_dax->region;
739 if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
747 mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
752 dev_dax->ranges[range_id].mapping = mapping;
755 dev->parent = &dev_dax->dev;
772 static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
775 struct dax_region *dax_region = dev_dax->region;
777 struct device *dev = &dev_dax->dev;
787 if (dev_WARN_ONCE(dev, dev_dax->nr_range,
794 ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
795 * (dev_dax->nr_range + 1), GFP_KERNEL);
805 if (!dev_dax->nr_range) {
809 dev_dax->ranges = ranges;
813 for (i = 0; i < dev_dax->nr_range; i++)
815 dev_dax->ranges = ranges;
816 ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
824 dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
827 * A dev_dax instance must be registered before mapping device
831 if (!device_is_registered(&dev_dax->dev))
834 rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
836 trim_dev_dax_range(dev_dax);
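
alloc_dev_dax_range() (source lines 772-836) grows the ranges array by one with krealloc(), and the ordering matters: the old array stays valid if krealloc() fails, the new pointer is committed to dev_dax->ranges only once the allocation can proceed, and the new entry's pgoff is the page count covered by all prior ranges. A runnable realloc() analogue; the demo names are hypothetical, and the 4 KiB page shift is an assumption standing in for PHYS_PFN():

    #include <stdio.h>
    #include <stdlib.h>

    struct range { unsigned long long start, end; };
    struct dev_range { unsigned long pgoff; struct range range; };

    struct demo_dev {
            int nr_range;
            struct dev_range *ranges;
    };

    static unsigned long long range_len(const struct range *r)
    {
            return r->end - r->start + 1;
    }

    static int append_range(struct demo_dev *d, unsigned long long start,
                            unsigned long long len)
    {
            struct dev_range *ranges;
            unsigned long pgoff = 0;
            int i;

            /* on failure the old array and every existing range survive */
            ranges = realloc(d->ranges, sizeof(*ranges) * (d->nr_range + 1));
            if (!ranges)
                    return -1;
            /* new range's pgoff = pages covered by all prior ranges
             * (assumed 4 KiB pages; the kernel uses PHYS_PFN()) */
            for (i = 0; i < d->nr_range; i++)
                    pgoff += range_len(&ranges[i].range) >> 12;
            d->ranges = ranges;     /* commit the possibly-moved array */
            ranges[d->nr_range++] = (struct dev_range) {
                    .pgoff = pgoff,
                    .range = { .start = start, .end = start + len - 1 },
            };
            return 0;
    }

    int main(void)
    {
            struct demo_dev d = { 0 };

            if (append_range(&d, 0x100000, 0x200000) == 0)
                    printf("range[0] pgoff=%lu end=%#llx\n",
                           d.ranges[0].pgoff, d.ranges[0].range.end);
            free(d.ranges);
            return 0;
    }

In the kernel the physical span is also claimed from the region's resource tree before the commit, and a mapping device is registered afterwards (line 834), with the range trimmed again if that registration fails (line 836).
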
841 static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
843 int last_range = dev_dax->nr_range - 1;
844 struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
845 struct dax_region *dax_region = dev_dax->region;
848 struct device *dev = &dev_dax->dev;
875 struct dev_dax *dev_dax = to_dev_dax(dev);
879 size = dev_dax_size(dev_dax);
885 static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
891 return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
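
alloc_is_aligned() (source line 891) accepts a size only if it is a multiple of the larger of the device's align and memremap_compat_align(), which is arch-specific. For power-of-two alignments, IS_ALIGNED() reduces to a mask test. A runnable rendering; the 2 MiB and 16 MiB values below are purely illustrative, not what any particular arch returns:

    #include <stdbool.h>
    #include <stdio.h>

    /* userspace rendering of IS_ALIGNED(x, a) for power-of-two a */
    #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

    static bool alloc_is_aligned(unsigned long long align,
                                 unsigned long long compat_align,
                                 unsigned long long size)
    {
            unsigned long long min = align > compat_align ? align : compat_align;

            return IS_ALIGNED(size, min);
    }

    int main(void)
    {
            /* illustrative values: 2 MiB device align, 16 MiB compat
             * align; a 32 MiB request passes, a 2 MiB one does not */
            printf("%d %d\n",
                   alloc_is_aligned(1ULL << 21, 1ULL << 24, 32ULL << 20),
                   alloc_is_aligned(1ULL << 21, 1ULL << 24, 2ULL << 20));
            return 0;
    }
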
894 static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
896 resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
897 struct dax_region *dax_region = dev_dax->region;
898 struct device *dev = &dev_dax->dev;
901 for (i = dev_dax->nr_range - 1; i >= 0; i--) {
902 struct range *range = &dev_dax->ranges[i].range;
903 struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
911 trim_dev_dax_range(dev_dax);
925 if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
928 return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
936 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
938 static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
943 if (dev_dax->nr_range == 0)
945 if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
947 last = &dev_dax->ranges[dev_dax->nr_range - 1];
950 for (i = 0; i < dev_dax->nr_range - 1; i++) {
951 struct dev_dax_range *dax_range = &dev_dax->ranges[i];
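
dev_dax_shrink() (source lines 894-928) walks the ranges from the end because, as the comment at line 936 notes, allocations are ordered by increasing pgoff, so space must be handed back tail-first: whole trailing ranges are trimmed until the remainder fits inside the last surviving range, which is then truncated in place. adjust_ok() (lines 938-951) enforces the matching grow-side rule, permitting in-place resize of only the final, most recently allocated range. A runnable sketch of the tail-first shrink; the names are hypothetical and the mapping-device teardown is omitted:

    #include <stdio.h>

    struct range { unsigned long long start, end; };

    static unsigned long long range_len(const struct range *r)
    {
            return r->end - r->start + 1;
    }

    /* tail-first shrink: whole trailing ranges go first, then the new
     * last range is truncated in place */
    static void shrink(struct range *ranges, int *nr,
                       unsigned long long to_shrink)
    {
            int i;

            for (i = *nr - 1; i >= 0 && to_shrink; i--) {
                    unsigned long long len = range_len(&ranges[i]);

                    if (len <= to_shrink) {
                            to_shrink -= len;
                            (*nr)--;        /* trim the whole range */
                            continue;
                    }
                    ranges[i].end -= to_shrink;     /* partial adjust */
                    to_shrink = 0;
            }
    }

    int main(void)
    {
            struct range r[] = { { 0, 0xfffff }, { 0x100000, 0x2fffff } };
            int nr = 2;

            shrink(r, &nr, 0x180000);       /* give back 1.5 MiB */
            printf("nr=%d last end=%#llx\n", nr, r[nr - 1].end);
            return 0;
    }
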
961 struct dev_dax *dev_dax, resource_size_t size)
964 resource_size_t dev_size = dev_dax_size(dev_dax);
966 struct device *dev = &dev_dax->dev;
978 return dev_dax_shrink(dev_dax, size);
981 if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
993 return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
1002 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
1018 if (adjust_ok(dev_dax, res)) {
1019 rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
1022 rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
1038 struct dev_dax *dev_dax = to_dev_dax(dev);
1039 struct dax_region *dax_region = dev_dax->region;
1045 if (!alloc_is_aligned(dev_dax, val)) {
1056 rc = dev_dax_resize(dax_region, dev_dax, val);
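
dev_dax_resize() and size_store (source lines 961-1056) reduce to a four-way decision: an equal size is a no-op, a smaller size shrinks tail-first, and a larger size (after the alignment check at line 981) is satisfied either by extending the last range in place when adjust_ok() allows it, or by allocating a fresh range from the region. A condensed, runnable statement of that branch structure; pick() and the enum are hypothetical:

    #include <stdio.h>

    enum resize_op { NOP, SHRINK, GROW_ADJUST, GROW_NEW };

    /* hypothetical condensation of dev_dax_resize()'s branch structure */
    static enum resize_op pick(unsigned long long dev_size,
                               unsigned long long new_size, int can_adjust)
    {
            if (new_size == dev_size)
                    return NOP;             /* nothing to do */
            if (new_size < dev_size)
                    return SHRINK;          /* tail-first, as above */
            return can_adjust ? GROW_ADJUST : GROW_NEW;
    }

    int main(void)
    {
            printf("%d %d %d\n", pick(8, 8, 0), pick(8, 4, 0), pick(8, 16, 1));
            return 0;
    }
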
1097 struct dev_dax *dev_dax = to_dev_dax(dev);
1098 struct dax_region *dax_region = dev_dax->region;
1116 if (alloc_is_aligned(dev_dax, to_alloc))
1117 rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
1128 struct dev_dax *dev_dax = to_dev_dax(dev);
1130 return sprintf(buf, "%d\n", dev_dax->align);
1133 static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
1135 resource_size_t dev_size = dev_dax_size(dev_dax);
1136 struct device *dev = &dev_dax->dev;
1139 if (dev_size > 0 && !alloc_is_aligned(dev_dax, dev_size)) {
1141 __func__, dev_dax->align, &dev_size);
1145 for (i = 0; i < dev_dax->nr_range; i++) {
1146 size_t len = range_len(&dev_dax->ranges[i].range);
1148 if (!alloc_is_aligned(dev_dax, len)) {
1150 __func__, dev_dax->align, i);
1161 struct dev_dax *dev_dax = to_dev_dax(dev);
1162 struct dax_region *dax_region = dev_dax->region;
1185 align_save = dev_dax->align;
1186 dev_dax->align = val;
1187 rc = dev_dax_validate_align(dev_dax);
1189 dev_dax->align = align_save;
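
align_store (source lines 1161-1189) uses a commit-then-validate idiom: it saves the current alignment, speculatively writes the new value, re-validates every existing range via dev_dax_validate_align(), and rolls back on failure. The same pattern in runnable form; demo_dev, validate_align, and set_align are hypothetical stand-ins:

    #include <stdio.h>

    struct demo_dev {
            unsigned long long align;
            unsigned long long size;        /* stand-in for dev_dax_size() */
    };

    /* stand-in for dev_dax_validate_align(): existing allocations must
     * still satisfy the new alignment */
    static int validate_align(const struct demo_dev *d)
    {
            return (d->size % d->align) ? -1 : 0;
    }

    static int set_align(struct demo_dev *d, unsigned long long val)
    {
            unsigned long long align_save = d->align;
            int rc;

            d->align = val;                 /* speculative commit */
            rc = validate_align(d);
            if (rc)
                    d->align = align_save;  /* roll back on failure */
            return rc;
    }

    int main(void)
    {
            struct demo_dev d = { .align = 1ULL << 21, .size = 4ULL << 20 };

            printf("rc=%d\n", set_align(&d, 1ULL << 30));   /* 1 GiB fails */
            printf("align=%#llx\n", d.align);               /* 2 MiB kept */
            return 0;
    }
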
1197 static int dev_dax_target_node(struct dev_dax *dev_dax)
1199 struct dax_region *dax_region = dev_dax->region;
1207 struct dev_dax *dev_dax = to_dev_dax(dev);
1209 return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
1216 struct dev_dax *dev_dax = to_dev_dax(dev);
1217 struct dax_region *dax_region = dev_dax->region;
1220 if (dev_dax->nr_range < 1)
1223 start = dev_dax->ranges[0].range.start;
1250 struct dev_dax *dev_dax = to_dev_dax(dev);
1251 struct dax_region *dax_region = dev_dax->region;
1253 if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
1288 struct dev_dax *dev_dax = to_dev_dax(dev);
1289 struct dax_device *dax_dev = dev_dax->dax_dev;
1292 free_dev_dax_id(dev_dax);
1293 kfree(dev_dax->pgmap);
1294 kfree(dev_dax);
1302 struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
1307 struct dev_dax *dev_dax;
1312 dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
1313 if (!dev_dax)
1316 dev_dax->region = dax_region;
1324 dev_dax->id = data->id;
1332 rc = alloc_dev_dax_id(dev_dax);
1337 dev = &dev_dax->dev;
1339 dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
1341 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
1349 dev_dax->pgmap = kmemdup(data->pgmap,
1351 if (!dev_dax->pgmap) {
1361 dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
1370 dev_dax->dax_dev = dax_dev;
1371 dev_dax->target_node = dax_region->target_node;
1372 dev_dax->align = dax_region->align;
1373 ida_init(&dev_dax->ida);
1386 kill_dev_dax(dev_dax);
1396 if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
1397 rc = devm_register_dax_mapping(dev_dax, 0);
1402 return dev_dax;
1405 kfree(dev_dax->pgmap);
1407 free_dev_dax_ranges(dev_dax);
1409 free_dev_dax_id(dev_dax);
1411 kfree(dev_dax);
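
The error path at the end of devm_create_dev_dax() (source lines 1402-1411) is the classic kernel goto-unwind ladder: teardown steps appear in reverse order of acquisition, and each failure jumps to the label that skips cleanup of anything not yet set up. A runnable illustration with a hypothetical three-step setup; the real labels and resources differ:

    #include <stdio.h>
    #include <stdlib.h>

    /* hypothetical three-step setup mirroring the label ladder above:
     * each failure jumps past the teardown of steps never taken */
    static int create(void)
    {
            void *id, *range, *pgmap;

            id = malloc(16);
            if (!id)
                    goto err_id;
            range = malloc(16);
            if (!range)
                    goto err_range;
            pgmap = malloc(16);
            if (!pgmap)
                    goto err_pgmap;

            /* success: the objects intentionally outlive create(),
             * as the registered device would in the kernel */
            printf("created\n");
            return 0;

            /* teardown in reverse order of acquisition */
    err_pgmap:
            free(range);
    err_range:
            free(id);
    err_id:
            return -1;
    }

    int main(void)
    {
            return create();
    }
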