Lines Matching refs:dev_dax

62 struct dev_dax *dev_dax = to_dev_dax(dev);
64 if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM)
172 bool static_dev_dax(struct dev_dax *dev_dax)
174 return is_static(dev_dax->region);
178 static u64 dev_dax_size(struct dev_dax *dev_dax)
183 device_lock_assert(&dev_dax->dev);
185 for (i = 0; i < dev_dax->nr_range; i++)
186 size += range_len(&dev_dax->ranges[i].range);
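
The matches at 178-186 outline dev_dax_size(). Filling in the unmatched lines from context gives roughly the following sketch (a reconstruction, not a verbatim copy of any one kernel version):

static u64 dev_dax_size(struct dev_dax *dev_dax)
{
	u64 size = 0;
	int i;

	/* the device lock must be held while the ranges array is walked */
	device_lock_assert(&dev_dax->dev);

	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);

	return size;
}
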
194 struct dev_dax *dev_dax = to_dev_dax(dev);
195 struct dax_region *dax_region = dev_dax->region;
198 if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
201 rc = dax_drv->probe(dev_dax);
219 struct dev_dax *dev_dax = to_dev_dax(dev);
222 dax_drv->remove(dev_dax);
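
Lines 194-222 come from the bus probe/remove hooks. A simplified reconstruction (the container_of helper name to_dax_drv() is taken from drivers/dax; the upstream probe additionally does seed bookkeeping for dynamic regions after a successful probe, omitted here):

static int dax_bus_probe(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	/* refuse to bind an instance with no capacity or no id */
	if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0)
		return -ENXIO;

	return dax_drv->probe(dev_dax);
}

static void dax_bus_remove(struct device *dev)
{
	struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
	struct dev_dax *dev_dax = to_dev_dax(dev);

	if (dax_drv->remove)
		dax_drv->remove(dev_dax);
}
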
371 struct dev_dax *dev_dax = devm_create_dev_dax(&data);
373 if (IS_ERR(dev_dax))
374 rc = PTR_ERR(dev_dax);
384 dax_region->seed = &dev_dax->dev;
385 dax_region->youngest = &dev_dax->dev;
395 void kill_dev_dax(struct dev_dax *dev_dax)
397 struct dax_device *dax_dev = dev_dax->dax_dev;
408 if (!static_dev_dax(dev_dax))
409 dev_dax->pgmap = NULL;
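
Lines 395-409 belong to kill_dev_dax(). A reconstruction consistent with the fragments (the inode unmap step is inferred from context):

void kill_dev_dax(struct dev_dax *dev_dax)
{
	struct dax_device *dax_dev = dev_dax->dax_dev;
	struct inode *inode = dax_inode(dax_dev);

	kill_dax(dax_dev);
	unmap_mapping_range(inode->i_mapping, 0, 0, 1);

	/*
	 * Dynamic instances get a fresh pgmap per probe; drop the stale
	 * pointer so a later reconfiguration starts from a clean slate.
	 */
	if (!static_dev_dax(dev_dax))
		dev_dax->pgmap = NULL;
}
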
413 static void trim_dev_dax_range(struct dev_dax *dev_dax)
415 int i = dev_dax->nr_range - 1;
416 struct range *range = &dev_dax->ranges[i].range;
417 struct dax_region *dax_region = dev_dax->region;
420 dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
425 if (--dev_dax->nr_range == 0) {
426 kfree(dev_dax->ranges);
427 dev_dax->ranges = NULL;
431 static void free_dev_dax_ranges(struct dev_dax *dev_dax)
433 while (dev_dax->nr_range)
434 trim_dev_dax_range(dev_dax);
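
trim_dev_dax_range() (413-427) drops the last entry of the ranges array and releases its region reservation; free_dev_dax_ranges() (431-434) simply loops it until nothing is left. A sketch consistent with the matched lines:

static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
	int i = dev_dax->nr_range - 1;
	struct range *range = &dev_dax->ranges[i].range;
	struct dax_region *dax_region = dev_dax->region;

	device_lock_assert(dax_region->dev);
	dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
		(unsigned long long)range->start,
		(unsigned long long)range->end);

	__release_region(&dax_region->res, range->start, range_len(range));
	if (--dev_dax->nr_range == 0) {
		kfree(dev_dax->ranges);
		dev_dax->ranges = NULL;
	}
}

static void free_dev_dax_ranges(struct dev_dax *dev_dax)
{
	while (dev_dax->nr_range)
		trim_dev_dax_range(dev_dax);
}
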
439 struct dev_dax *dev_dax = to_dev_dax(dev);
443 kill_dev_dax(dev_dax);
445 free_dev_dax_ranges(dev_dax);
463 static int __free_dev_dax_id(struct dev_dax *dev_dax)
465 struct device *dev = &dev_dax->dev;
467 int rc = dev_dax->id;
471 if (!dev_dax->dyn_id || dev_dax->id < 0)
473 dax_region = dev_dax->region;
474 ida_free(&dax_region->ida, dev_dax->id);
476 dev_dax->id = -1;
480 static int free_dev_dax_id(struct dev_dax *dev_dax)
482 struct device *dev = &dev_dax->dev;
486 rc = __free_dev_dax_id(dev_dax);
491 static int alloc_dev_dax_id(struct dev_dax *dev_dax)
493 struct dax_region *dax_region = dev_dax->region;
500 dev_dax->dyn_id = true;
501 dev_dax->id = id;
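
Lines 463-501 cover the device-id lifecycle: __free_dev_dax_id() returns the id to the region IDA only if it was dynamically allocated, free_dev_dax_id() is the locked wrapper, and alloc_dev_dax_id() hands out a new id and marks it dynamic. A sketch; the locking and the region reference are filled in from context:

static int __free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	struct dax_region *dax_region;
	int rc = dev_dax->id;

	device_lock_assert(dev);

	if (!dev_dax->dyn_id || dev_dax->id < 0)
		return -1;
	dax_region = dev_dax->region;
	ida_free(&dax_region->ida, dev_dax->id);
	dev_dax->id = -1;
	return rc;
}

static int free_dev_dax_id(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int rc;

	device_lock(dev);
	rc = __free_dev_dax_id(dev_dax);
	device_unlock(dev);
	return rc;
}

static int alloc_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	int id;

	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
	if (id < 0)
		return id;
	kref_get(&dax_region->kref);	/* the id pins the region */
	dev_dax->dyn_id = true;
	dev_dax->id = id;
	return id;
}
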
509 struct dev_dax *dev_dax;
523 dev_dax = to_dev_dax(victim);
524 if (victim->driver || dev_dax_size(dev_dax))
533 if (dev_dax->id > 0) {
534 do_del = __free_dev_dax_id(dev_dax) >= 0;
653 struct dev_dax *dev_dax = to_dev_dax(parent);
655 ida_free(&dev_dax->ida, mapping->id);
664 struct dev_dax *dev_dax = to_dev_dax(dev->parent);
665 struct dax_region *dax_region = dev_dax->region;
671 dev_dax->ranges[mapping->range_id].mapping = NULL;
680 struct dev_dax *dev_dax = to_dev_dax(dev->parent);
681 struct dax_region *dax_region = dev_dax->region;
689 return &dev_dax->ranges[mapping->range_id];
695 struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent);
696 struct dax_region *dax_region = dev_dax->region;
770 static int devm_register_dax_mapping(struct dev_dax *dev_dax, int range_id)
772 struct dax_region *dax_region = dev_dax->region;
779 if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver,
787 mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL);
792 dev_dax->ranges[range_id].mapping = mapping;
795 dev->parent = &dev_dax->dev;
812 static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
815 struct dax_region *dax_region = dev_dax->region;
817 struct device *dev = &dev_dax->dev;
827 if (dev_WARN_ONCE(dev, dev_dax->nr_range,
838 ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
839 * (dev_dax->nr_range + 1), GFP_KERNEL);
845 for (i = 0; i < dev_dax->nr_range; i++)
847 dev_dax->ranges = ranges;
848 ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
856 dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
859 * A dev_dax instance must be registered before mapping device
863 if (!device_is_registered(&dev_dax->dev))
866 rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
868 trim_dev_dax_range(dev_dax);
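
alloc_dev_dax_range() (812-868) reserves a chunk of the region resource, grows the ranges array with krealloc(), and registers a mapping child device once the dev_dax itself is registered, unwinding with trim_dev_dax_range() on failure. A reconstruction; names outside the matched lines are taken from context:

static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
		resource_size_t size)
{
	struct dax_region *dax_region = dev_dax->region;
	struct resource *res = &dax_region->res;
	struct device *dev = &dev_dax->dev;
	struct dev_dax_range *ranges;
	unsigned long pgoff = 0;
	struct resource *alloc;
	int i, rc;

	device_lock_assert(dax_region->dev);

	/* a 0-size allocation is only valid as the initial "seed" range */
	if (!size) {
		if (dev_WARN_ONCE(dev, dev_dax->nr_range,
					"0-size allocation must be first\n"))
			return -EBUSY;
		return 0;
	}

	alloc = __request_region(res, start, size, dev_name(dev), 0);
	if (!alloc)
		return -ENOMEM;

	ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
			* (dev_dax->nr_range + 1), GFP_KERNEL);
	if (!ranges) {
		__release_region(res, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}

	/* the new range begins at the page offset past all existing ranges */
	for (i = 0; i < dev_dax->nr_range; i++)
		pgoff += PHYS_PFN(range_len(&dev_dax->ranges[i].range));
	dev_dax->ranges = ranges;
	ranges[dev_dax->nr_range++] = (struct dev_dax_range) {
		.pgoff = pgoff,
		.range = {
			.start = alloc->start,
			.end = alloc->end,
		},
	};

	dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
			&alloc->start, &alloc->end);

	/*
	 * A dev_dax instance must be registered before mapping device
	 * children can be added; devm_create_dev_dax() adds the first one.
	 */
	if (!device_is_registered(&dev_dax->dev))
		return 0;

	rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
	if (rc)
		trim_dev_dax_range(dev_dax);
	return rc;
}
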
873 static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
875 int last_range = dev_dax->nr_range - 1;
876 struct dev_dax_range *dax_range = &dev_dax->ranges[last_range];
877 struct dax_region *dax_region = dev_dax->region;
880 struct device *dev = &dev_dax->dev;
907 struct dev_dax *dev_dax = to_dev_dax(dev);
911 size = dev_dax_size(dev_dax);
917 static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
923 return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align()));
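
alloc_is_aligned() (917-923) is the gatekeeper for every size change: an allocation must be a multiple of both the instance's align and memremap_compat_align(). Reconstructed from the fragments:

static bool alloc_is_aligned(struct dev_dax *dev_dax, resource_size_t size)
{
	/*
	 * The stricter of the device's configured alignment and the
	 * platform's minimum memremap granularity wins.
	 */
	return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align,
				memremap_compat_align()));
}
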
926 static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
928 resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
929 struct dax_region *dax_region = dev_dax->region;
930 struct device *dev = &dev_dax->dev;
933 for (i = dev_dax->nr_range - 1; i >= 0; i--) {
934 struct range *range = &dev_dax->ranges[i].range;
935 struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
943 trim_dev_dax_range(dev_dax);
957 if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
960 return adjust_dev_dax_range(dev_dax, adjust, range_len(range)
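
dev_dax_shrink() (926-960) walks the ranges array from the tail: whole ranges are dropped with trim_dev_dax_range(), and only the final range may be shrunk in place via adjust_dev_dax_range(). A rough reconstruction; the region-resource lookup and the mapping teardown are filled in from context and may differ in detail:

static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
{
	resource_size_t to_shrink = dev_dax_size(dev_dax) - size;
	struct dax_region *dax_region = dev_dax->region;
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = dev_dax->nr_range - 1; i >= 0; i--) {
		struct range *range = &dev_dax->ranges[i].range;
		struct dax_mapping *mapping = dev_dax->ranges[i].mapping;
		struct resource *adjust = NULL, *res;
		resource_size_t shrink;

		shrink = min_t(u64, to_shrink, range_len(range));
		if (shrink >= range_len(range)) {
			/* drop the whole range and its mapping device */
			devm_release_action(dax_region->dev,
					unregister_dax_mapping, &mapping->dev);
			trim_dev_dax_range(dev_dax);
			to_shrink -= shrink;
			if (!to_shrink)
				break;
			continue;
		}

		/* only the final (highest pgoff) range may shrink partially */
		for_each_dax_region_resource(dax_region, res)
			if (strcmp(res->name, dev_name(dev)) == 0
					&& res->start == range->start) {
				adjust = res;
				break;
			}

		if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1,
					"failed to find matching resource\n"))
			return -ENXIO;
		return adjust_dev_dax_range(dev_dax, adjust,
				range_len(range) - shrink);
	}
	return 0;
}
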
968 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
970 static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
975 if (dev_dax->nr_range == 0)
977 if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
979 last = &dev_dax->ranges[dev_dax->nr_range - 1];
982 for (i = 0; i < dev_dax->nr_range - 1; i++) {
983 struct dev_dax_range *dax_range = &dev_dax->ranges[i];
993 struct dev_dax *dev_dax, resource_size_t size)
996 resource_size_t dev_size = dev_dax_size(dev_dax);
998 struct device *dev = &dev_dax->dev;
1010 return dev_dax_shrink(dev_dax, size);
1013 if (dev_WARN_ONCE(dev, !alloc_is_aligned(dev_dax, to_alloc),
1025 return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc);
1034 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
1050 if (adjust_ok(dev_dax, res)) {
1051 rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc);
1054 rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
1070 struct dev_dax *dev_dax = to_dev_dax(dev);
1071 struct dax_region *dax_region = dev_dax->region;
1077 if (!alloc_is_aligned(dev_dax, val)) {
1088 rc = dev_dax_resize(dax_region, dev_dax, val);
1129 struct dev_dax *dev_dax = to_dev_dax(dev);
1130 struct dax_region *dax_region = dev_dax->region;
1148 if (alloc_is_aligned(dev_dax, to_alloc))
1149 rc = alloc_dev_dax_range(dev_dax, r.start, to_alloc);
1160 struct dev_dax *dev_dax = to_dev_dax(dev);
1162 return sprintf(buf, "%d\n", dev_dax->align);
1165 static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
1167 struct device *dev = &dev_dax->dev;
1170 for (i = 0; i < dev_dax->nr_range; i++) {
1171 size_t len = range_len(&dev_dax->ranges[i].range);
1173 if (!alloc_is_aligned(dev_dax, len)) {
1175 __func__, dev_dax->align, i);
1186 struct dev_dax *dev_dax = to_dev_dax(dev);
1187 struct dax_region *dax_region = dev_dax->region;
1210 align_save = dev_dax->align;
1211 dev_dax->align = val;
1212 rc = dev_dax_validate_align(dev_dax);
1214 dev_dax->align = align_save;
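
dev_dax_validate_align() (1165-1175) re-checks every allocated range against the current alignment, and align_store() (1210-1214) uses it in a try-then-roll-back pattern. A reconstruction of the helper plus the relevant store fragment:

static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
	struct device *dev = &dev_dax->dev;
	int i;

	for (i = 0; i < dev_dax->nr_range; i++) {
		size_t len = range_len(&dev_dax->ranges[i].range);

		if (!alloc_is_aligned(dev_dax, len)) {
			dev_dbg(dev, "%s: align %u invalid for range %d\n",
				__func__, dev_dax->align, i);
			return -EINVAL;
		}
	}

	return 0;
}

	/* align_store(): tentatively apply the new value, then validate */
	align_save = dev_dax->align;
	dev_dax->align = val;
	rc = dev_dax_validate_align(dev_dax);
	if (rc)
		dev_dax->align = align_save;
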
1222 static int dev_dax_target_node(struct dev_dax *dev_dax)
1224 struct dax_region *dax_region = dev_dax->region;
1232 struct dev_dax *dev_dax = to_dev_dax(dev);
1234 return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
1241 struct dev_dax *dev_dax = to_dev_dax(dev);
1242 struct dax_region *dax_region = dev_dax->region;
1245 if (dev_dax->nr_range < 1)
1248 start = dev_dax->ranges[0].range.start;
1275 struct dev_dax *dev_dax = to_dev_dax(dev);
1276 struct dax_region *dax_region = dev_dax->region;
1278 if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
1313 struct dev_dax *dev_dax = to_dev_dax(dev);
1314 struct dax_device *dax_dev = dev_dax->dax_dev;
1317 free_dev_dax_id(dev_dax);
1318 kfree(dev_dax->pgmap);
1319 kfree(dev_dax);
1327 struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
1332 struct dev_dax *dev_dax;
1337 dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
1338 if (!dev_dax)
1341 dev_dax->region = dax_region;
1349 dev_dax->id = data->id;
1357 rc = alloc_dev_dax_id(dev_dax);
1362 dev = &dev_dax->dev;
1364 dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id);
1366 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size);
1374 dev_dax->pgmap = kmemdup(data->pgmap,
1376 if (!dev_dax->pgmap) {
1386 dax_dev = alloc_dax(dev_dax, NULL);
1398 dev_dax->dax_dev = dax_dev;
1399 dev_dax->target_node = dax_region->target_node;
1400 dev_dax->align = dax_region->align;
1401 ida_init(&dev_dax->ida);
1411 kill_dev_dax(dev_dax);
1421 if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) {
1422 rc = devm_register_dax_mapping(dev_dax, 0);
1427 return dev_dax;
1430 kfree(dev_dax->pgmap);
1432 free_dev_dax_ranges(dev_dax);
1434 free_dev_dax_id(dev_dax);
1436 kfree(dev_dax);
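
The last matches (1430-1436) are devm_create_dev_dax()'s error unwind, which releases everything in the reverse order of setup. A sketch; the goto labels are assumed names, only the teardown order comes from the listing:

err_alloc_dax:
	kfree(dev_dax->pgmap);
err_pgmap:
	free_dev_dax_ranges(dev_dax);
err_range:
	free_dev_dax_id(dev_dax);
err_id:
	kfree(dev_dax);

	return ERR_PTR(rc);
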