Lines matching refs: nvdimm_bus. Every entry below is a reference from the libnvdimm bus core (drivers/nvdimm/bus.c); the number at the start of each entry is the line in that source file.
58 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
60 return nvdimm_bus->nd_desc->module;
65 static void nvdimm_bus_probe_start(struct nvdimm_bus *nvdimm_bus)
67 nvdimm_bus_lock(&nvdimm_bus->dev);
68 nvdimm_bus->probe_active++;
69 nvdimm_bus_unlock(&nvdimm_bus->dev);
72 static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
74 nvdimm_bus_lock(&nvdimm_bus->dev);
75 if (--nvdimm_bus->probe_active == 0)
76 wake_up(&nvdimm_bus->wait);
77 nvdimm_bus_unlock(&nvdimm_bus->dev);
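
Lines 65-77 bracket every driver probe with a counter: nvdimm_bus_probe_start() bumps probe_active under the bus lock, and nvdimm_bus_probe_end() wakes nvdimm_bus->wait when the count returns to zero, so the waiter at lines 918-926 further down can block until the bus is idle. Below is a minimal sketch of that counter-plus-waitqueue pattern; the example_* names are illustrative and not from the listing, and in mainline the real waiter also drops and re-takes the bus lock around the sleep, which is omitted here.

        /* Sketch only: illustrates the probe_active quiesce pattern above. */
        #include <linux/mutex.h>
        #include <linux/wait.h>

        struct example_bus {
                struct mutex lock;              /* stands in for nvdimm_bus_lock()/unlock() */
                int probe_active;
                wait_queue_head_t wait;
        };

        static void example_bus_init(struct example_bus *b)
        {
                mutex_init(&b->lock);
                init_waitqueue_head(&b->wait);
                b->probe_active = 0;
        }

        static void example_probe_start(struct example_bus *b)
        {
                mutex_lock(&b->lock);
                b->probe_active++;
                mutex_unlock(&b->lock);
        }

        static void example_probe_end(struct example_bus *b)
        {
                mutex_lock(&b->lock);
                if (--b->probe_active == 0)
                        wake_up(&b->wait);      /* let waiters re-check the condition */
                mutex_unlock(&b->lock);
        }

        static void example_wait_probe_idle(struct example_bus *b)
        {
                /* sleep until no probe is in flight */
                wait_event(b->wait, b->probe_active == 0);
        }
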
84 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
90 dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
93 nvdimm_bus_probe_start(nvdimm_bus);
101 nvdimm_bus_probe_end(nvdimm_bus);
103 dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
115 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
124 dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
132 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
140 dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
161 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
163 if (!nvdimm_bus)
203 static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
211 device_for_each_child(&nvdimm_bus->dev, &ctx,
215 static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
219 badrange_forget(&nvdimm_bus->badrange, phys, cleared);
222 nvdimm_clear_badblocks_regions(nvdimm_bus, phys, cleared);
228 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
236 if (!nvdimm_bus)
239 nd_desc = nvdimm_bus->nd_desc;
277 nvdimm_account_cleared_poison(nvdimm_bus, phys, clear_err.cleared);
296 struct nvdimm_bus *nvdimm_bus;
298 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
299 ida_simple_remove(&nd_ida, nvdimm_bus->id);
300 kfree(nvdimm_bus);
313 struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
326 struct nvdimm_bus *to_nvdimm_bus(struct device *dev)
328 struct nvdimm_bus *nvdimm_bus;
330 nvdimm_bus = container_of(dev, struct nvdimm_bus, dev);
332 return nvdimm_bus;
336 struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
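
Lines 313-336 are the accessor helpers: walk_to_nvdimm_bus() climbs a device's parents until it reaches the bus device, to_nvdimm_bus() is the container_of() conversion, and nvdimm_to_bus() maps a DIMM to its bus. Only the prototypes and the container_of() line appear above, so the following is a hedged sketch of the walk; the is_nvdimm_bus() check and the loop body are assumptions about the surrounding file, not lines shown in this listing.

        /* Hedged sketch: walk_to_nvdimm_bus() body and is_nvdimm_bus() are
         * assumed from context, not taken from the listing above. */
        #include <linux/device.h>

        static bool is_nvdimm_bus(struct device *dev)
        {
                /* the bus device is recognized by its release callback */
                return dev->release == nvdimm_bus_release;
        }

        struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev)
        {
                struct device *dev;

                /* climb dev->parent until the ndbusN device is reached */
                for (dev = nd_dev; dev; dev = dev->parent)
                        if (is_nvdimm_bus(dev))
                                return to_nvdimm_bus(dev);

                return NULL;
        }
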
342 struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
345 struct nvdimm_bus *nvdimm_bus;
348 nvdimm_bus = kzalloc(sizeof(*nvdimm_bus), GFP_KERNEL);
349 if (!nvdimm_bus)
351 INIT_LIST_HEAD(&nvdimm_bus->list);
352 INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
353 init_waitqueue_head(&nvdimm_bus->wait);
354 nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
355 if (nvdimm_bus->id < 0) {
356 kfree(nvdimm_bus);
359 mutex_init(&nvdimm_bus->reconfig_mutex);
360 badrange_init(&nvdimm_bus->badrange);
361 nvdimm_bus->nd_desc = nd_desc;
362 nvdimm_bus->dev.parent = parent;
363 nvdimm_bus->dev.type = &nvdimm_bus_dev_type;
364 nvdimm_bus->dev.groups = nd_desc->attr_groups;
365 nvdimm_bus->dev.bus = &nvdimm_bus_type;
366 nvdimm_bus->dev.of_node = nd_desc->of_node;
367 dev_set_name(&nvdimm_bus->dev, "ndbus%d", nvdimm_bus->id);
368 rc = device_register(&nvdimm_bus->dev);
370 dev_dbg(&nvdimm_bus->dev, "registration failed: %d\n", rc);
374 return nvdimm_bus;
376 put_device(&nvdimm_bus->dev);
381 void nvdimm_bus_unregister(struct nvdimm_bus *nvdimm_bus)
383 if (!nvdimm_bus)
385 device_unregister(&nvdimm_bus->dev);
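
nvdimm_bus_register() (lines 342-376) allocates the bus, takes an id from nd_ida, fills in the embedded struct device (type, groups, bus type, of_node) and registers it as ndbusN; nvdimm_bus_unregister() (lines 381-385) tears it down, and the release callback around lines 296-300 frees the id and the structure once the last reference drops. Below is a minimal, hypothetical provider sketch showing how these two exported calls are typically paired; the platform-driver wrapper and all example_* names are illustrative, and the provider_name field is an assumption, while module, attr_groups and of_node are the descriptor fields visible in this listing.

        /* Hypothetical provider sketch: example_* names are illustrative. */
        #include <linux/module.h>
        #include <linux/platform_device.h>
        #include <linux/libnvdimm.h>

        static struct nvdimm_bus *example_bus;

        static struct nvdimm_bus_descriptor example_nd_desc = {
                .provider_name  = "example_provider",
                .module         = THIS_MODULE,
                /* .attr_groups, .ndctl, .of_node, etc. as the provider requires */
        };

        static int example_probe(struct platform_device *pdev)
        {
                /* registers the new ndbusN device as a child of pdev->dev */
                example_bus = nvdimm_bus_register(&pdev->dev, &example_nd_desc);
                if (!example_bus)
                        return -ENOMEM;
                return 0;
        }

        static void example_remove(struct platform_device *pdev)
        {
                /* drops the device; the release callback (lines 296-300 above)
                 * then frees the id and the nvdimm_bus structure */
                nvdimm_bus_unregister(example_bus);
        }
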
432 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
435 list_del_init(&nvdimm_bus->list);
438 wait_event(nvdimm_bus->wait,
439 atomic_read(&nvdimm_bus->ioctl_active) == 0);
442 device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
444 spin_lock(&nvdimm_bus->badrange.lock);
445 free_badrange_list(&nvdimm_bus->badrange.list);
446 spin_unlock(&nvdimm_bus->badrange.lock);
448 nvdimm_bus_destroy_ndctl(nvdimm_bus);
455 struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
458 rc = nvdimm_bus_create_ndctl(nvdimm_bus);
463 list_add_tail(&nvdimm_bus->list, &nvdimm_bus_list);
467 dev_set_drvdata(dev, nvdimm_bus->nd_desc);
736 int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
738 dev_t devt = MKDEV(nvdimm_bus_major, nvdimm_bus->id);
741 dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
742 "ndctl%d", nvdimm_bus->id);
745 dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
746 nvdimm_bus->id, PTR_ERR(dev));
750 void nvdimm_bus_destroy_ndctl(struct nvdimm_bus *nvdimm_bus)
752 device_destroy(nd_class, MKDEV(nvdimm_bus_major, nvdimm_bus->id));
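
nvdimm_bus_create_ndctl() and nvdimm_bus_destroy_ndctl() (lines 736-752) expose each bus as a /dev/ndctlN character device, passing the nvdimm_bus pointer as drvdata so the ioctl path can recover it later. Below is a short sketch of that device_create()/device_destroy() pattern; example_class and example_major are hypothetical stand-ins for nd_class and nvdimm_bus_major, which the real code initializes elsewhere in the same file.

        /* Sketch only: assumes a class and char-dev major set up elsewhere. */
        #include <linux/device.h>
        #include <linux/kdev_t.h>
        #include <linux/err.h>

        static struct class *example_class;     /* stands in for nd_class */
        static int example_major;               /* stands in for nvdimm_bus_major */

        static int example_create_ctl(struct device *parent, void *drvdata, int id)
        {
                dev_t devt = MKDEV(example_major, id);
                struct device *dev;

                /* drvdata (here the bus) is retrievable later via dev_get_drvdata() */
                dev = device_create(example_class, parent, devt, drvdata,
                                "examplectl%d", id);
                return PTR_ERR_OR_ZERO(dev);
        }

        static void example_destroy_ctl(int id)
        {
                device_destroy(example_class, MKDEV(example_major, id));
        }
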
918 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
921 if (nvdimm_bus->probe_active == 0)
925 wait_event(nvdimm_bus->wait,
926 nvdimm_bus->probe_active == 0);
979 static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
982 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
994 return device_for_each_child(&nvdimm_bus->dev, data,
1001 wait_nvdimm_bus_probe_idle(&nvdimm_bus->dev);
1007 static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
1010 struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
1013 struct device *dev = &nvdimm_bus->dev;
1163 rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
1174 nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
1213 struct nvdimm_bus *nvdimm_bus, *found = NULL;
1220 list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
1224 dev = device_find_child(&nvdimm_bus->dev,
1229 found = nvdimm_bus;
1230 } else if (nvdimm_bus->id == id) {
1231 found = nvdimm_bus;
1235 atomic_inc(&nvdimm_bus->ioctl_active);
1244 nvdimm_bus = found;
1245 rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
1249 if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
1250 wake_up(&nvdimm_bus->wait);
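
The top-level ioctl path (lines 1213-1250) finds the bus under the global bus-list mutex, pins it by incrementing ioctl_active before that mutex is dropped, and wakes nvdimm_bus->wait when the last in-flight ioctl completes; the teardown path at lines 432-439 waits on the same waitqueue for ioctl_active to reach zero before unregistering children. A compact sketch of that enter/exit pairing, with illustrative names only:

        /* Sketch only: the atomic-counter "drain before teardown" pattern above. */
        #include <linux/atomic.h>
        #include <linux/wait.h>

        struct example_ioctl_gate {
                atomic_t ioctl_active;
                wait_queue_head_t wait;
        };

        static void example_gate_init(struct example_ioctl_gate *g)
        {
                atomic_set(&g->ioctl_active, 0);
                init_waitqueue_head(&g->wait);
        }

        /* ioctl entry: pin the bus so teardown waits for us */
        static void example_ioctl_enter(struct example_ioctl_gate *g)
        {
                atomic_inc(&g->ioctl_active);
        }

        /* ioctl exit: if we were the last in-flight caller, wake the teardown path */
        static void example_ioctl_exit(struct example_ioctl_gate *g)
        {
                if (atomic_dec_and_test(&g->ioctl_active))
                        wake_up(&g->wait);
        }

        /* teardown: block until no ioctl is still running against this bus */
        static void example_wait_ioctls_drained(struct example_ioctl_gate *g)
        {
                wait_event(g->wait, atomic_read(&g->ioctl_active) == 0);
        }
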