Lines Matching refs:nbd (drivers/block/nbd.c)

14 #define pr_fmt(fmt) "nbd: " fmt
45 #include <linux/nbd.h>
46 #include <linux/nbd-netlink.h>
50 #include <trace/events/nbd.h>
69 struct nbd_device *nbd;
131 pid_t pid; /* pid of nbd-client, if attached */
145 struct nbd_device *nbd;
159 #define nbd_name(nbd) ((nbd)->disk->disk_name)
167 static int nbd_dev_dbg_init(struct nbd_device *nbd);
168 static void nbd_dev_dbg_close(struct nbd_device *nbd);
169 static void nbd_config_put(struct nbd_device *nbd);
173 static void nbd_disconnect_and_put(struct nbd_device *nbd);
175 static inline struct device *nbd_to_dev(struct nbd_device *nbd)
177 return disk_to_dev(nbd->disk);
225 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
227 return sprintf(buf, "%d\n", nbd->pid);
239 struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
241 return sprintf(buf, "%s\n", nbd->backend ?: "");
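
The pid and backend attributes above (pid_show/backend_show) surface through sysfs. A minimal userspace sketch that reads them, assuming the device node is nbd0 and the standard /sys/block layout:

    /* Sketch: read the attached nbd-client pid published by pid_show(). */
    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        FILE *f = fopen("/sys/block/nbd0/pid", "r");

        if (!f)
            return 1;   /* no client attached, or no such device */
        if (fgets(buf, sizeof(buf), f))
            printf("nbd-client pid: %s", buf);
        fclose(f);
        return 0;
    }

The backend identifier reads the same way from /sys/block/nbd0/backend when one was set over netlink.
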
249 static void nbd_dev_remove(struct nbd_device *nbd)
251 struct gendisk *disk = nbd->disk;
254 blk_mq_free_tag_set(&nbd->tag_set);
261 idr_remove(&nbd_index_idr, nbd->index);
263 destroy_workqueue(nbd->recv_workq);
272 static void nbd_put(struct nbd_device *nbd)
274 if (!refcount_dec_and_test(&nbd->refs))
278 if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
279 queue_work(nbd_del_wq, &nbd->remove_work);
281 nbd_dev_remove(nbd);
290 static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
293 if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
298 args->index = nbd->index;
304 if (atomic_dec_return(&nbd->config->live_connections) == 0) {
306 &nbd->config->runtime_flags)) {
308 &nbd->config->runtime_flags);
309 dev_info(nbd_to_dev(nbd),
319 static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
331 nbd->config->bytesize = bytesize;
332 nbd->config->blksize_bits = __ffs(blksize);
334 if (!nbd->pid)
337 if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
338 nbd->disk->queue->limits.discard_granularity = blksize;
339 blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
341 blk_queue_logical_block_size(nbd->disk->queue, blksize);
342 blk_queue_physical_block_size(nbd->disk->queue, blksize);
345 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
346 if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
347 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
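
nbd_set_size() stores the block size as a shift count and publishes capacity in 512-byte sectors, which is why the listing shows __ffs(blksize) and bytesize >> 9. A small sketch of the same arithmetic (userspace, with __builtin_ctz standing in for the kernel's __ffs on a power-of-two value):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bytesize = 1ULL << 30;  /* e.g. a 1 GiB export */
        uint32_t blksize = 4096;         /* must be a power of two */

        unsigned int blksize_bits = __builtin_ctz(blksize); /* 12 */
        uint64_t sectors = bytesize >> 9; /* capacity in 512-byte sectors */

        printf("blksize_bits=%u sectors=%llu\n",
               blksize_bits, (unsigned long long)sectors);
        return 0;
    }
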
355 dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
364 static void sock_shutdown(struct nbd_device *nbd)
366 struct nbd_config *config = nbd->config;
377 nbd_mark_nsock_dead(nbd, nsock, 0);
380 dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
399 static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
401 if (refcount_inc_not_zero(&nbd->config_refs)) {
403 * Add smp_mb__after_atomic to ensure that reading nbd->config_refs
404 * and reading nbd->config is ordered. The pair is the barrier in
405 * nbd_alloc_and_init_config(), which prevents nbd->config_refs from
406 * being seen as set before nbd->config.
409 return nbd->config;
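
The comment above describes one half of a publish/consume pairing: the writer must make nbd->config visible before nbd->config_refs becomes nonzero, and the reader must not load nbd->config before its refcount check. A condensed sketch of both halves as the listing implies them (kernel primitives, not a standalone program):

    /* Writer side, cf. nbd_alloc_and_init_config(): publish the pointer,
     * then let the refcount become visible, with a barrier in between. */
    nbd->config = config;
    smp_mb__before_atomic();
    refcount_set(&nbd->config_refs, 1);

    /* Reader side, cf. nbd_get_config_unlocked(): take a reference, then
     * order the following load of nbd->config after the refcount read. */
    if (refcount_inc_not_zero(&nbd->config_refs)) {
        smp_mb__after_atomic();
        return nbd->config;
    }
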
418 struct nbd_device *nbd = cmd->nbd;
429 config = nbd_get_config_unlocked(nbd);
438 (config->num_connections == 1 && nbd->tag_set.timeout)) {
439 dev_err_ratelimited(nbd_to_dev(nbd),
461 nbd_mark_nsock_dead(nbd, nsock, 1);
466 nbd_config_put(nbd);
471 if (!nbd->tag_set.timeout) {
478 dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
488 nbd_config_put(nbd);
493 nbd_config_put(nbd);
497 dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
502 sock_shutdown(nbd);
503 nbd_config_put(nbd);
509 static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
517 dev_err_ratelimited(disk_to_dev(nbd->disk),
554 static int sock_xmit(struct nbd_device *nbd, int index, int send,
557 struct nbd_config *config = nbd->config;
560 return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
573 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
576 struct nbd_config *config = nbd->config;
597 dev_err_ratelimited(disk_to_dev(nbd->disk),
633 trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
635 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
638 result = sock_xmit(nbd, index, 1, &from,
655 dev_err_ratelimited(disk_to_dev(nbd->disk),
673 dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
684 result = sock_xmit(nbd, index, 1, &from, flags, &sent);
696 dev_err(disk_to_dev(nbd->disk),
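
nbd_send_cmd() serializes each request into the classic NBD wire header (big-endian throughout), then streams write payload from the bio. A hedged userspace sketch of that header; the local struct name is hypothetical, but the layout follows struct nbd_request in <linux/nbd.h>:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>  /* htonl() */
    #include <endian.h>     /* htobe64() */

    struct nbd_wire_req {           /* mirrors struct nbd_request */
        uint32_t magic;             /* NBD_REQUEST_MAGIC, 0x25609513 */
        uint32_t type;              /* READ=0 WRITE=1 DISC=2 FLUSH=3 TRIM=4 */
        char     handle[8];         /* opaque, echoed back in the reply */
        uint64_t from;              /* byte offset into the export */
        uint32_t len;               /* byte count */
    } __attribute__((packed));

    static void fill_read_req(struct nbd_wire_req *r, uint64_t off,
                              uint32_t len, const char handle[8])
    {
        memset(r, 0, sizeof(*r));
        r->magic = htonl(0x25609513);
        r->type = htonl(0);         /* NBD_CMD_READ */
        memcpy(r->handle, handle, sizeof(r->handle));
        r->from = htobe64(off);
        r->len = htonl(len);
    }
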
719 static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
728 result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
730 if (!nbd_disconnected(nbd->config))
731 dev_err(disk_to_dev(nbd->disk),
737 dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
746 static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
760 if (hwq < nbd->tag_set.nr_hw_queues)
761 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
764 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
773 dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
779 dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
785 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
791 dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
797 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
803 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
809 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
817 result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
819 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
827 if (nbd_disconnected(nbd->config)) {
834 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
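
nbd_read_reply() pulls the fixed reply header (struct nbd_reply: magic 0x67446698, an error word, and the echoed 8-byte handle), and nbd_handle_reply() unpacks that handle into a blk-mq unique tag plus a per-command cookie; the cookie is what the "Double reply"/"cmd_cookie" checks above compare, so a late reply for an earlier use of the same tag is rejected. A sketch of the packing, assuming the cookie sits in the high 32 bits as those checks suggest:

    #include <stdint.h>

    #define NBD_COOKIE_BITS 32

    /* Wire handle: per-command cookie above the blk-mq unique tag. */
    static uint64_t pack_handle(uint32_t cookie, uint32_t unique_tag)
    {
        return ((uint64_t)cookie << NBD_COOKIE_BITS) | unique_tag;
    }

    static uint32_t handle_to_tag(uint64_t h)    { return (uint32_t)h; }
    static uint32_t handle_to_cookie(uint64_t h) { return (uint32_t)(h >> NBD_COOKIE_BITS); }
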
849 struct nbd_device *nbd = args->nbd;
850 struct nbd_config *config = nbd->config;
851 struct request_queue *q = nbd->disk->queue;
859 if (nbd_read_reply(nbd, nsock->sock, &reply))
869 dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
874 cmd = nbd_handle_reply(nbd, args->index, &reply);
895 nbd_mark_nsock_dead(nbd, nsock, 1);
898 nbd_config_put(nbd);
924 static void nbd_clear_que(struct nbd_device *nbd)
926 blk_mq_quiesce_queue(nbd->disk->queue);
927 blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
928 blk_mq_unquiesce_queue(nbd->disk->queue);
929 dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
932 static int find_fallback(struct nbd_device *nbd, int index)
934 struct nbd_config *config = nbd->config;
943 dev_err_ratelimited(disk_to_dev(nbd->disk),
966 dev_err_ratelimited(disk_to_dev(nbd->disk),
975 static int wait_for_reconnect(struct nbd_device *nbd)
977 struct nbd_config *config = nbd->config;
994 struct nbd_device *nbd = cmd->nbd;
999 config = nbd_get_config_unlocked(nbd);
1001 dev_err_ratelimited(disk_to_dev(nbd->disk),
1007 dev_err_ratelimited(disk_to_dev(nbd->disk),
1009 nbd_config_put(nbd);
1018 index = find_fallback(nbd, index);
1021 if (wait_for_reconnect(nbd)) {
1031 sock_shutdown(nbd);
1032 nbd_config_put(nbd);
1053 ret = nbd_send_cmd(nbd, cmd, index);
1061 dev_err_ratelimited(disk_to_dev(nbd->disk),
1063 nbd_mark_nsock_dead(nbd, nsock, 1);
1069 nbd_config_put(nbd);
1106 static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1117 dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1126 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1129 struct nbd_config *config = nbd->config;
1138 sock = nbd_get_socket(nbd, arg, &err);
1146 blk_mq_freeze_queue(nbd->disk->queue);
1148 if (!netlink && !nbd->task_setup &&
1150 nbd->task_setup = current;
1153 (nbd->task_setup != current ||
1155 dev_err(disk_to_dev(nbd->disk),
1186 blk_mq_unfreeze_queue(nbd->disk->queue);
1191 blk_mq_unfreeze_queue(nbd->disk->queue);
1196 static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1198 struct nbd_config *config = nbd->config;
1204 sock = nbd_get_socket(nbd, arg, &err);
1226 if (nbd->tag_set.timeout)
1227 sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1229 refcount_inc(&nbd->config_refs);
1236 args->nbd = nbd;
1247 queue_work(nbd->recv_workq, &args->work);
1258 static void nbd_bdev_reset(struct nbd_device *nbd)
1260 if (disk_openers(nbd->disk) > 1)
1262 set_capacity(nbd->disk, 0);
1265 static void nbd_parse_flags(struct nbd_device *nbd)
1267 struct nbd_config *config = nbd->config;
1269 set_disk_ro(nbd->disk, true);
1271 set_disk_ro(nbd->disk, false);
1274 blk_queue_write_cache(nbd->disk->queue, true, true);
1276 blk_queue_write_cache(nbd->disk->queue, true, false);
1279 blk_queue_write_cache(nbd->disk->queue, false, false);
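
nbd_parse_flags() translates the server's transmission flags into block-layer state: READ_ONLY sets the disk read-only, SEND_FLUSH enables a write-back cache, and SEND_FUA additionally enables FUA (only meaningful when FLUSH is offered). A condensed sketch of the mapping, with flag values as defined in <linux/nbd.h>:

    #include <stdbool.h>
    #include <stdint.h>

    #define NBD_FLAG_READ_ONLY  (1 << 1)
    #define NBD_FLAG_SEND_FLUSH (1 << 2)
    #define NBD_FLAG_SEND_FUA   (1 << 3)

    static void parse_flags(uint64_t flags, bool *ro, bool *wc, bool *fua)
    {
        *ro = flags & NBD_FLAG_READ_ONLY;
        *wc = flags & NBD_FLAG_SEND_FLUSH;          /* write-back cache */
        *fua = *wc && (flags & NBD_FLAG_SEND_FUA);  /* FUA needs FLUSH too */
    }
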
1282 static void send_disconnects(struct nbd_device *nbd)
1284 struct nbd_config *config = nbd->config;
1298 ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1300 dev_err(disk_to_dev(nbd->disk),
1306 static int nbd_disconnect(struct nbd_device *nbd)
1308 struct nbd_config *config = nbd->config;
1310 dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1312 set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1313 send_disconnects(nbd);
1317 static void nbd_clear_sock(struct nbd_device *nbd)
1319 sock_shutdown(nbd);
1320 nbd_clear_que(nbd);
1321 nbd->task_setup = NULL;
1324 static void nbd_config_put(struct nbd_device *nbd)
1326 if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1327 &nbd->config_lock)) {
1328 struct nbd_config *config = nbd->config;
1329 nbd_dev_dbg_close(nbd);
1330 invalidate_disk(nbd->disk);
1331 if (nbd->config->bytesize)
1332 kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
1335 device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1336 nbd->pid = 0;
1339 device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
1340 kfree(nbd->backend);
1341 nbd->backend = NULL;
1343 nbd_clear_sock(nbd);
1352 kfree(nbd->config);
1353 nbd->config = NULL;
1355 nbd->tag_set.timeout = 0;
1356 nbd->disk->queue->limits.discard_granularity = 0;
1357 blk_queue_max_discard_sectors(nbd->disk->queue, 0);
1359 mutex_unlock(&nbd->config_lock);
1360 nbd_put(nbd);
1365 static int nbd_start_device(struct nbd_device *nbd)
1367 struct nbd_config *config = nbd->config;
1371 if (nbd->pid)
1377 dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1381 blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
1382 nbd->pid = task_pid_nr(current);
1384 nbd_parse_flags(nbd);
1386 error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1388 dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
1393 nbd_dev_dbg_init(nbd);
1399 sock_shutdown(nbd);
1409 flush_workqueue(nbd->recv_workq);
1413 if (nbd->tag_set.timeout)
1415 nbd->tag_set.timeout;
1417 refcount_inc(&nbd->config_refs);
1419 args->nbd = nbd;
1422 queue_work(nbd->recv_workq, &args->work);
1424 return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
1427 static int nbd_start_device_ioctl(struct nbd_device *nbd)
1429 struct nbd_config *config = nbd->config;
1432 ret = nbd_start_device(nbd);
1437 set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1438 mutex_unlock(&nbd->config_lock);
1442 sock_shutdown(nbd);
1443 nbd_clear_que(nbd);
1446 flush_workqueue(nbd->recv_workq);
1447 mutex_lock(&nbd->config_lock);
1448 nbd_bdev_reset(nbd);
1457 static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
1459 nbd_clear_sock(nbd);
1460 disk_force_media_change(nbd->disk);
1461 nbd_bdev_reset(nbd);
1463 &nbd->config->runtime_flags))
1464 nbd_config_put(nbd);
1467 static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1469 nbd->tag_set.timeout = timeout * HZ;
1471 blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1473 blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
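
nbd_set_cmd_timeout() takes a value in seconds and converts it to jiffies for both the tag set and the request queue, keeping a 30-second queue default when 0 is passed. On the ioctl path the value arrives via NBD_SET_TIMEOUT; a minimal sketch:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/nbd.h>

    /* Sketch: arm a command timeout (in seconds) on an nbd device node. */
    static int set_nbd_timeout(const char *dev, unsigned long secs)
    {
        int fd = open(dev, O_RDWR);
        if (fd < 0)
            return -1;
        int ret = ioctl(fd, NBD_SET_TIMEOUT, secs);
        close(fd);
        return ret;
    }
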
1477 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1480 struct nbd_config *config = nbd->config;
1485 return nbd_disconnect(nbd);
1487 nbd_clear_sock_ioctl(nbd);
1490 return nbd_add_socket(nbd, arg, false);
1492 return nbd_set_size(nbd, config->bytesize, arg);
1494 return nbd_set_size(nbd, arg, nbd_blksize(config));
1498 return nbd_set_size(nbd, bytesize, nbd_blksize(config));
1500 nbd_set_cmd_timeout(nbd, arg);
1507 return nbd_start_device_ioctl(nbd);
1527 struct nbd_device *nbd = bdev->bd_disk->private_data;
1528 struct nbd_config *config = nbd->config;
1534 /* The block layer will pass back some non-nbd ioctls in case we have
1540 mutex_lock(&nbd->config_lock);
1542 /* Don't allow ioctl operations on an nbd device that was created with
1547 error = __nbd_ioctl(bdev, nbd, cmd, arg);
1549 dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1550 mutex_unlock(&nbd->config_lock);
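
Taken together, the dispatch above is the legacy control surface a pre-netlink nbd-client drives: hand the kernel a connected socket, set the geometry, then block in NBD_DO_IT until disconnect. A hedged skeleton (error handling trimmed; sock_fd is assumed to be a socket that has already completed the NBD handshake with a server):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/nbd.h>

    static int attach_nbd(const char *dev, int sock_fd, unsigned long blocks)
    {
        int fd = open(dev, O_RDWR);
        if (fd < 0)
            return -1;

        ioctl(fd, NBD_SET_BLKSIZE, 4096UL);       /* block size in bytes */
        ioctl(fd, NBD_SET_SIZE_BLOCKS, blocks);   /* size in those blocks */
        ioctl(fd, NBD_SET_SOCK, (unsigned long)sock_fd);

        /* Blocks in nbd_start_device_ioctl() until NBD_DISCONNECT or the
         * last connection dies. */
        int ret = ioctl(fd, NBD_DO_IT, 0);

        close(fd);
        return ret;
    }
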
1554 static int nbd_alloc_and_init_config(struct nbd_device *nbd)
1558 if (WARN_ON(nbd->config))
1576 nbd->config = config;
1578 * Order refcount_set(&nbd->config_refs, 1) and nbd->config assignment,
1580 * So nbd_get_config_unlocked() won't see nbd->config as null after
1584 refcount_set(&nbd->config_refs, 1);
1591 struct nbd_device *nbd;
1596 nbd = disk->private_data;
1597 if (!nbd) {
1601 if (!refcount_inc_not_zero(&nbd->refs)) {
1606 config = nbd_get_config_unlocked(nbd);
1608 mutex_lock(&nbd->config_lock);
1609 if (refcount_inc_not_zero(&nbd->config_refs)) {
1610 mutex_unlock(&nbd->config_lock);
1613 ret = nbd_alloc_and_init_config(nbd);
1615 mutex_unlock(&nbd->config_lock);
1619 refcount_inc(&nbd->refs);
1620 mutex_unlock(&nbd->config_lock);
1634 struct nbd_device *nbd = disk->private_data;
1636 if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1638 nbd_disconnect_and_put(nbd);
1640 nbd_config_put(nbd);
1641 nbd_put(nbd);
1646 struct nbd_device *nbd = disk->private_data;
1648 kfree(nbd);
1665 struct nbd_device *nbd = s->private;
1667 if (nbd->pid)
1668 seq_printf(s, "recv: %d\n", nbd->pid);
1677 struct nbd_device *nbd = s->private;
1678 u32 flags = nbd->config->flags;
1700 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1703 struct nbd_config *config = nbd->config;
1708 dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1710 dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1711 nbd_name(nbd));
1716 debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1718 debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1720 debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1725 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1727 debugfs_remove_recursive(nbd->config->dbg_dir);
1734 dbg_dir = debugfs_create_dir("nbd", NULL);
1750 static int nbd_dev_dbg_init(struct nbd_device *nbd)
1755 static void nbd_dev_dbg_close(struct nbd_device *nbd)
1774 cmd->nbd = set->driver_data;
1789 struct nbd_device *nbd;
1793 nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1794 if (!nbd)
1797 nbd->tag_set.ops = &nbd_mq_ops;
1798 nbd->tag_set.nr_hw_queues = 1;
1799 nbd->tag_set.queue_depth = 128;
1800 nbd->tag_set.numa_node = NUMA_NO_NODE;
1801 nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1802 nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
1804 nbd->tag_set.driver_data = nbd;
1805 INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
1806 nbd->backend = NULL;
1808 err = blk_mq_alloc_tag_set(&nbd->tag_set);
1814 err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1819 err = idr_alloc(&nbd_index_idr, nbd, 0,
1824 nbd->index = index;
1829 disk = blk_mq_alloc_disk(&nbd->tag_set, NULL);
1834 nbd->disk = disk;
1836 nbd->recv_workq = alloc_workqueue("nbd%d-recv",
1838 WQ_UNBOUND, 0, nbd->index);
1839 if (!nbd->recv_workq) {
1840 dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1856 mutex_init(&nbd->config_lock);
1857 refcount_set(&nbd->config_refs, 0);
1862 refcount_set(&nbd->refs, 0);
1863 INIT_LIST_HEAD(&nbd->list);
1868 disk->private_data = nbd;
1869 sprintf(disk->disk_name, "nbd%d", index);
1877 refcount_set(&nbd->refs, refs);
1879 return nbd;
1882 destroy_workqueue(nbd->recv_workq);
1890 blk_mq_free_tag_set(&nbd->tag_set);
1892 kfree(nbd);
1899 struct nbd_device *nbd;
1904 idr_for_each_entry(&nbd_index_idr, nbd, id) {
1905 if (refcount_read(&nbd->config_refs) ||
1906 test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
1908 if (refcount_inc_not_zero(&nbd->refs))
1909 return nbd;
1942 static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
1944 struct nbd_config *config = nbd->config;
1955 return nbd_set_size(nbd, bytes, bsize);
1961 struct nbd_device *nbd;
1994 nbd = nbd_find_get_unused();
1996 nbd = idr_find(&nbd_index_idr, index);
1997 if (nbd) {
1998 if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
1999 test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
2000 !refcount_inc_not_zero(&nbd->refs)) {
2010 if (!nbd) {
2011 nbd = nbd_dev_add(index, 2);
2012 if (IS_ERR(nbd)) {
2014 return PTR_ERR(nbd);
2018 mutex_lock(&nbd->config_lock);
2019 if (refcount_read(&nbd->config_refs)) {
2020 mutex_unlock(&nbd->config_lock);
2021 nbd_put(nbd);
2024 pr_err("nbd%d already in use\n", index);
2028 ret = nbd_alloc_and_init_config(nbd);
2030 mutex_unlock(&nbd->config_lock);
2031 nbd_put(nbd);
2036 config = nbd->config;
2038 ret = nbd_genl_size_set(info, nbd);
2043 nbd_set_cmd_timeout(nbd,
2065 &nbd->flags))
2069 &nbd->flags))
2070 refcount_inc(&nbd->refs);
2103 ret = nbd_add_socket(nbd, fd, true);
2108 ret = nbd_start_device(nbd);
2112 nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
2114 if (!nbd->backend) {
2119 ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
2121 dev_err(disk_to_dev(nbd->disk),
2127 mutex_unlock(&nbd->config_lock);
2130 refcount_inc(&nbd->config_refs);
2131 nbd_connect_reply(info, nbd->index);
2133 nbd_config_put(nbd);
2135 nbd_put(nbd);
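
nbd_genl_connect() is the netlink equivalent: userspace resolves the "nbd" generic netlink family and sends NBD_CMD_CONNECT with the socket fd nested under NBD_ATTR_SOCKETS. A rough libnl-based sketch, heavily hedged (attribute and constant names follow <linux/nbd-netlink.h>; a real client also installs a reply handler to learn the allocated index from nbd_connect_reply()):

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nbd-netlink.h>

    static int nbd_nl_connect(int index, int sock_fd, unsigned long long bytes)
    {
        struct nl_sock *nl = nl_socket_alloc();
        if (!nl || genl_connect(nl) < 0)
            return -1;
        int family = genl_ctrl_resolve(nl, NBD_GENL_FAMILY_NAME); /* "nbd" */

        struct nl_msg *msg = nlmsg_alloc();
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NBD_CMD_CONNECT, NBD_GENL_VERSION);
        nla_put_u32(msg, NBD_ATTR_INDEX, index);   /* omit to auto-allocate */
        nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, bytes);

        struct nlattr *socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
        struct nlattr *item = nla_nest_start(msg, NBD_SOCK_ITEM);
        nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
        nla_nest_end(msg, item);
        nla_nest_end(msg, socks);

        int ret = nl_send_auto(nl, msg);
        nlmsg_free(msg);
        nl_socket_free(nl);
        return ret < 0 ? -1 : 0;
    }
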
2139 static void nbd_disconnect_and_put(struct nbd_device *nbd)
2141 mutex_lock(&nbd->config_lock);
2142 nbd_disconnect(nbd);
2143 sock_shutdown(nbd);
2144 wake_up(&nbd->config->conn_wait);
2149 flush_workqueue(nbd->recv_workq);
2150 nbd_clear_que(nbd);
2151 nbd->task_setup = NULL;
2152 mutex_unlock(&nbd->config_lock);
2155 &nbd->config->runtime_flags))
2156 nbd_config_put(nbd);
2161 struct nbd_device *nbd;
2173 nbd = idr_find(&nbd_index_idr, index);
2174 if (!nbd) {
2179 if (!refcount_inc_not_zero(&nbd->refs)) {
2185 if (!refcount_inc_not_zero(&nbd->config_refs))
2187 nbd_disconnect_and_put(nbd);
2188 nbd_config_put(nbd);
2190 nbd_put(nbd);
2196 struct nbd_device *nbd = NULL;
2211 nbd = idr_find(&nbd_index_idr, index);
2212 if (!nbd) {
2217 if (nbd->backend) {
2220 nbd->backend)) {
2222 dev_err(nbd_to_dev(nbd),
2224 nbd->backend);
2229 dev_err(nbd_to_dev(nbd), "must specify backend\n");
2233 if (!refcount_inc_not_zero(&nbd->refs)) {
2240 config = nbd_get_config_unlocked(nbd);
2242 dev_err(nbd_to_dev(nbd),
2244 nbd_put(nbd);
2248 mutex_lock(&nbd->config_lock);
2250 !nbd->pid) {
2251 dev_err(nbd_to_dev(nbd),
2257 ret = nbd_genl_size_set(info, nbd);
2262 nbd_set_cmd_timeout(nbd,
2273 &nbd->flags))
2277 &nbd->flags))
2278 refcount_inc(&nbd->refs);
2315 ret = nbd_reconnect_socket(nbd, fd);
2321 dev_info(nbd_to_dev(nbd), "reconnected socket\n");
2325 mutex_unlock(&nbd->config_lock);
2326 nbd_config_put(nbd);
2327 nbd_put(nbd);
2329 nbd_put(nbd);
2376 static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
2389 if (refcount_read(&nbd->config_refs))
2394 ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
2407 struct nbd_device *nbd = ptr;
2408 return populate_nbd_status(nbd, (struct sk_buff *)data);
2453 struct nbd_device *nbd;
2454 nbd = idr_find(&nbd_index_idr, index);
2455 if (nbd) {
2456 ret = populate_nbd_status(nbd, reply);
2559 if (register_blkdev(NBD_MAJOR, "nbd"))
2562 nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
2564 unregister_blkdev(NBD_MAJOR, "nbd");
2570 unregister_blkdev(NBD_MAJOR, "nbd");
2583 struct nbd_device *nbd = ptr;
2585 /* Skip nbd that is being removed asynchronously */
2586 if (refcount_read(&nbd->refs))
2587 list_add_tail(&nbd->list, list);
2594 struct nbd_device *nbd;
2610 nbd = list_first_entry(&del_list, struct nbd_device, list);
2611 list_del_init(&nbd->list);
2612 if (refcount_read(&nbd->config_refs))
2614 refcount_read(&nbd->config_refs));
2615 if (refcount_read(&nbd->refs) != 1)
2617 nbd_put(nbd);
2624 unregister_blkdev(NBD_MAJOR, "nbd");