1// SPDX-License-Identifier: GPL-2.0-only
2/*
3   drbd_nl.c
4
5   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6
7   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10
11
12 */
13
14#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/drbd.h>
18#include <linux/in.h>
19#include <linux/fs.h>
20#include <linux/file.h>
21#include <linux/slab.h>
22#include <linux/blkpg.h>
23#include <linux/cpumask.h>
24#include "drbd_int.h"
25#include "drbd_protocol.h"
26#include "drbd_req.h"
27#include "drbd_state_change.h"
28#include <asm/unaligned.h>
29#include <linux/drbd_limits.h>
30#include <linux/kthread.h>
31
32#include <net/genetlink.h>
33
34/* .doit */
35// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
36// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
37
38int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
39int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
40
41int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
42int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
43int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
44
45int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
46int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
47int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
48int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
49int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
50int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
51int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
52int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
53int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
54int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
55int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
56int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
57int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
58int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
59int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
60int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
61int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
62int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
63int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
64int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
65/* .dumpit */
66int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
67int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
68int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
69int drbd_adm_dump_devices_done(struct netlink_callback *cb);
70int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
71int drbd_adm_dump_connections_done(struct netlink_callback *cb);
72int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
73int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
74int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
75
76#include <linux/drbd_genl_api.h>
77#include "drbd_nla.h"
78#include <linux/genl_magic_func.h>
79
80static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
81static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
82
83DEFINE_MUTEX(notification_mutex);
84
/* used as the claim pointer for blkdev_get_by_path(), to claim our meta data device(s) */
86static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
87
88static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
89{
90	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
91	if (genlmsg_reply(skb, info))
92		pr_err("error sending genl reply\n");
93}
94
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail would be lack of space in the skb, and there are 4k available. */
97static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
98{
99	struct nlattr *nla;
100	int err = -EMSGSIZE;
101
102	if (!info || !info[0])
103		return 0;
104
105	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
106	if (!nla)
107		return err;
108
109	err = nla_put_string(skb, T_info_text, info);
110	if (err) {
111		nla_nest_cancel(skb, nla);
112		return err;
113	} else
114		nla_nest_end(skb, nla);
115	return 0;
116}
117
118__printf(2, 3)
119static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
120{
121	va_list args;
122	struct nlattr *nla, *txt;
123	int err = -EMSGSIZE;
124	int len;
125
126	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
127	if (!nla)
128		return err;
129
130	txt = nla_reserve(skb, T_info_text, 256);
131	if (!txt) {
132		nla_nest_cancel(skb, nla);
133		return err;
134	}
135	va_start(args, fmt);
136	len = vscnprintf(nla_data(txt), 256, fmt, args);
137	va_end(args);
138
139	/* maybe: retry with larger reserve, if truncated */
140	txt->nla_len = nla_attr_size(len+1);
141	nlmsg_trim(skb, (char*)txt + NLA_ALIGN(txt->nla_len));
142	nla_nest_end(skb, nla);
143
144	return 0;
145}
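/* Brief usage sketch (illustrative only, not an exhaustive list of callers):
 * these helpers attach a human readable explanation to the netlink reply, e.g.
 *
 *	drbd_msg_put_info(adm_ctx.reply_skb, "unknown minor");
 *	drbd_msg_sprintf_info(adm_ctx.reply_skb, "minor %u already exists", minor);
 *
 * Both nest a DRBD_NLA_CFG_REPLY attribute containing a T_info_text string,
 * which userspace (drbdsetup) can print alongside the numeric ret_code. */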
146
147/* This would be a good candidate for a "pre_doit" hook,
148 * and per-family private info->pointers.
149 * But we need to stay compatible with older kernels.
150 * If it returns successfully, adm_ctx members are valid.
151 *
152 * At this point, we still rely on the global genl_lock().
153 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
154 * to add additional synchronization against object destruction/modification.
155 */
156#define DRBD_ADM_NEED_MINOR	1
157#define DRBD_ADM_NEED_RESOURCE	2
158#define DRBD_ADM_NEED_CONNECTION 4
159static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
160	struct sk_buff *skb, struct genl_info *info, unsigned flags)
161{
162	struct drbd_genlmsghdr *d_in = genl_info_userhdr(info);
163	const u8 cmd = info->genlhdr->cmd;
164	int err;
165
166	memset(adm_ctx, 0, sizeof(*adm_ctx));
167
168	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
169	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
170	       return -EPERM;
171
172	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
173	if (!adm_ctx->reply_skb) {
174		err = -ENOMEM;
175		goto fail;
176	}
177
178	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
179					info, &drbd_genl_family, 0, cmd);
	/* Putting a few bytes into a fresh skb of >= 4k will always succeed,
	 * but check anyway. */
182	if (!adm_ctx->reply_dh) {
183		err = -ENOMEM;
184		goto fail;
185	}
186
187	adm_ctx->reply_dh->minor = d_in->minor;
188	adm_ctx->reply_dh->ret_code = NO_ERROR;
189
190	adm_ctx->volume = VOLUME_UNSPECIFIED;
191	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
192		struct nlattr *nla;
193		/* parse and validate only */
194		err = drbd_cfg_context_from_attrs(NULL, info);
195		if (err)
196			goto fail;
197
198		/* It was present, and valid,
199		 * copy it over to the reply skb. */
200		err = nla_put_nohdr(adm_ctx->reply_skb,
201				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
202				info->attrs[DRBD_NLA_CFG_CONTEXT]);
203		if (err)
204			goto fail;
205
206		/* and assign stuff to the adm_ctx */
207		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
208		if (nla)
209			adm_ctx->volume = nla_get_u32(nla);
210		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
211		if (nla)
212			adm_ctx->resource_name = nla_data(nla);
213		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
214		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
215		if ((adm_ctx->my_addr &&
216		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
217		    (adm_ctx->peer_addr &&
218		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
219			err = -EINVAL;
220			goto fail;
221		}
222	}
223
224	adm_ctx->minor = d_in->minor;
225	adm_ctx->device = minor_to_device(d_in->minor);
226
227	/* We are protected by the global genl_lock().
228	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
229	 * so make sure this object stays around. */
230	if (adm_ctx->device)
231		kref_get(&adm_ctx->device->kref);
232
233	if (adm_ctx->resource_name) {
234		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
235	}
236
237	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
238		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
239		return ERR_MINOR_INVALID;
240	}
241	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
242		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
243		if (adm_ctx->resource_name)
244			return ERR_RES_NOT_KNOWN;
245		return ERR_INVALID_REQUEST;
246	}
247
248	if (flags & DRBD_ADM_NEED_CONNECTION) {
249		if (adm_ctx->resource) {
250			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
251			return ERR_INVALID_REQUEST;
252		}
253		if (adm_ctx->device) {
254			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
255			return ERR_INVALID_REQUEST;
256		}
257		if (adm_ctx->my_addr && adm_ctx->peer_addr)
258			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
259							  nla_len(adm_ctx->my_addr),
260							  nla_data(adm_ctx->peer_addr),
261							  nla_len(adm_ctx->peer_addr));
262		if (!adm_ctx->connection) {
263			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
264			return ERR_INVALID_REQUEST;
265		}
266	}
267
268	/* some more paranoia, if the request was over-determined */
269	if (adm_ctx->device && adm_ctx->resource &&
270	    adm_ctx->device->resource != adm_ctx->resource) {
271		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
272			adm_ctx->minor, adm_ctx->resource->name,
273			adm_ctx->device->resource->name);
274		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
275		return ERR_INVALID_REQUEST;
276	}
277	if (adm_ctx->device &&
278	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
279	    adm_ctx->volume != adm_ctx->device->vnr) {
280		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
281			adm_ctx->minor, adm_ctx->volume,
282			adm_ctx->device->vnr, adm_ctx->device->resource->name);
283		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
284		return ERR_INVALID_REQUEST;
285	}
286
	/* Still, always provide adm_ctx->resource, if possible. */
288	if (!adm_ctx->resource) {
289		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
290			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
291		if (adm_ctx->resource)
292			kref_get(&adm_ctx->resource->kref);
293	}
294
295	return NO_ERROR;
296
297fail:
298	nlmsg_free(adm_ctx->reply_skb);
299	adm_ctx->reply_skb = NULL;
300	return err;
301}
302
303static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
304	struct genl_info *info, int retcode)
305{
306	if (adm_ctx->device) {
307		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
308		adm_ctx->device = NULL;
309	}
310	if (adm_ctx->connection) {
311		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
312		adm_ctx->connection = NULL;
313	}
314	if (adm_ctx->resource) {
315		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
316		adm_ctx->resource = NULL;
317	}
318
319	if (!adm_ctx->reply_skb)
320		return -ENOMEM;
321
322	adm_ctx->reply_dh->ret_code = retcode;
323	drbd_adm_send_reply(adm_ctx->reply_skb, info);
324	return 0;
325}
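/* Typical shape of a .doit handler built on the two helpers above
 * (see e.g. drbd_adm_set_role() further down):
 *
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	... do the actual work, usually under adm_ctx.resource->adm_mutex ...
 * out:
 *	drbd_adm_finish(&adm_ctx, info, retcode);
 *	return 0;
 */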
326
327static void setup_khelper_env(struct drbd_connection *connection, char **envp)
328{
329	char *afs;
330
331	/* FIXME: A future version will not allow this case. */
332	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
333		return;
334
335	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
336	case AF_INET6:
337		afs = "ipv6";
338		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
339			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
340		break;
341	case AF_INET:
342		afs = "ipv4";
343		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
344			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
345		break;
346	default:
347		afs = "ssocks";
348		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
349			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
350	}
351	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
352}
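/* Illustrative example (hypothetical peer address): for an IPv4 peer at
 * 192.0.2.1, the helper environment filled in above contains
 *
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.0.2.1
 *
 * in addition to the static HOME/TERM/PATH entries set up by the callers below. */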
353
354int drbd_khelper(struct drbd_device *device, char *cmd)
355{
356	char *envp[] = { "HOME=/",
357			"TERM=linux",
358			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
359			 (char[20]) { }, /* address family */
360			 (char[60]) { }, /* address */
361			NULL };
362	char mb[14];
363	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
364	struct drbd_connection *connection = first_peer_device(device)->connection;
365	struct sib_info sib;
366	int ret;
367
368	if (current == connection->worker.task)
369		set_bit(CALLBACK_PENDING, &connection->flags);
370
371	snprintf(mb, 14, "minor-%d", device_to_minor(device));
372	setup_khelper_env(connection, envp);
373
	/* The helper may take some time;
	 * write out any unsynced meta data changes now. */
376	drbd_md_sync(device);
377
378	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
379	sib.sib_reason = SIB_HELPER_PRE;
380	sib.helper_name = cmd;
381	drbd_bcast_event(device, &sib);
382	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
383	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
384	if (ret)
385		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
386				drbd_usermode_helper, cmd, mb,
387				(ret >> 8) & 0xff, ret);
388	else
389		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
390				drbd_usermode_helper, cmd, mb,
391				(ret >> 8) & 0xff, ret);
392	sib.sib_reason = SIB_HELPER_POST;
393	sib.helper_exit_code = ret;
394	drbd_bcast_event(device, &sib);
395	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);
396
397	if (current == connection->worker.task)
398		clear_bit(CALLBACK_PENDING, &connection->flags);
399
400	if (ret < 0) /* Ignore any ERRNOs we got. */
401		ret = 0;
402
403	return ret;
404}
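/* Note on the exit code handling above: call_usermodehelper(..., UMH_WAIT_PROC)
 * returns a wait(2)-style status, so (ret >> 8) & 0xff is the helper's exit
 * code, while a negative value means the helper could not be run at all
 * (those ERRNOs are deliberately ignored and mapped to 0). */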
405
406enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
407{
408	char *envp[] = { "HOME=/",
409			"TERM=linux",
410			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
411			 (char[20]) { }, /* address family */
412			 (char[60]) { }, /* address */
413			NULL };
414	char *resource_name = connection->resource->name;
415	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
416	int ret;
417
418	setup_khelper_env(connection, envp);
419	conn_md_sync(connection);
420
421	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
422	/* TODO: conn_bcast_event() ?? */
423	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);
424
425	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
426	if (ret)
427		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
428			  drbd_usermode_helper, cmd, resource_name,
429			  (ret >> 8) & 0xff, ret);
430	else
431		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
432			  drbd_usermode_helper, cmd, resource_name,
433			  (ret >> 8) & 0xff, ret);
434	/* TODO: conn_bcast_event() ?? */
435	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);
436
437	if (ret < 0) /* Ignore any ERRNOs we got. */
438		ret = 0;
439
440	return ret;
441}
442
443static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
444{
445	enum drbd_fencing_p fp = FP_NOT_AVAIL;
446	struct drbd_peer_device *peer_device;
447	int vnr;
448
449	rcu_read_lock();
450	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
451		struct drbd_device *device = peer_device->device;
452		if (get_ldev_if_state(device, D_CONSISTENT)) {
453			struct disk_conf *disk_conf =
454				rcu_dereference(peer_device->device->ldev->disk_conf);
455			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
456			put_ldev(device);
457		}
458	}
459	rcu_read_unlock();
460
461	return fp;
462}
463
static bool resource_is_suspended(struct drbd_resource *resource)
465{
466	return resource->susp || resource->susp_fen || resource->susp_nod;
467}
468
469bool conn_try_outdate_peer(struct drbd_connection *connection)
470{
471	struct drbd_resource * const resource = connection->resource;
472	unsigned int connect_cnt;
473	union drbd_state mask = { };
474	union drbd_state val = { };
475	enum drbd_fencing_p fp;
476	char *ex_to_string;
477	int r;
478
479	spin_lock_irq(&resource->req_lock);
480	if (connection->cstate >= C_WF_REPORT_PARAMS) {
481		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
482		spin_unlock_irq(&resource->req_lock);
483		return false;
484	}
485
486	connect_cnt = connection->connect_cnt;
487	spin_unlock_irq(&resource->req_lock);
488
489	fp = highest_fencing_policy(connection);
490	switch (fp) {
491	case FP_NOT_AVAIL:
492		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
493		spin_lock_irq(&resource->req_lock);
494		if (connection->cstate < C_WF_REPORT_PARAMS) {
495			_conn_request_state(connection,
496					    (union drbd_state) { { .susp_fen = 1 } },
497					    (union drbd_state) { { .susp_fen = 0 } },
498					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
499			/* We are no longer suspended due to the fencing policy.
500			 * We may still be suspended due to the on-no-data-accessible policy.
501			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
503				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
504		}
505		/* Else: in case we raced with a connection handshake,
506		 * let the handshake figure out if we maybe can RESEND,
507		 * and do not resume/fail pending requests here.
508		 * Worst case is we stay suspended for now, which may be
509		 * resolved by either re-establishing the replication link, or
510		 * the next link failure, or eventually the administrator.  */
511		spin_unlock_irq(&resource->req_lock);
512		return false;
513
514	case FP_DONT_CARE:
515		return true;
516	default: ;
517	}
518
519	r = conn_khelper(connection, "fence-peer");
520
521	switch ((r>>8) & 0xff) {
522	case P_INCONSISTENT: /* peer is inconsistent */
523		ex_to_string = "peer is inconsistent or worse";
524		mask.pdsk = D_MASK;
525		val.pdsk = D_INCONSISTENT;
526		break;
527	case P_OUTDATED: /* peer got outdated, or was already outdated */
528		ex_to_string = "peer was fenced";
529		mask.pdsk = D_MASK;
530		val.pdsk = D_OUTDATED;
531		break;
532	case P_DOWN: /* peer was down */
533		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will have created, or will create, a new UUID anyway... */
535			ex_to_string = "peer is unreachable, assumed to be dead";
536			mask.pdsk = D_MASK;
537			val.pdsk = D_OUTDATED;
538		} else {
539			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
540		}
541		break;
542	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
543		 * This is useful when an unconnected R_SECONDARY is asked to
544		 * become R_PRIMARY, but finds the other peer being active. */
545		ex_to_string = "peer is active";
546		drbd_warn(connection, "Peer is primary, outdating myself.\n");
547		mask.disk = D_MASK;
548		val.disk = D_OUTDATED;
549		break;
550	case P_FENCING:
551		/* THINK: do we need to handle this
552		 * like case 4, or more like case 5? */
553		if (fp != FP_STONITH)
554			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
555		ex_to_string = "peer was stonithed";
556		mask.pdsk = D_MASK;
557		val.pdsk = D_OUTDATED;
558		break;
559	default:
560		/* The script is broken ... */
561		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
562		return false; /* Eventually leave IO frozen */
563	}
564
565	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
566		  (r>>8) & 0xff, ex_to_string);
567
	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection in the
	   meantime. */
572	spin_lock_irq(&resource->req_lock);
573	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
574		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
577			drbd_info(connection, "Ignoring fence-peer exit code\n");
578		else
579			_conn_request_state(connection, mask, val, CS_VERBOSE);
580	}
581	spin_unlock_irq(&resource->req_lock);
582
583	return conn_highest_pdsk(connection) <= D_OUTDATED;
584}
585
586static int _try_outdate_peer_async(void *data)
587{
588	struct drbd_connection *connection = (struct drbd_connection *)data;
589
590	conn_try_outdate_peer(connection);
591
592	kref_put(&connection->kref, drbd_destroy_connection);
593	return 0;
594}
595
596void conn_try_outdate_peer_async(struct drbd_connection *connection)
597{
598	struct task_struct *opa;
599
600	kref_get(&connection->kref);
601	/* We may have just sent a signal to this thread
602	 * to get it out of some blocking network function.
603	 * Clear signals; otherwise kthread_run(), which internally uses
604	 * wait_on_completion_killable(), will mistake our pending signal
605	 * for a new fatal signal and fail. */
606	flush_signals(current);
607	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
608	if (IS_ERR(opa)) {
609		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
610		kref_put(&connection->kref, drbd_destroy_connection);
611	}
612}
613
614enum drbd_state_rv
615drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
616{
617	struct drbd_peer_device *const peer_device = first_peer_device(device);
618	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
619	const int max_tries = 4;
620	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
621	struct net_conf *nc;
622	int try = 0;
623	int forced = 0;
624	union drbd_state mask, val;
625
626	if (new_role == R_PRIMARY) {
627		struct drbd_connection *connection;
628
629		/* Detect dead peers as soon as possible.  */
630
631		rcu_read_lock();
632		for_each_connection(connection, device->resource)
633			request_ping(connection);
634		rcu_read_unlock();
635	}
636
637	mutex_lock(device->state_mutex);
638
639	mask.i = 0; mask.role = R_MASK;
640	val.i  = 0; val.role  = new_role;
641
642	while (try++ < max_tries) {
643		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);
644
		/* in case we first succeeded in outdating the peer,
		 * but now suddenly can establish a connection */
647		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
648			val.pdsk = 0;
649			mask.pdsk = 0;
650			continue;
651		}
652
653		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
654		    (device->state.disk < D_UP_TO_DATE &&
655		     device->state.disk >= D_INCONSISTENT)) {
656			mask.disk = D_MASK;
657			val.disk  = D_UP_TO_DATE;
658			forced = 1;
659			continue;
660		}
661
662		if (rv == SS_NO_UP_TO_DATE_DISK &&
663		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
664			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
665
666			if (conn_try_outdate_peer(connection)) {
667				val.disk = D_UP_TO_DATE;
668				mask.disk = D_MASK;
669			}
670			continue;
671		}
672
673		if (rv == SS_NOTHING_TO_DO)
674			goto out;
675		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
676			if (!conn_try_outdate_peer(connection) && force) {
677				drbd_warn(device, "Forced into split brain situation!\n");
678				mask.pdsk = D_MASK;
679				val.pdsk  = D_OUTDATED;
680
681			}
682			continue;
683		}
684		if (rv == SS_TWO_PRIMARIES) {
685			/* Maybe the peer is detected as dead very soon...
686			   retry at most once more in this case. */
687			if (try < max_tries) {
688				int timeo;
689				try = max_tries - 1;
690				rcu_read_lock();
691				nc = rcu_dereference(connection->net_conf);
692				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
693				rcu_read_unlock();
694				schedule_timeout_interruptible(timeo);
695			}
696			continue;
697		}
698		if (rv < SS_SUCCESS) {
699			rv = _drbd_request_state(device, mask, val,
700						CS_VERBOSE + CS_WAIT_COMPLETE);
701			if (rv < SS_SUCCESS)
702				goto out;
703		}
704		break;
705	}
706
707	if (rv < SS_SUCCESS)
708		goto out;
709
710	if (forced)
711		drbd_warn(device, "Forced to consider local data as UpToDate!\n");
712
	/* Wait until nothing is in flight :) */
714	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
715
716	/* FIXME also wait for all pending P_BARRIER_ACK? */
717
718	if (new_role == R_SECONDARY) {
719		if (get_ldev(device)) {
720			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
721			put_ldev(device);
722		}
723	} else {
724		mutex_lock(&device->resource->conf_update);
725		nc = connection->net_conf;
726		if (nc)
727			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
728		mutex_unlock(&device->resource->conf_update);
729
730		if (get_ldev(device)) {
731			if (((device->state.conn < C_CONNECTED ||
732			       device->state.pdsk <= D_FAILED)
733			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
734				drbd_uuid_new_current(device);
735
736			device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
737			put_ldev(device);
738		}
739	}
740
	/* write-out of the bitmap areas covered by the activity log
	 * to stable storage is already done in the after-state-change work */
743
744	if (device->state.conn >= C_WF_REPORT_PARAMS) {
745		/* if this was forced, we should consider sync */
746		if (forced)
747			drbd_send_uuids(peer_device);
748		drbd_send_current_state(peer_device);
749	}
750
751	drbd_md_sync(device);
752	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
753	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
754out:
755	mutex_unlock(device->state_mutex);
756	return rv;
757}
758
759static const char *from_attrs_err_to_txt(int err)
760{
761	return	err == -ENOMSG ? "required attribute missing" :
762		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
763		err == -EEXIST ? "can not change invariant setting" :
764		"invalid attribute value";
765}
766
767int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
768{
769	struct drbd_config_context adm_ctx;
770	struct set_role_parms parms;
771	int err;
772	enum drbd_ret_code retcode;
773	enum drbd_state_rv rv;
774
775	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
776	if (!adm_ctx.reply_skb)
777		return retcode;
778	if (retcode != NO_ERROR)
779		goto out;
780
781	memset(&parms, 0, sizeof(parms));
782	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
783		err = set_role_parms_from_attrs(&parms, info);
784		if (err) {
785			retcode = ERR_MANDATORY_TAG;
786			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
787			goto out;
788		}
789	}
790	genl_unlock();
791	mutex_lock(&adm_ctx.resource->adm_mutex);
792
793	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
794		rv = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
795	else
796		rv = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
797
798	mutex_unlock(&adm_ctx.resource->adm_mutex);
799	genl_lock();
800	drbd_adm_finish(&adm_ctx, info, rv);
801	return 0;
802out:
803	drbd_adm_finish(&adm_ctx, info, retcode);
804	return 0;
805}
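/* For orientation (userspace mapping, as far as the generic netlink command
 * names suggest): DRBD_ADM_PRIMARY/DRBD_ADM_SECONDARY correspond to
 * "drbdsetup primary"/"drbdsetup secondary"; parms.assume_uptodate is what a
 * forced promotion sets, which in turn allows drbd_set_role() above to force
 * the local disk to D_UP_TO_DATE. */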
806
807/* Initializes the md.*_offset members, so we are able to find
808 * the on disk meta data.
809 *
810 * We currently have two possible layouts:
811 * external:
812 *   |----------- md_size_sect ------------------|
813 *   [ 4k superblock ][ activity log ][  Bitmap  ]
814 *   | al_offset == 8 |
815 *   | bm_offset = al_offset + X      |
816 *  ==> bitmap sectors = md_size_sect - bm_offset
817 *
818 * internal:
819 *            |----------- md_size_sect ------------------|
820 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
821 *                        | al_offset < 0 |
822 *            | bm_offset = al_offset - Y |
823 *  ==> bitmap sectors = Y = al_offset - bm_offset
824 *
825 *  Activity log size used to be fixed 32kB,
826 *  but is about to become configurable.
827 */
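/* Rough worked example (assuming the default 32kB activity log, i.e.
 * al_size_4k = 8, so al_size_sect = 64):
 * external fixed-size index:  al_offset = 8, bm_offset = 8 + 64 = 72,
 *                             md_size_sect = MD_128MB_SECT;
 * internal / flex-internal:   al_offset = -64; bm_offset and md_size_sect
 *                             depend on the backing device size, derived
 *                             from the bitmap granularity below. */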
828static void drbd_md_set_sector_offsets(struct drbd_device *device,
829				       struct drbd_backing_dev *bdev)
830{
831	sector_t md_size_sect = 0;
832	unsigned int al_size_sect = bdev->md.al_size_4k * 8;
833
834	bdev->md.md_offset = drbd_md_ss(bdev);
835
836	switch (bdev->md.meta_dev_idx) {
837	default:
838		/* v07 style fixed size indexed meta data */
839		bdev->md.md_size_sect = MD_128MB_SECT;
840		bdev->md.al_offset = MD_4kB_SECT;
841		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
842		break;
843	case DRBD_MD_INDEX_FLEX_EXT:
844		/* just occupy the full device; unit: sectors */
845		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
846		bdev->md.al_offset = MD_4kB_SECT;
847		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
848		break;
849	case DRBD_MD_INDEX_INTERNAL:
850	case DRBD_MD_INDEX_FLEX_INT:
851		/* al size is still fixed */
852		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) this many bitmap sectors: */
854		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
855		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
856		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
857		md_size_sect = ALIGN(md_size_sect, 8);
858
859		/* plus the "drbd meta data super block",
860		 * and the activity log; */
861		md_size_sect += MD_4kB_SECT + al_size_sect;
862
863		bdev->md.md_size_sect = md_size_sect;
864		/* bitmap offset is adjusted by 'super' block size */
865		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
866		break;
867	}
868}
869
870/* input size is expected to be in KB */
871char *ppsize(char *buf, unsigned long long size)
872{
873	/* Needs 9 bytes at max including trailing NUL:
874	 * -1ULL ==> "16384 EB" */
875	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
876	int base = 0;
877	while (size >= 10000 && base < sizeof(units)-1) {
878		/* shift + round */
879		size = (size >> 10) + !!(size & (1<<9));
880		base++;
881	}
882	sprintf(buf, "%u %cB", (unsigned)size, units[base]);
883
884	return buf;
885}
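/* Example: ppsize(buf, 1048576) yields "1024 MB" -- the value is scaled in
 * powers of 1024 but only bumped to the next unit once it reaches 10000, so
 * the printed numbers are binary-based despite the "KB/MB/..." suffixes. */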
886
/* There is still a theoretical deadlock when called from the receiver
 * on a D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver's receive_sizes() comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * But this cannot happen, actually, because:
894 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
895 *  (not connected, or bad/no disk on peer):
896 *  see drbd_fail_request_early, ap_bio_cnt is zero.
897 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
898 *  peer may not initiate a resize.
899 */
900/* Note these are not to be confused with
901 * drbd_adm_suspend_io/drbd_adm_resume_io,
902 * which are (sub) state changes triggered by admin (drbdsetup),
903 * and can be long lived.
 * This changes a device flag, is triggered by drbd internals,
905 * and should be short-lived. */
906/* It needs to be a counter, since multiple threads might
907   independently suspend and resume IO. */
908void drbd_suspend_io(struct drbd_device *device)
909{
910	atomic_inc(&device->suspend_cnt);
911	if (drbd_suspended(device))
912		return;
913	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
914}
915
916void drbd_resume_io(struct drbd_device *device)
917{
918	if (atomic_dec_and_test(&device->suspend_cnt))
919		wake_up(&device->misc_wait);
920}
921
922/*
923 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
924 * @device:	DRBD device.
925 *
926 * Returns 0 on success, negative return values indicate errors.
927 * You should call drbd_md_sync() after calling this function.
928 */
929enum determine_dev_size
930drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
931{
932	struct md_offsets_and_sizes {
933		u64 last_agreed_sect;
934		u64 md_offset;
935		s32 al_offset;
936		s32 bm_offset;
937		u32 md_size_sect;
938
939		u32 al_stripes;
940		u32 al_stripe_size_4k;
941	} prev;
942	sector_t u_size, size;
943	struct drbd_md *md = &device->ldev->md;
944	void *buffer;
945
946	int md_moved, la_size_changed;
947	enum determine_dev_size rv = DS_UNCHANGED;
948
949	/* We may change the on-disk offsets of our meta data below.  Lock out
950	 * anything that may cause meta data IO, to avoid acting on incomplete
951	 * layout changes or scribbling over meta data that is in the process
952	 * of being moved.
953	 *
	 * "Move" is not exactly correct, btw: currently we have all our meta
	 * data in core memory, so to "move" it we just write it all out; there
	 * are no reads. */
957	drbd_suspend_io(device);
958	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
959	if (!buffer) {
960		drbd_resume_io(device);
961		return DS_ERROR;
962	}
963
964	/* remember current offset and sizes */
965	prev.last_agreed_sect = md->la_size_sect;
966	prev.md_offset = md->md_offset;
967	prev.al_offset = md->al_offset;
968	prev.bm_offset = md->bm_offset;
969	prev.md_size_sect = md->md_size_sect;
970	prev.al_stripes = md->al_stripes;
971	prev.al_stripe_size_4k = md->al_stripe_size_4k;
972
973	if (rs) {
974		/* rs is non NULL if we should change the AL layout only */
975		md->al_stripes = rs->al_stripes;
976		md->al_stripe_size_4k = rs->al_stripe_size / 4;
977		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
978	}
979
980	drbd_md_set_sector_offsets(device, device->ldev);
981
982	rcu_read_lock();
983	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
984	rcu_read_unlock();
985	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
986
987	if (size < prev.last_agreed_sect) {
988		if (rs && u_size == 0) {
989			/* Remove "rs &&" later. This check should always be active, but
990			   right now the receiver expects the permissive behavior */
991			drbd_warn(device, "Implicit shrink not allowed. "
992				 "Use --size=%llus for explicit shrink.\n",
993				 (unsigned long long)size);
994			rv = DS_ERROR_SHRINK;
995		}
996		if (u_size > size)
997			rv = DS_ERROR_SPACE_MD;
998		if (rv != DS_UNCHANGED)
999			goto err_out;
1000	}
1001
1002	if (get_capacity(device->vdisk) != size ||
1003	    drbd_bm_capacity(device) != size) {
1004		int err;
1005		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
1006		if (unlikely(err)) {
1007			/* currently there is only one error: ENOMEM! */
1008			size = drbd_bm_capacity(device);
1009			if (size == 0) {
1010				drbd_err(device, "OUT OF MEMORY! "
1011				    "Could not allocate bitmap!\n");
1012			} else {
1013				drbd_err(device, "BM resizing failed. "
1014				    "Leaving size unchanged\n");
1015			}
1016			rv = DS_ERROR;
1017		}
1018		/* racy, see comments above. */
1019		drbd_set_my_capacity(device, size);
1020		md->la_size_sect = size;
1021	}
1022	if (rv <= DS_ERROR)
1023		goto err_out;
1024
1025	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);
1026
1027	md_moved = prev.md_offset    != md->md_offset
1028		|| prev.md_size_sect != md->md_size_sect;
1029
1030	if (la_size_changed || md_moved || rs) {
1031		u32 prev_flags;
1032
		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages;
		 * the "superblock" is written out at least twice below anyway. */
1036		del_timer(&device->md_sync_timer);
1037
		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Locking it for a transaction is good enough; it may well be "dirty"
		 * or even "starving". */
1042		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));
1043
1044		/* mark current on-disk bitmap and activity log as unreliable */
1045		prev_flags = md->flags;
1046		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
1047		drbd_md_write(device, buffer);
1048
1049		drbd_al_initialize(device, buffer);
1050
1051		drbd_info(device, "Writing the whole bitmap, %s\n",
1052			 la_size_changed && md_moved ? "size changed and md moved" :
1053			 la_size_changed ? "size changed" : "md moved");
1054		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
1055		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
1056			       "size changed", BM_LOCKED_MASK, NULL);
1057
		/* on-disk bitmap and activity log are authoritative again
1059		 * (unless there was an IO error meanwhile...) */
1060		md->flags = prev_flags;
1061		drbd_md_write(device, buffer);
1062
1063		if (rs)
1064			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
1065				  md->al_stripes, md->al_stripe_size_4k * 4);
1066	}
1067
1068	if (size > prev.last_agreed_sect)
1069		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
1070	if (size < prev.last_agreed_sect)
1071		rv = DS_SHRUNK;
1072
1073	if (0) {
1074	err_out:
1075		/* restore previous offset and sizes */
1076		md->la_size_sect = prev.last_agreed_sect;
1077		md->md_offset = prev.md_offset;
1078		md->al_offset = prev.al_offset;
1079		md->bm_offset = prev.bm_offset;
1080		md->md_size_sect = prev.md_size_sect;
1081		md->al_stripes = prev.al_stripes;
1082		md->al_stripe_size_4k = prev.al_stripe_size_4k;
1083		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
1084	}
1085	lc_unlock(device->act_log);
1086	wake_up(&device->al_wait);
1087	drbd_md_put_buffer(device);
1088	drbd_resume_io(device);
1089
1090	return rv;
1091}
1092
1093sector_t
1094drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
1095		  sector_t u_size, int assume_peer_has_space)
1096{
1097	sector_t p_size = device->p_size;   /* partner's disk size. */
1098	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
1099	sector_t m_size; /* my size */
1100	sector_t size = 0;
1101
1102	m_size = drbd_get_max_capacity(bdev);
1103
1104	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
1105		drbd_warn(device, "Resize while not connected was forced by the user!\n");
1106		p_size = m_size;
1107	}
1108
1109	if (p_size && m_size) {
1110		size = min_t(sector_t, p_size, m_size);
1111	} else {
1112		if (la_size_sect) {
1113			size = la_size_sect;
1114			if (m_size && m_size < size)
1115				size = m_size;
1116			if (p_size && p_size < size)
1117				size = p_size;
1118		} else {
1119			if (m_size)
1120				size = m_size;
1121			if (p_size)
1122				size = p_size;
1123		}
1124	}
1125
1126	if (size == 0)
1127		drbd_err(device, "Both nodes diskless!\n");
1128
1129	if (u_size) {
1130		if (u_size > size)
1131			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
1132			    (unsigned long)u_size>>1, (unsigned long)size>>1);
1133		else
1134			size = u_size;
1135	}
1136
1137	return size;
1138}
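/* Sizing sketch (hypothetical numbers): with a local backing device of
 * 100 GiB and a peer reporting 80 GiB, the agreed size becomes 80 GiB
 * (the minimum of both); a user-requested size (u_size) may shrink it
 * further, while a request larger than the agreed size is rejected
 * (error logged, size left unchanged). */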
1139
1140/*
1141 * drbd_check_al_size() - Ensures that the AL is of the right size
1142 * @device:	DRBD device.
1143 *
1144 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
1145 * failed, and 0 on success. You should call drbd_md_sync() after you called
1146 * this function.
1147 */
1148static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1149{
1150	struct lru_cache *n, *t;
1151	struct lc_element *e;
1152	unsigned int in_use;
1153	int i;
1154
1155	if (device->act_log &&
1156	    device->act_log->nr_elements == dc->al_extents)
1157		return 0;
1158
1159	in_use = 0;
1160	t = device->act_log;
1161	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
1162		dc->al_extents, sizeof(struct lc_element), 0);
1163
1164	if (n == NULL) {
1165		drbd_err(device, "Cannot allocate act_log lru!\n");
1166		return -ENOMEM;
1167	}
1168	spin_lock_irq(&device->al_lock);
1169	if (t) {
1170		for (i = 0; i < t->nr_elements; i++) {
1171			e = lc_element_by_index(t, i);
1172			if (e->refcnt)
1173				drbd_err(device, "refcnt(%d)==%d\n",
1174				    e->lc_number, e->refcnt);
1175			in_use += e->refcnt;
1176		}
1177	}
1178	if (!in_use)
1179		device->act_log = n;
1180	spin_unlock_irq(&device->al_lock);
1181	if (in_use) {
1182		drbd_err(device, "Activity log still in use!\n");
1183		lc_destroy(n);
1184		return -EBUSY;
1185	} else {
1186		lc_destroy(t);
1187	}
1188	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elemens */
1189	return 0;
1190}
1191
1192static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
1193{
1194	q->limits.discard_granularity = granularity;
1195}
1196
1197static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
1198{
1199	/* when we introduced REQ_WRITE_SAME support, we also bumped
1200	 * our maximum supported batch bio size used for discards. */
1201	if (connection->agreed_features & DRBD_FF_WSAME)
1202		return DRBD_MAX_BBIO_SECTORS;
1203	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
1204	return AL_EXTENT_SIZE >> 9;
1205}
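/* With the customary 4 MiB AL extent size this legacy limit amounts to
 * 8192 sectors (4 MiB) per discard request (assumption based on the usual
 * AL_EXTENT_SIZE; DRBD_MAX_BBIO_SECTORS is much larger). */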
1206
1207static void decide_on_discard_support(struct drbd_device *device,
1208		struct drbd_backing_dev *bdev)
1209{
1210	struct drbd_connection *connection =
1211		first_peer_device(device)->connection;
1212	struct request_queue *q = device->rq_queue;
1213	unsigned int max_discard_sectors;
1214
1215	if (bdev && !bdev_max_discard_sectors(bdev->backing_bdev))
1216		goto not_supported;
1217
1218	if (connection->cstate >= C_CONNECTED &&
1219	    !(connection->agreed_features & DRBD_FF_TRIM)) {
1220		drbd_info(connection,
1221			"peer DRBD too old, does not support TRIM: disabling discards\n");
1222		goto not_supported;
1223	}
1224
1225	/*
1226	 * We don't care for the granularity, really.
1227	 *
1228	 * Stacking limits below should fix it for the local device.  Whether or
1229	 * not it is a suitable granularity on the remote device is not our
1230	 * problem, really. If you care, you need to use devices with similar
1231	 * topology on all peers.
1232	 */
1233	blk_queue_discard_granularity(q, 512);
1234	max_discard_sectors = drbd_max_discard_sectors(connection);
1235	blk_queue_max_discard_sectors(q, max_discard_sectors);
1236	blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
1237	return;
1238
1239not_supported:
1240	blk_queue_discard_granularity(q, 0);
1241	blk_queue_max_discard_sectors(q, 0);
1242}
1243
1244static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
1245{
1246	/* Fixup max_write_zeroes_sectors after blk_stack_limits():
1247	 * if we can handle "zeroes" efficiently on the protocol,
1248	 * we want to do that, even if our backend does not announce
1249	 * max_write_zeroes_sectors itself. */
1250	struct drbd_connection *connection = first_peer_device(device)->connection;
1251	/* If the peer announces WZEROES support, use it.  Otherwise, rather
1252	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
1253	if (connection->agreed_features & DRBD_FF_WZEROES)
1254		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
1255	else
1256		q->limits.max_write_zeroes_sectors = 0;
1257}
1258
1259static void fixup_discard_support(struct drbd_device *device, struct request_queue *q)
1260{
1261	unsigned int max_discard = device->rq_queue->limits.max_discard_sectors;
1262	unsigned int discard_granularity =
1263		device->rq_queue->limits.discard_granularity >> SECTOR_SHIFT;
1264
1265	if (discard_granularity > max_discard) {
1266		blk_queue_discard_granularity(q, 0);
1267		blk_queue_max_discard_sectors(q, 0);
1268	}
1269}
1270
1271static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
1272				   unsigned int max_bio_size, struct o_qlim *o)
1273{
1274	struct request_queue * const q = device->rq_queue;
1275	unsigned int max_hw_sectors = max_bio_size >> 9;
1276	unsigned int max_segments = 0;
1277	struct request_queue *b = NULL;
1278	struct disk_conf *dc;
1279
1280	if (bdev) {
1281		b = bdev->backing_bdev->bd_disk->queue;
1282
1283		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
1284		rcu_read_lock();
1285		dc = rcu_dereference(device->ldev->disk_conf);
1286		max_segments = dc->max_bio_bvecs;
1287		rcu_read_unlock();
1288
1289		blk_set_stacking_limits(&q->limits);
1290	}
1291
1292	blk_queue_max_hw_sectors(q, max_hw_sectors);
1293	/* This is the workaround for "bio would need to, but cannot, be split" */
1294	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
1295	blk_queue_segment_boundary(q, PAGE_SIZE-1);
1296	decide_on_discard_support(device, bdev);
1297
1298	if (b) {
1299		blk_stack_limits(&q->limits, &b->limits, 0);
1300		disk_update_readahead(device->vdisk);
1301	}
1302	fixup_write_zeroes(device, q);
1303	fixup_discard_support(device, q);
1304}
1305
1306void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
1307{
1308	unsigned int now, new, local, peer;
1309
1310	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* possibly the last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* possibly the last known value, from meta data */
1313
1314	if (bdev) {
1315		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
1316		device->local_max_bio_size = local;
1317	}
1318	local = min(local, DRBD_MAX_BIO_SIZE);
1319
	/* We may ignore peer limits if the peer is modern enough,
	   because from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request. */
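	/* Quick reference for the version checks below (the authoritative
	 * values are the DRBD_MAX_* constants; sizes here for orientation):
	 *   < 94 : the peer's known limit, capped at DRBD_MAX_SIZE_H80_PACKET
	 *          (32 KiB, see the comment below);
	 *   == 94: DRBD_MAX_SIZE_H80_PACKET;
	 *   < 100: DRBD_MAX_BIO_SIZE_P95 (drbd 8.3.8 up to 8.4.0);
	 *   else : DRBD_MAX_BIO_SIZE. */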
1323	if (device->state.conn >= C_WF_REPORT_PARAMS) {
1324		if (first_peer_device(device)->connection->agreed_pro_version < 94)
1325			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
1326			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
1327		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
1328			peer = DRBD_MAX_SIZE_H80_PACKET;
1329		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
1330			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
1331		else
1332			peer = DRBD_MAX_BIO_SIZE;
1333
1334		/* We may later detach and re-attach on a disconnected Primary.
1335		 * Avoid this setting to jump back in that case.
1336		 * We want to store what we know the peer DRBD can handle,
1337		 * not what the peer IO backend can handle. */
1338		if (peer > device->peer_max_bio_size)
1339			device->peer_max_bio_size = peer;
1340	}
1341	new = min(local, peer);
1342
1343	if (device->state.role == R_PRIMARY && new < now)
1344		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
1345
1346	if (new != now)
1347		drbd_info(device, "max BIO size = %u\n", new);
1348
1349	drbd_setup_queue_param(device, bdev, new, o);
1350}
1351
1352/* Starts the worker thread */
1353static void conn_reconfig_start(struct drbd_connection *connection)
1354{
1355	drbd_thread_start(&connection->worker);
1356	drbd_flush_workqueue(&connection->sender_work);
1357}
1358
1359/* if still unconfigured, stops worker again. */
1360static void conn_reconfig_done(struct drbd_connection *connection)
1361{
1362	bool stop_threads;
1363	spin_lock_irq(&connection->resource->req_lock);
1364	stop_threads = conn_all_vols_unconf(connection) &&
1365		connection->cstate == C_STANDALONE;
1366	spin_unlock_irq(&connection->resource->req_lock);
1367	if (stop_threads) {
1368		/* ack_receiver thread and ack_sender workqueue are implicitly
1369		 * stopped by receiver in conn_disconnect() */
1370		drbd_thread_stop(&connection->receiver);
1371		drbd_thread_stop(&connection->worker);
1372	}
1373}
1374
/* Make sure IO is suspended before calling this function. */
1376static void drbd_suspend_al(struct drbd_device *device)
1377{
1378	int s = 0;
1379
1380	if (!lc_try_lock(device->act_log)) {
1381		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
1382		return;
1383	}
1384
1385	drbd_al_shrink(device);
1386	spin_lock_irq(&device->resource->req_lock);
1387	if (device->state.conn < C_CONNECTED)
1388		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
1389	spin_unlock_irq(&device->resource->req_lock);
1390	lc_unlock(device->act_log);
1391
1392	if (s)
1393		drbd_info(device, "Suspended AL updates\n");
1394}
1395
1396
1397static bool should_set_defaults(struct genl_info *info)
1398{
1399	struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
1400
1401	return 0 != (dh->flags & DRBD_GENL_F_SET_DEFAULTS);
1402}
1403
1404static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
1405{
1406	/* This is limited by 16 bit "slot" numbers,
1407	 * and by available on-disk context storage.
1408	 *
1409	 * Also (u16)~0 is special (denotes a "free" extent).
1410	 *
1411	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on-disk ring buffer,
	 * the "current" transaction may fail, so only (n-1) are usable,
	 * and there are 919 slots of context information per transaction.
	 *
	 * 72 transaction blocks amount to more than 2**16 context slots,
1417	 * so cap there first.
1418	 */
1419	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
1420	const unsigned int sufficient_on_disk =
1421		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
1422		/AL_CONTEXT_PER_TRANSACTION;
1423
1424	unsigned int al_size_4k = bdev->md.al_size_4k;
1425
1426	if (al_size_4k > sufficient_on_disk)
1427		return max_al_nr;
1428
1429	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
1430}
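/* Arithmetic sketch: with 919 context slots per transaction, 72 transaction
 * blocks already provide 72 * 919 = 66168 > 2^16 slots (hence the cap above),
 * and e.g. a default 32kB activity log (al_size_4k == 8, an assumption here)
 * yields (8 - 1) * 919 = 6433 usable al-extents, with one block reserved for
 * the in-flight transaction. */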
1431
1432static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
1433{
1434	return	a->disk_barrier != b->disk_barrier ||
1435		a->disk_flushes != b->disk_flushes ||
1436		a->disk_drain != b->disk_drain;
1437}
1438
1439static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
1440			       struct drbd_backing_dev *nbc)
1441{
1442	struct block_device *bdev = nbc->backing_bdev;
1443
1444	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
1445		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1446	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
1447		disk_conf->al_extents = drbd_al_extents_max(nbc);
1448
1449	if (!bdev_max_discard_sectors(bdev)) {
1450		if (disk_conf->rs_discard_granularity) {
1451			disk_conf->rs_discard_granularity = 0; /* disable feature */
1452			drbd_info(device, "rs_discard_granularity feature disabled\n");
1453		}
1454	}
1455
1456	if (disk_conf->rs_discard_granularity) {
1457		int orig_value = disk_conf->rs_discard_granularity;
1458		sector_t discard_size = bdev_max_discard_sectors(bdev) << 9;
1459		unsigned int discard_granularity = bdev_discard_granularity(bdev);
1460		int remainder;
1461
1462		if (discard_granularity > disk_conf->rs_discard_granularity)
1463			disk_conf->rs_discard_granularity = discard_granularity;
1464
1465		remainder = disk_conf->rs_discard_granularity %
1466				discard_granularity;
1467		disk_conf->rs_discard_granularity += remainder;
1468
1469		if (disk_conf->rs_discard_granularity > discard_size)
1470			disk_conf->rs_discard_granularity = discard_size;
1471
1472		if (disk_conf->rs_discard_granularity != orig_value)
1473			drbd_info(device, "rs_discard_granularity changed to %d\n",
1474				  disk_conf->rs_discard_granularity);
1475	}
1476}
1477
1478static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
1479{
1480	int err = -EBUSY;
1481
1482	if (device->act_log &&
1483	    device->act_log->nr_elements == dc->al_extents)
1484		return 0;
1485
1486	drbd_suspend_io(device);
1487	/* If IO completion is currently blocked, we would likely wait
1488	 * "forever" for the activity log to become unused. So we don't. */
1489	if (atomic_read(&device->ap_bio_cnt))
1490		goto out;
1491
1492	wait_event(device->al_wait, lc_try_lock(device->act_log));
1493	drbd_al_shrink(device);
1494	err = drbd_check_al_size(device, dc);
1495	lc_unlock(device->act_log);
1496	wake_up(&device->al_wait);
1497out:
1498	drbd_resume_io(device);
1499	return err;
1500}
1501
1502int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1503{
1504	struct drbd_config_context adm_ctx;
1505	enum drbd_ret_code retcode;
1506	struct drbd_device *device;
1507	struct disk_conf *new_disk_conf, *old_disk_conf;
1508	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
1509	int err;
1510	unsigned int fifo_size;
1511
1512	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1513	if (!adm_ctx.reply_skb)
1514		return retcode;
1515	if (retcode != NO_ERROR)
1516		goto finish;
1517
1518	device = adm_ctx.device;
1519	mutex_lock(&adm_ctx.resource->adm_mutex);
1520
1521	/* we also need a disk
1522	 * to change the options on */
1523	if (!get_ldev(device)) {
1524		retcode = ERR_NO_DISK;
1525		goto out;
1526	}
1527
1528	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
1529	if (!new_disk_conf) {
1530		retcode = ERR_NOMEM;
1531		goto fail;
1532	}
1533
1534	mutex_lock(&device->resource->conf_update);
1535	old_disk_conf = device->ldev->disk_conf;
1536	*new_disk_conf = *old_disk_conf;
1537	if (should_set_defaults(info))
1538		set_disk_conf_defaults(new_disk_conf);
1539
1540	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1541	if (err && err != -ENOMSG) {
1542		retcode = ERR_MANDATORY_TAG;
1543		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1544		goto fail_unlock;
1545	}
1546
1547	if (!expect(device, new_disk_conf->resync_rate >= 1))
1548		new_disk_conf->resync_rate = 1;
1549
1550	sanitize_disk_conf(device, new_disk_conf, device->ldev);
1551
1552	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1553		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1554
1555	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1556	if (fifo_size != device->rs_plan_s->size) {
1557		new_plan = fifo_alloc(fifo_size);
1558		if (!new_plan) {
1559			drbd_err(device, "kmalloc of fifo_buffer failed");
1560			retcode = ERR_NOMEM;
1561			goto fail_unlock;
1562		}
1563	}
1564
1565	err = disk_opts_check_al_size(device, new_disk_conf);
1566	if (err) {
1567		/* Could be just "busy". Ignore?
1568		 * Introduce dedicated error code? */
1569		drbd_msg_put_info(adm_ctx.reply_skb,
1570			"Try again without changing current al-extents setting");
1571		retcode = ERR_NOMEM;
1572		goto fail_unlock;
1573	}
1574
1575	lock_all_resources();
1576	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1577	if (retcode == NO_ERROR) {
1578		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
1579		drbd_resync_after_changed(device);
1580	}
1581	unlock_all_resources();
1582
1583	if (retcode != NO_ERROR)
1584		goto fail_unlock;
1585
1586	if (new_plan) {
1587		old_plan = device->rs_plan_s;
1588		rcu_assign_pointer(device->rs_plan_s, new_plan);
1589	}
1590
1591	mutex_unlock(&device->resource->conf_update);
1592
1593	if (new_disk_conf->al_updates)
1594		device->ldev->md.flags &= ~MDF_AL_DISABLED;
1595	else
1596		device->ldev->md.flags |= MDF_AL_DISABLED;
1597
1598	if (new_disk_conf->md_flushes)
1599		clear_bit(MD_NO_FUA, &device->flags);
1600	else
1601		set_bit(MD_NO_FUA, &device->flags);
1602
1603	if (write_ordering_changed(old_disk_conf, new_disk_conf))
1604		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
1605
1606	if (old_disk_conf->discard_zeroes_if_aligned !=
1607	    new_disk_conf->discard_zeroes_if_aligned)
1608		drbd_reconsider_queue_parameters(device, device->ldev, NULL);
1609
1610	drbd_md_sync(device);
1611
1612	if (device->state.conn >= C_CONNECTED) {
1613		struct drbd_peer_device *peer_device;
1614
1615		for_each_peer_device(peer_device, device)
1616			drbd_send_sync_param(peer_device);
1617	}
1618
1619	kvfree_rcu_mightsleep(old_disk_conf);
1620	kfree(old_plan);
1621	mod_timer(&device->request_timer, jiffies + HZ);
1622	goto success;
1623
1624fail_unlock:
1625	mutex_unlock(&device->resource->conf_update);
1626 fail:
1627	kfree(new_disk_conf);
1628	kfree(new_plan);
1629success:
1630	put_ldev(device);
1631 out:
1632	mutex_unlock(&adm_ctx.resource->adm_mutex);
1633 finish:
1634	drbd_adm_finish(&adm_ctx, info, retcode);
1635	return 0;
1636}
1637
1638static struct block_device *open_backing_dev(struct drbd_device *device,
1639		const char *bdev_path, void *claim_ptr, bool do_bd_link)
1640{
1641	struct block_device *bdev;
1642	int err = 0;
1643
1644	bdev = blkdev_get_by_path(bdev_path, BLK_OPEN_READ | BLK_OPEN_WRITE,
1645				  claim_ptr, NULL);
1646	if (IS_ERR(bdev)) {
1647		drbd_err(device, "open(\"%s\") failed with %ld\n",
1648				bdev_path, PTR_ERR(bdev));
1649		return bdev;
1650	}
1651
1652	if (!do_bd_link)
1653		return bdev;
1654
1655	err = bd_link_disk_holder(bdev, device->vdisk);
1656	if (err) {
1657		blkdev_put(bdev, claim_ptr);
1658		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
1659				bdev_path, err);
1660		bdev = ERR_PTR(err);
1661	}
1662	return bdev;
1663}
1664
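/* Open both the backing data device and the meta data device named in
 * @new_disk_conf and store them in @nbc.  For internal meta data the holder
 * link is skipped, so the same block device is not claimed twice. */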
1665static int open_backing_devices(struct drbd_device *device,
1666		struct disk_conf *new_disk_conf,
1667		struct drbd_backing_dev *nbc)
1668{
1669	struct block_device *bdev;
1670
1671	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
1672	if (IS_ERR(bdev))
1673		return ERR_OPEN_DISK;
1674	nbc->backing_bdev = bdev;
1675
1676	/*
1677	 * meta_dev_idx >= 0: external fixed size, possibly multiple
1678	 * drbd sharing one meta device.  TODO in that case, paranoia
1679	 * check that [md_bdev, meta_dev_idx] is not yet used by some
1680	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
1681	 * should check it for you already; but if you don't, or
1682	 * someone fooled it, we need to double check here)
1683	 */
1684	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
1685		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
1686		 * if potentially shared with other drbd minors */
1687			(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
1688		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
1689		 * as would happen with internal metadata. */
1690			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
1691			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
1692	if (IS_ERR(bdev))
1693		return ERR_OPEN_MD_DISK;
1694	nbc->md_bdev = bdev;
1695	return NO_ERROR;
1696}
1697
1698static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
1699		void *claim_ptr, bool do_bd_unlink)
1700{
1701	if (!bdev)
1702		return;
1703	if (do_bd_unlink)
1704		bd_unlink_disk_holder(bdev, device->vdisk);
1705	blkdev_put(bdev, claim_ptr);
1706}
1707
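/* Release both backing devices of @ldev and free the attached disk_conf and
 * the drbd_backing_dev structure itself.  Safe to call with @ldev == NULL. */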
1708void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
1709{
1710	if (ldev == NULL)
1711		return;
1712
1713	close_backing_dev(device, ldev->md_bdev,
1714			  ldev->md.meta_dev_idx < 0 ?
1715				(void *)device : (void *)drbd_m_holder,
1716			  ldev->md_bdev != ldev->backing_bdev);
1717	close_backing_dev(device, ldev->backing_bdev, device, true);
1718
1719	kfree(ldev->disk_conf);
1720	kfree(ldev);
1721}
1722
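/* Netlink .doit handler for attaching a backing device (typically reached via
 * "drbdsetup attach"): allocate a new backing device configuration, open the
 * lower-level data and meta devices, read the meta data super block, and move
 * the device from D_DISKLESS towards an attached disk state. */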
1723int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1724{
1725	struct drbd_config_context adm_ctx;
1726	struct drbd_device *device;
1727	struct drbd_peer_device *peer_device;
1728	struct drbd_connection *connection;
1729	int err;
1730	enum drbd_ret_code retcode;
1731	enum determine_dev_size dd;
1732	sector_t max_possible_sectors;
1733	sector_t min_md_device_sectors;
1734	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1735	struct disk_conf *new_disk_conf = NULL;
1736	struct lru_cache *resync_lru = NULL;
1737	struct fifo_buffer *new_plan = NULL;
1738	union drbd_state ns, os;
1739	enum drbd_state_rv rv;
1740	struct net_conf *nc;
1741
1742	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
1743	if (!adm_ctx.reply_skb)
1744		return retcode;
1745	if (retcode != NO_ERROR)
1746		goto finish;
1747
1748	device = adm_ctx.device;
1749	mutex_lock(&adm_ctx.resource->adm_mutex);
1750	peer_device = first_peer_device(device);
1751	connection = peer_device->connection;
1752	conn_reconfig_start(connection);
1753
1754	/* if you want to reconfigure, please tear down first */
1755	if (device->state.disk > D_DISKLESS) {
1756		retcode = ERR_DISK_CONFIGURED;
1757		goto fail;
1758	}
1759	/* It may just now have detached because of IO error.  Make sure
1760	 * drbd_ldev_destroy is done already, we may end up here very fast,
1761	 * e.g. if someone calls attach from the on-io-error handler,
1762	 * to realize a "hot spare" feature (not that I'd recommend that) */
1763	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));
1764
1765	/* make sure there is no leftover from previous force-detach attempts */
1766	clear_bit(FORCE_DETACH, &device->flags);
1767	clear_bit(WAS_IO_ERROR, &device->flags);
1768	clear_bit(WAS_READ_ERROR, &device->flags);
1769
1770	/* and no leftover from previously aborted resync or verify, either */
1771	device->rs_total = 0;
1772	device->rs_failed = 0;
1773	atomic_set(&device->rs_pending_cnt, 0);
1774
1775	/* allocation not in the IO path, drbdsetup context */
1776	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1777	if (!nbc) {
1778		retcode = ERR_NOMEM;
1779		goto fail;
1780	}
1781	spin_lock_init(&nbc->md.uuid_lock);
1782
1783	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
1784	if (!new_disk_conf) {
1785		retcode = ERR_NOMEM;
1786		goto fail;
1787	}
1788	nbc->disk_conf = new_disk_conf;
1789
1790	set_disk_conf_defaults(new_disk_conf);
1791	err = disk_conf_from_attrs(new_disk_conf, info);
1792	if (err) {
1793		retcode = ERR_MANDATORY_TAG;
1794		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
1795		goto fail;
1796	}
1797
1798	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
1799		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
1800
1801	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
1802	if (!new_plan) {
1803		retcode = ERR_NOMEM;
1804		goto fail;
1805	}
1806
1807	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1808		retcode = ERR_MD_IDX_INVALID;
1809		goto fail;
1810	}
1811
1812	rcu_read_lock();
1813	nc = rcu_dereference(connection->net_conf);
1814	if (nc) {
1815		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1816			rcu_read_unlock();
1817			retcode = ERR_STONITH_AND_PROT_A;
1818			goto fail;
1819		}
1820	}
1821	rcu_read_unlock();
1822
1823	retcode = open_backing_devices(device, new_disk_conf, nbc);
1824	if (retcode != NO_ERROR)
1825		goto fail;
1826
1827	if ((nbc->backing_bdev == nbc->md_bdev) !=
1828	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1829	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1830		retcode = ERR_MD_IDX_INVALID;
1831		goto fail;
1832	}
1833
1834	resync_lru = lc_create("resync", drbd_bm_ext_cache,
1835			1, 61, sizeof(struct bm_extent),
1836			offsetof(struct bm_extent, lce));
1837	if (!resync_lru) {
1838		retcode = ERR_NOMEM;
1839		goto fail;
1840	}
1841
1842	/* Read our meta data super block early.
1843	 * This also sets other on-disk offsets. */
1844	retcode = drbd_md_read(device, nbc);
1845	if (retcode != NO_ERROR)
1846		goto fail;
1847
1848	sanitize_disk_conf(device, new_disk_conf, nbc);
1849
1850	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
1851		drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
1852			(unsigned long long) drbd_get_max_capacity(nbc),
1853			(unsigned long long) new_disk_conf->disk_size);
1854		retcode = ERR_DISK_TOO_SMALL;
1855		goto fail;
1856	}
1857
1858	if (new_disk_conf->meta_dev_idx < 0) {
1859		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1860		/* at least one MB, otherwise it does not make sense */
1861		min_md_device_sectors = (2<<10);
1862	} else {
1863		max_possible_sectors = DRBD_MAX_SECTORS;
1864		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
1865	}
1866
1867	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1868		retcode = ERR_MD_DISK_TOO_SMALL;
1869		drbd_warn(device, "refusing attach: md-device too small, "
1870		     "at least %llu sectors needed for this meta-disk type\n",
1871		     (unsigned long long) min_md_device_sectors);
1872		goto fail;
1873	}
1874
1875	/* Make sure the new disk is big enough
1876	 * (we may currently be R_PRIMARY with no local disk...) */
1877	if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
1878		retcode = ERR_DISK_TOO_SMALL;
1879		goto fail;
1880	}
1881
1882	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1883
1884	if (nbc->known_size > max_possible_sectors) {
1885		drbd_warn(device, "==> truncating very big lower level device "
1886			"to currently maximum possible %llu sectors <==\n",
1887			(unsigned long long) max_possible_sectors);
1888		if (new_disk_conf->meta_dev_idx >= 0)
1889			drbd_warn(device, "==>> using internal or flexible "
1890				      "meta data may help <<==\n");
1891	}
1892
1893	drbd_suspend_io(device);
1894	/* also wait for the last barrier ack. */
1895	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1896	 * We need a way to either ignore barrier acks for barriers sent before a device
1897	 * was attached, or a way to wait for all pending barrier acks to come in.
1898	 * As barriers are counted per resource,
1899	 * we'd need to suspend io on all devices of a resource.
1900	 */
1901	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
1902	/* and for any other previously queued work */
1903	drbd_flush_workqueue(&connection->sender_work);
1904
1905	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
1906	retcode = (enum drbd_ret_code)rv;
1907	drbd_resume_io(device);
1908	if (rv < SS_SUCCESS)
1909		goto fail;
1910
1911	if (!get_ldev_if_state(device, D_ATTACHING))
1912		goto force_diskless;
1913
1914	if (!device->bitmap) {
1915		if (drbd_bm_init(device)) {
1916			retcode = ERR_NOMEM;
1917			goto force_diskless_dec;
1918		}
1919	}
1920
1921	if (device->state.pdsk != D_UP_TO_DATE && device->ed_uuid &&
1922	    (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
1923	    (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1924		drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
1925		    (unsigned long long)device->ed_uuid);
1926		retcode = ERR_DATA_NOT_CURRENT;
1927		goto force_diskless_dec;
1928	}
1929
1930	/* Since we are diskless, fix the activity log first... */
1931	if (drbd_check_al_size(device, new_disk_conf)) {
1932		retcode = ERR_NOMEM;
1933		goto force_diskless_dec;
1934	}
1935
1936	/* Prevent shrinking of consistent devices ! */
1937	{
1938	unsigned long long nsz = drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0);
1939	unsigned long long eff = nbc->md.la_size_sect;
1940	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) && nsz < eff) {
1941		if (nsz == nbc->disk_conf->disk_size) {
1942			drbd_warn(device, "truncating a consistent device during attach (%llu < %llu)\n", nsz, eff);
1943		} else {
1944			drbd_warn(device, "refusing to truncate a consistent device (%llu < %llu)\n", nsz, eff);
1945			drbd_msg_sprintf_info(adm_ctx.reply_skb,
1946				"To-be-attached device has last effective > current size, and is consistent\n"
1947				"(%llu > %llu sectors). Refusing to attach.", eff, nsz);
1948			retcode = ERR_IMPLICIT_SHRINK;
1949			goto force_diskless_dec;
1950		}
1951	}
1952	}
1953
1954	lock_all_resources();
1955	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
1956	if (retcode != NO_ERROR) {
1957		unlock_all_resources();
1958		goto force_diskless_dec;
1959	}
1960
1961	/* Reset the "barriers don't work" bits here, then force meta data to
1962	 * be written, to ensure we determine if barriers are supported. */
1963	if (new_disk_conf->md_flushes)
1964		clear_bit(MD_NO_FUA, &device->flags);
1965	else
1966		set_bit(MD_NO_FUA, &device->flags);
1967
1968	/* Point of no return reached.
1969	 * Devices and memory are no longer released by error cleanup below.
1970	 * now device takes over responsibility, and the state engine should
1971	 * clean it up somewhere.  */
1972	D_ASSERT(device, device->ldev == NULL);
1973	device->ldev = nbc;
1974	device->resync = resync_lru;
1975	device->rs_plan_s = new_plan;
1976	nbc = NULL;
1977	resync_lru = NULL;
1978	new_disk_conf = NULL;
1979	new_plan = NULL;
1980
1981	drbd_resync_after_changed(device);
1982	drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
1983	unlock_all_resources();
1984
1985	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
1986		set_bit(CRASHED_PRIMARY, &device->flags);
1987	else
1988		clear_bit(CRASHED_PRIMARY, &device->flags);
1989
1990	if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
1991	    !(device->state.role == R_PRIMARY && device->resource->susp_nod))
1992		set_bit(CRASHED_PRIMARY, &device->flags);
1993
1994	device->send_cnt = 0;
1995	device->recv_cnt = 0;
1996	device->read_cnt = 0;
1997	device->writ_cnt = 0;
1998
1999	drbd_reconsider_queue_parameters(device, device->ldev, NULL);
2000
2001	/* If I am currently not R_PRIMARY,
2002	 * but meta data primary indicator is set,
2003	 * I just now recover from a hard crash,
2004	 * and have been R_PRIMARY before that crash.
2005	 *
2006	 * Now, if I had no connection before that crash
2007	 * (have been degraded R_PRIMARY), chances are that
2008	 * I won't find my peer now either.
2009	 *
2010	 * In that case, and _only_ in that case,
2011	 * we use the degr-wfc-timeout instead of the default,
2012	 * so we can automatically recover from a crash of a
2013	 * degraded but active "cluster" after a certain timeout.
2014	 */
2015	clear_bit(USE_DEGR_WFC_T, &device->flags);
2016	if (device->state.role != R_PRIMARY &&
2017	     drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
2018	    !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
2019		set_bit(USE_DEGR_WFC_T, &device->flags);
2020
2021	dd = drbd_determine_dev_size(device, 0, NULL);
2022	if (dd <= DS_ERROR) {
2023		retcode = ERR_NOMEM_BITMAP;
2024		goto force_diskless_dec;
2025	} else if (dd == DS_GREW)
2026		set_bit(RESYNC_AFTER_NEG, &device->flags);
2027
2028	if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
2029	    (test_bit(CRASHED_PRIMARY, &device->flags) &&
2030	     drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
2031		drbd_info(device, "Assuming that all blocks are out of sync "
2032		     "(aka FullSync)\n");
2033		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2034			"set_n_write from attaching", BM_LOCKED_MASK,
2035			NULL)) {
2036			retcode = ERR_IO_MD_DISK;
2037			goto force_diskless_dec;
2038		}
2039	} else {
2040		if (drbd_bitmap_io(device, &drbd_bm_read,
2041			"read from attaching", BM_LOCKED_MASK,
2042			NULL)) {
2043			retcode = ERR_IO_MD_DISK;
2044			goto force_diskless_dec;
2045		}
2046	}
2047
2048	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
2049		drbd_suspend_al(device); /* IO is still suspended here... */
2050
2051	spin_lock_irq(&device->resource->req_lock);
2052	os = drbd_read_state(device);
2053	ns = os;
2054	/* If MDF_CONSISTENT is not set go into inconsistent state,
2055	   otherwise investigate MDF_WAS_UP_TO_DATE...
2056	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2057	   otherwise into D_CONSISTENT state.
2058	*/
2059	if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
2060		if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
2061			ns.disk = D_CONSISTENT;
2062		else
2063			ns.disk = D_OUTDATED;
2064	} else {
2065		ns.disk = D_INCONSISTENT;
2066	}
2067
2068	if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
2069		ns.pdsk = D_OUTDATED;
2070
2071	rcu_read_lock();
2072	if (ns.disk == D_CONSISTENT &&
2073	    (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
2074		ns.disk = D_UP_TO_DATE;
2075
2076	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2077	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2078	   this point, because drbd_request_state() modifies these
2079	   flags. */
2080
2081	if (rcu_dereference(device->ldev->disk_conf)->al_updates)
2082		device->ldev->md.flags &= ~MDF_AL_DISABLED;
2083	else
2084		device->ldev->md.flags |= MDF_AL_DISABLED;
2085
2086	rcu_read_unlock();
2087
2088	/* In case we are C_CONNECTED postpone any decision on the new disk
2089	   state after the negotiation phase. */
2090	if (device->state.conn == C_CONNECTED) {
2091		device->new_state_tmp.i = ns.i;
2092		ns.i = os.i;
2093		ns.disk = D_NEGOTIATING;
2094
2095		/* We expect to receive up-to-date UUIDs soon.
2096		   To avoid a race in receive_state, free p_uuid while
2097		   holding req_lock. I.e. atomic with the state change */
2098		kfree(device->p_uuid);
2099		device->p_uuid = NULL;
2100	}
2101
2102	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
2103	spin_unlock_irq(&device->resource->req_lock);
2104
2105	if (rv < SS_SUCCESS)
2106		goto force_diskless_dec;
2107
2108	mod_timer(&device->request_timer, jiffies + HZ);
2109
2110	if (device->state.role == R_PRIMARY)
2111		device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
2112	else
2113		device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
2114
2115	drbd_md_mark_dirty(device);
2116	drbd_md_sync(device);
2117
2118	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
2119	put_ldev(device);
2120	conn_reconfig_done(connection);
2121	mutex_unlock(&adm_ctx.resource->adm_mutex);
2122	drbd_adm_finish(&adm_ctx, info, retcode);
2123	return 0;
2124
2125 force_diskless_dec:
2126	put_ldev(device);
2127 force_diskless:
2128	drbd_force_state(device, NS(disk, D_DISKLESS));
2129	drbd_md_sync(device);
2130 fail:
2131	conn_reconfig_done(connection);
2132	if (nbc) {
2133		close_backing_dev(device, nbc->md_bdev,
2134			  nbc->disk_conf->meta_dev_idx < 0 ?
2135				(void *)device : (void *)drbd_m_holder,
2136			  nbc->md_bdev != nbc->backing_bdev);
2137		close_backing_dev(device, nbc->backing_bdev, device, true);
2138		kfree(nbc);
2139	}
2140	kfree(new_disk_conf);
2141	lc_destroy(resync_lru);
2142	kfree(new_plan);
2143	mutex_unlock(&adm_ctx.resource->adm_mutex);
2144 finish:
2145	drbd_adm_finish(&adm_ctx, info, retcode);
2146	return 0;
2147}
2148
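/* Request detach of the backing device.  With @force set, skip the graceful
 * path and force the disk into D_FAILED immediately. */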
2149static int adm_detach(struct drbd_device *device, int force)
2150{
2151	if (force) {
2152		set_bit(FORCE_DETACH, &device->flags);
2153		drbd_force_state(device, NS(disk, D_FAILED));
2154		return SS_SUCCESS;
2155	}
2156
2157	return drbd_request_detach_interruptible(device);
2158}
2159
2160/* Detaching the disk is a process in multiple stages.  First we need to lock
2161 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2162 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2163 * internal references as well.
2164 * Only then we have finally detached. */
2165int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
2166{
2167	struct drbd_config_context adm_ctx;
2168	enum drbd_ret_code retcode;
2169	struct detach_parms parms = { };
2170	int err;
2171
2172	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2173	if (!adm_ctx.reply_skb)
2174		return retcode;
2175	if (retcode != NO_ERROR)
2176		goto out;
2177
2178	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
2179		err = detach_parms_from_attrs(&parms, info);
2180		if (err) {
2181			retcode = ERR_MANDATORY_TAG;
2182			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2183			goto out;
2184		}
2185	}
2186
2187	mutex_lock(&adm_ctx.resource->adm_mutex);
2188	retcode = adm_detach(adm_ctx.device, parms.force_detach);
2189	mutex_unlock(&adm_ctx.resource->adm_mutex);
2190out:
2191	drbd_adm_finish(&adm_ctx, info, retcode);
2192	return 0;
2193}
2194
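/* Return true if any volume of @connection is currently resync source or
 * target, including the paused variants. */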
2195static bool conn_resync_running(struct drbd_connection *connection)
2196{
2197	struct drbd_peer_device *peer_device;
2198	bool rv = false;
2199	int vnr;
2200
2201	rcu_read_lock();
2202	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2203		struct drbd_device *device = peer_device->device;
2204		if (device->state.conn == C_SYNC_SOURCE ||
2205		    device->state.conn == C_SYNC_TARGET ||
2206		    device->state.conn == C_PAUSED_SYNC_S ||
2207		    device->state.conn == C_PAUSED_SYNC_T) {
2208			rv = true;
2209			break;
2210		}
2211	}
2212	rcu_read_unlock();
2213
2214	return rv;
2215}
2216
2217static bool conn_ov_running(struct drbd_connection *connection)
2218{
2219	struct drbd_peer_device *peer_device;
2220	bool rv = false;
2221	int vnr;
2222
2223	rcu_read_lock();
2224	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
2225		struct drbd_device *device = peer_device->device;
2226		if (device->state.conn == C_VERIFY_S ||
2227		    device->state.conn == C_VERIFY_T) {
2228			rv = true;
2229			break;
2230		}
2231	}
2232	rcu_read_unlock();
2233
2234	return rv;
2235}
2236
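/* Validate a new net_conf against the currently agreed protocol version, the
 * roles on both sides, and the fencing policy of each attached disk.  Called
 * under rcu_read_lock() by check_net_options(). */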
2237static enum drbd_ret_code
2238_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
2239{
2240	struct drbd_peer_device *peer_device;
2241	int i;
2242
2243	if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
2244		if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
2245			return ERR_NEED_APV_100;
2246
2247		if (new_net_conf->two_primaries != old_net_conf->two_primaries)
2248			return ERR_NEED_APV_100;
2249
2250		if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
2251			return ERR_NEED_APV_100;
2252	}
2253
2254	if (!new_net_conf->two_primaries &&
2255	    conn_highest_role(connection) == R_PRIMARY &&
2256	    conn_highest_peer(connection) == R_PRIMARY)
2257		return ERR_NEED_ALLOW_TWO_PRI;
2258
2259	if (new_net_conf->two_primaries &&
2260	    (new_net_conf->wire_protocol != DRBD_PROT_C))
2261		return ERR_NOT_PROTO_C;
2262
2263	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2264		struct drbd_device *device = peer_device->device;
2265		if (get_ldev(device)) {
2266			enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
2267			put_ldev(device);
2268			if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
2269				return ERR_STONITH_AND_PROT_A;
2270		}
2271		if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
2272			return ERR_DISCARD_IMPOSSIBLE;
2273	}
2274
2275	if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
2276		return ERR_CONG_NOT_PROTO_A;
2277
2278	return NO_ERROR;
2279}
2280
2281static enum drbd_ret_code
2282check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
2283{
2284	enum drbd_ret_code rv;
2285	struct drbd_peer_device *peer_device;
2286	int i;
2287
2288	rcu_read_lock();
2289	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
2290	rcu_read_unlock();
2291
2292	/* connection->peer_devices protected by genl_lock() here */
2293	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2294		struct drbd_device *device = peer_device->device;
2295		if (!device->bitmap) {
2296			if (drbd_bm_init(device))
2297				return ERR_NOMEM;
2298		}
2299	}
2300
2301	return rv;
2302}
2303
2304struct crypto {
2305	struct crypto_shash *verify_tfm;
2306	struct crypto_shash *csums_tfm;
2307	struct crypto_shash *cram_hmac_tfm;
2308	struct crypto_shash *integrity_tfm;
2309};
2310
2311static int
2312alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
2313{
2314	if (!tfm_name[0])
2315		return NO_ERROR;
2316
2317	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
2318	if (IS_ERR(*tfm)) {
2319		*tfm = NULL;
2320		return err_alg;
2321	}
2322
2323	return NO_ERROR;
2324}
2325
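/* Allocate the synchronous hash transforms named in @new_net_conf: csums,
 * verify, integrity, and the CRAM-HMAC authentication hash (wrapped as
 * "hmac(<alg>)").  Empty algorithm names are skipped; on failure the matching
 * ERR_*_ALG code is returned. */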
2326static enum drbd_ret_code
2327alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
2328{
2329	char hmac_name[CRYPTO_MAX_ALG_NAME];
2330	enum drbd_ret_code rv;
2331
2332	rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
2333			 ERR_CSUMS_ALG);
2334	if (rv != NO_ERROR)
2335		return rv;
2336	rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
2337			 ERR_VERIFY_ALG);
2338	if (rv != NO_ERROR)
2339		return rv;
2340	rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
2341			 ERR_INTEGRITY_ALG);
2342	if (rv != NO_ERROR)
2343		return rv;
2344	if (new_net_conf->cram_hmac_alg[0] != 0) {
2345		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
2346			 new_net_conf->cram_hmac_alg);
2347
2348		rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
2349				 ERR_AUTH_ALG);
2350	}
2351
2352	return rv;
2353}
2354
2355static void free_crypto(struct crypto *crypto)
2356{
2357	crypto_free_shash(crypto->cram_hmac_tfm);
2358	crypto_free_shash(crypto->integrity_tfm);
2359	crypto_free_shash(crypto->csums_tfm);
2360	crypto_free_shash(crypto->verify_tfm);
2361}
2362
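/* Netlink .doit handler backing "drbdsetup net-options": change the options
 * of an already configured connection under conf_update and data.mutex.
 * Changing the csums or verify algorithm is refused while a resync or online
 * verify is running; otherwise the new transforms replace the old ones and a
 * connected peer is informed. */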
2363int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
2364{
2365	struct drbd_config_context adm_ctx;
2366	enum drbd_ret_code retcode;
2367	struct drbd_connection *connection;
2368	struct net_conf *old_net_conf, *new_net_conf = NULL;
2369	int err;
2370	int ovr; /* online verify running */
2371	int rsr; /* re-sync running */
2372	struct crypto crypto = { };
2373
2374	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2375	if (!adm_ctx.reply_skb)
2376		return retcode;
2377	if (retcode != NO_ERROR)
2378		goto finish;
2379
2380	connection = adm_ctx.connection;
2381	mutex_lock(&adm_ctx.resource->adm_mutex);
2382
2383	new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
2384	if (!new_net_conf) {
2385		retcode = ERR_NOMEM;
2386		goto out;
2387	}
2388
2389	conn_reconfig_start(connection);
2390
2391	mutex_lock(&connection->data.mutex);
2392	mutex_lock(&connection->resource->conf_update);
2393	old_net_conf = connection->net_conf;
2394
2395	if (!old_net_conf) {
2396		drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
2397		retcode = ERR_INVALID_REQUEST;
2398		goto fail;
2399	}
2400
2401	*new_net_conf = *old_net_conf;
2402	if (should_set_defaults(info))
2403		set_net_conf_defaults(new_net_conf);
2404
2405	err = net_conf_from_attrs_for_change(new_net_conf, info);
2406	if (err && err != -ENOMSG) {
2407		retcode = ERR_MANDATORY_TAG;
2408		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2409		goto fail;
2410	}
2411
2412	retcode = check_net_options(connection, new_net_conf);
2413	if (retcode != NO_ERROR)
2414		goto fail;
2415
2416	/* re-sync running */
2417	rsr = conn_resync_running(connection);
2418	if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
2419		retcode = ERR_CSUMS_RESYNC_RUNNING;
2420		goto fail;
2421	}
2422
2423	/* online verify running */
2424	ovr = conn_ov_running(connection);
2425	if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
2426		retcode = ERR_VERIFY_RUNNING;
2427		goto fail;
2428	}
2429
2430	retcode = alloc_crypto(&crypto, new_net_conf);
2431	if (retcode != NO_ERROR)
2432		goto fail;
2433
2434	rcu_assign_pointer(connection->net_conf, new_net_conf);
2435
2436	if (!rsr) {
2437		crypto_free_shash(connection->csums_tfm);
2438		connection->csums_tfm = crypto.csums_tfm;
2439		crypto.csums_tfm = NULL;
2440	}
2441	if (!ovr) {
2442		crypto_free_shash(connection->verify_tfm);
2443		connection->verify_tfm = crypto.verify_tfm;
2444		crypto.verify_tfm = NULL;
2445	}
2446
2447	crypto_free_shash(connection->integrity_tfm);
2448	connection->integrity_tfm = crypto.integrity_tfm;
2449	if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
2450		/* Do this without trying to take connection->data.mutex again.  */
2451		__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
2452
2453	crypto_free_shash(connection->cram_hmac_tfm);
2454	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2455
2456	mutex_unlock(&connection->resource->conf_update);
2457	mutex_unlock(&connection->data.mutex);
2458	kvfree_rcu_mightsleep(old_net_conf);
2459
2460	if (connection->cstate >= C_WF_REPORT_PARAMS) {
2461		struct drbd_peer_device *peer_device;
2462		int vnr;
2463
2464		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
2465			drbd_send_sync_param(peer_device);
2466	}
2467
2468	goto done;
2469
2470 fail:
2471	mutex_unlock(&connection->resource->conf_update);
2472	mutex_unlock(&connection->data.mutex);
2473	free_crypto(&crypto);
2474	kfree(new_net_conf);
2475 done:
2476	conn_reconfig_done(connection);
2477 out:
2478	mutex_unlock(&adm_ctx.resource->adm_mutex);
2479 finish:
2480	drbd_adm_finish(&adm_ctx, info, retcode);
2481	return 0;
2482}
2483
2484static void connection_to_info(struct connection_info *info,
2485			       struct drbd_connection *connection)
2486{
2487	info->conn_connection_state = connection->cstate;
2488	info->conn_role = conn_highest_peer(connection);
2489}
2490
2491static void peer_device_to_info(struct peer_device_info *info,
2492				struct drbd_peer_device *peer_device)
2493{
2494	struct drbd_device *device = peer_device->device;
2495
2496	info->peer_repl_state =
2497		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
2498	info->peer_disk_state = device->state.pdsk;
2499	info->peer_resync_susp_user = device->state.user_isp;
2500	info->peer_resync_susp_peer = device->state.peer_isp;
2501	info->peer_resync_susp_dependency = device->state.aftr_isp;
2502}
2503
2504int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
2505{
2506	struct connection_info connection_info;
2507	enum drbd_notification_type flags;
2508	unsigned int peer_devices = 0;
2509	struct drbd_config_context adm_ctx;
2510	struct drbd_peer_device *peer_device;
2511	struct net_conf *old_net_conf, *new_net_conf = NULL;
2512	struct crypto crypto = { };
2513	struct drbd_resource *resource;
2514	struct drbd_connection *connection;
2515	enum drbd_ret_code retcode;
2516	enum drbd_state_rv rv;
2517	int i;
2518	int err;
2519
2520	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2521
2522	if (!adm_ctx.reply_skb)
2523		return retcode;
2524	if (retcode != NO_ERROR)
2525		goto out;
2526	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
2527		drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
2528		retcode = ERR_INVALID_REQUEST;
2529		goto out;
2530	}
2531
2532	/* No need for _rcu here. All reconfiguration is
2533	 * strictly serialized on genl_lock(). We are protected against
2534	 * concurrent reconfiguration/addition/deletion */
2535	for_each_resource(resource, &drbd_resources) {
2536		for_each_connection(connection, resource) {
2537			if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
2538			    !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
2539				    connection->my_addr_len)) {
2540				retcode = ERR_LOCAL_ADDR;
2541				goto out;
2542			}
2543
2544			if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
2545			    !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
2546				    connection->peer_addr_len)) {
2547				retcode = ERR_PEER_ADDR;
2548				goto out;
2549			}
2550		}
2551	}
2552
2553	mutex_lock(&adm_ctx.resource->adm_mutex);
2554	connection = first_connection(adm_ctx.resource);
2555	conn_reconfig_start(connection);
2556
2557	if (connection->cstate > C_STANDALONE) {
2558		retcode = ERR_NET_CONFIGURED;
2559		goto fail;
2560	}
2561
2562	/* allocation not in the IO path, drbdsetup / netlink process context */
2563	new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
2564	if (!new_net_conf) {
2565		retcode = ERR_NOMEM;
2566		goto fail;
2567	}
2568
2569	set_net_conf_defaults(new_net_conf);
2570
2571	err = net_conf_from_attrs(new_net_conf, info);
2572	if (err && err != -ENOMSG) {
2573		retcode = ERR_MANDATORY_TAG;
2574		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2575		goto fail;
2576	}
2577
2578	retcode = check_net_options(connection, new_net_conf);
2579	if (retcode != NO_ERROR)
2580		goto fail;
2581
2582	retcode = alloc_crypto(&crypto, new_net_conf);
2583	if (retcode != NO_ERROR)
2584		goto fail;
2585
2586	((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2587
2588	drbd_flush_workqueue(&connection->sender_work);
2589
2590	mutex_lock(&adm_ctx.resource->conf_update);
2591	old_net_conf = connection->net_conf;
2592	if (old_net_conf) {
2593		retcode = ERR_NET_CONFIGURED;
2594		mutex_unlock(&adm_ctx.resource->conf_update);
2595		goto fail;
2596	}
2597	rcu_assign_pointer(connection->net_conf, new_net_conf);
2598
2599	conn_free_crypto(connection);
2600	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
2601	connection->integrity_tfm = crypto.integrity_tfm;
2602	connection->csums_tfm = crypto.csums_tfm;
2603	connection->verify_tfm = crypto.verify_tfm;
2604
2605	connection->my_addr_len = nla_len(adm_ctx.my_addr);
2606	memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
2607	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
2608	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
2609
2610	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2611		peer_devices++;
2612	}
2613
2614	connection_to_info(&connection_info, connection);
2615	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2616	mutex_lock(&notification_mutex);
2617	notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
2618	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2619		struct peer_device_info peer_device_info;
2620
2621		peer_device_to_info(&peer_device_info, peer_device);
2622		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
2623		notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
2624	}
2625	mutex_unlock(&notification_mutex);
2626	mutex_unlock(&adm_ctx.resource->conf_update);
2627
2628	rcu_read_lock();
2629	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
2630		struct drbd_device *device = peer_device->device;
2631		device->send_cnt = 0;
2632		device->recv_cnt = 0;
2633	}
2634	rcu_read_unlock();
2635
2636	rv = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2637
2638	conn_reconfig_done(connection);
2639	mutex_unlock(&adm_ctx.resource->adm_mutex);
2640	drbd_adm_finish(&adm_ctx, info, rv);
2641	return 0;
2642
2643fail:
2644	free_crypto(&crypto);
2645	kfree(new_net_conf);
2646
2647	conn_reconfig_done(connection);
2648	mutex_unlock(&adm_ctx.resource->adm_mutex);
2649out:
2650	drbd_adm_finish(&adm_ctx, info, retcode);
2651	return 0;
2652}
2653
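/* Try to take the connection down to C_STANDALONE.  Handles the cases where
 * one side first needs to be seen as outdated, and on success waits for the
 * receiver thread to terminate before forcing C_STANDALONE. */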
2654static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
2655{
2656	enum drbd_conns cstate;
2657	enum drbd_state_rv rv;
2658
2659repeat:
2660	rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2661			force ? CS_HARD : 0);
2662
2663	switch (rv) {
2664	case SS_NOTHING_TO_DO:
2665		break;
2666	case SS_ALREADY_STANDALONE:
2667		return SS_SUCCESS;
2668	case SS_PRIMARY_NOP:
2669		/* Our state checking code wants to see the peer outdated. */
2670		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
2671
2672		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
2673			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
2674
2675		break;
2676	case SS_CW_FAILED_BY_PEER:
2677		spin_lock_irq(&connection->resource->req_lock);
2678		cstate = connection->cstate;
2679		spin_unlock_irq(&connection->resource->req_lock);
2680		if (cstate <= C_WF_CONNECTION)
2681			goto repeat;
2682		/* The peer probably wants to see us outdated. */
2683		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
2684							disk, D_OUTDATED), 0);
2685		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2686			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
2687					CS_HARD);
2688		}
2689		break;
2690	default:;
2691		/* no special handling necessary */
2692	}
2693
2694	if (rv >= SS_SUCCESS) {
2695		enum drbd_state_rv rv2;
2696		/* No one else can reconfigure the network while I am here.
2697		 * The state handling only uses drbd_thread_stop_nowait(),
2698		 * we want to really wait here until the receiver is no more.
2699		 */
2700		drbd_thread_stop(&connection->receiver);
2701
2702		/* Race breaker.  This additional state change request may be
2703		 * necessary, if this was a forced disconnect during a receiver
2704		 * restart.  We may have "killed" the receiver thread just
2705		 * after drbd_receiver() returned.  Typically, we should be
2706		 * C_STANDALONE already, now, and this becomes a no-op.
2707		 */
2708		rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
2709				CS_VERBOSE | CS_HARD);
2710		if (rv2 < SS_SUCCESS)
2711			drbd_err(connection,
2712				"unexpected rv2=%d in conn_try_disconnect()\n",
2713				rv2);
2714		/* Unlike in DRBD 9, the state engine has generated
2715		 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2716	}
2717	return rv;
2718}
2719
2720int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2721{
2722	struct drbd_config_context adm_ctx;
2723	struct disconnect_parms parms;
2724	struct drbd_connection *connection;
2725	enum drbd_state_rv rv;
2726	enum drbd_ret_code retcode;
2727	int err;
2728
2729	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
2730	if (!adm_ctx.reply_skb)
2731		return retcode;
2732	if (retcode != NO_ERROR)
2733		goto fail;
2734
2735	connection = adm_ctx.connection;
2736	memset(&parms, 0, sizeof(parms));
2737	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2738		err = disconnect_parms_from_attrs(&parms, info);
2739		if (err) {
2740			retcode = ERR_MANDATORY_TAG;
2741			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2742			goto fail;
2743		}
2744	}
2745
2746	mutex_lock(&adm_ctx.resource->adm_mutex);
2747	rv = conn_try_disconnect(connection, parms.force_disconnect);
2748	mutex_unlock(&adm_ctx.resource->adm_mutex);
2749	if (rv < SS_SUCCESS) {
2750		drbd_adm_finish(&adm_ctx, info, rv);
2751		return 0;
2752	}
2753	retcode = NO_ERROR;
2754 fail:
2755	drbd_adm_finish(&adm_ctx, info, retcode);
2756	return 0;
2757}
2758
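/* After an online grow, decide which node becomes sync source for the new
 * area: the primary if the roles differ, otherwise the node with
 * RESOLVE_CONFLICTS set on its connection.  The other side waits in
 * C_WF_SYNC_UUID. */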
2759void resync_after_online_grow(struct drbd_device *device)
2760{
2761	int iass; /* I am sync source */
2762
2763	drbd_info(device, "Resync of new storage after online grow\n");
2764	if (device->state.role != device->state.peer)
2765		iass = (device->state.role == R_PRIMARY);
2766	else
2767		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
2768
2769	if (iass)
2770		drbd_start_resync(device, C_SYNC_SOURCE);
2771	else
2772		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2773}
2774
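/* Netlink .doit handler backing "drbdsetup resize": adjust the user-requested
 * disk size and, if requested, the activity log layout, then re-determine the
 * device size.  A connected peer is sent the new UUIDs and sizes. */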
2775int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2776{
2777	struct drbd_config_context adm_ctx;
2778	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
2779	struct resize_parms rs;
2780	struct drbd_device *device;
2781	enum drbd_ret_code retcode;
2782	enum determine_dev_size dd;
2783	bool change_al_layout = false;
2784	enum dds_flags ddsf;
2785	sector_t u_size;
2786	int err;
2787
2788	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2789	if (!adm_ctx.reply_skb)
2790		return retcode;
2791	if (retcode != NO_ERROR)
2792		goto finish;
2793
2794	mutex_lock(&adm_ctx.resource->adm_mutex);
2795	device = adm_ctx.device;
2796	if (!get_ldev(device)) {
2797		retcode = ERR_NO_DISK;
2798		goto fail;
2799	}
2800
2801	memset(&rs, 0, sizeof(struct resize_parms));
2802	rs.al_stripes = device->ldev->md.al_stripes;
2803	rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
2804	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2805		err = resize_parms_from_attrs(&rs, info);
2806		if (err) {
2807			retcode = ERR_MANDATORY_TAG;
2808			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2809			goto fail_ldev;
2810		}
2811	}
2812
2813	if (device->state.conn > C_CONNECTED) {
2814		retcode = ERR_RESIZE_RESYNC;
2815		goto fail_ldev;
2816	}
2817
2818	if (device->state.role == R_SECONDARY &&
2819	    device->state.peer == R_SECONDARY) {
2820		retcode = ERR_NO_PRIMARY;
2821		goto fail_ldev;
2822	}
2823
2824	if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
2825		retcode = ERR_NEED_APV_93;
2826		goto fail_ldev;
2827	}
2828
2829	rcu_read_lock();
2830	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
2831	rcu_read_unlock();
2832	if (u_size != (sector_t)rs.resize_size) {
2833		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
2834		if (!new_disk_conf) {
2835			retcode = ERR_NOMEM;
2836			goto fail_ldev;
2837		}
2838	}
2839
2840	if (device->ldev->md.al_stripes != rs.al_stripes ||
2841	    device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
2842		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
2843
2844		if (al_size_k > (16 * 1024 * 1024)) {
2845			retcode = ERR_MD_LAYOUT_TOO_BIG;
2846			goto fail_ldev;
2847		}
2848
2849		if (al_size_k < MD_32kB_SECT/2) {
2850			retcode = ERR_MD_LAYOUT_TOO_SMALL;
2851			goto fail_ldev;
2852		}
2853
2854		if (device->state.conn != C_CONNECTED && !rs.resize_force) {
2855			retcode = ERR_MD_LAYOUT_CONNECTED;
2856			goto fail_ldev;
2857		}
2858
2859		change_al_layout = true;
2860	}
2861
2862	if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
2863		device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
2864
2865	if (new_disk_conf) {
2866		mutex_lock(&device->resource->conf_update);
2867		old_disk_conf = device->ldev->disk_conf;
2868		*new_disk_conf = *old_disk_conf;
2869		new_disk_conf->disk_size = (sector_t)rs.resize_size;
2870		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
2871		mutex_unlock(&device->resource->conf_update);
2872		kvfree_rcu_mightsleep(old_disk_conf);
2873		new_disk_conf = NULL;
2874	}
2875
2876	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2877	dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
2878	drbd_md_sync(device);
2879	put_ldev(device);
2880	if (dd == DS_ERROR) {
2881		retcode = ERR_NOMEM_BITMAP;
2882		goto fail;
2883	} else if (dd == DS_ERROR_SPACE_MD) {
2884		retcode = ERR_MD_LAYOUT_NO_FIT;
2885		goto fail;
2886	} else if (dd == DS_ERROR_SHRINK) {
2887		retcode = ERR_IMPLICIT_SHRINK;
2888		goto fail;
2889	}
2890
2891	if (device->state.conn == C_CONNECTED) {
2892		if (dd == DS_GREW)
2893			set_bit(RESIZE_PENDING, &device->flags);
2894
2895		drbd_send_uuids(first_peer_device(device));
2896		drbd_send_sizes(first_peer_device(device), 1, ddsf);
2897	}
2898
2899 fail:
2900	mutex_unlock(&adm_ctx.resource->adm_mutex);
2901 finish:
2902	drbd_adm_finish(&adm_ctx, info, retcode);
2903	return 0;
2904
2905 fail_ldev:
2906	put_ldev(device);
2907	kfree(new_disk_conf);
2908	goto fail;
2909}
2910
2911int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2912{
2913	struct drbd_config_context adm_ctx;
2914	enum drbd_ret_code retcode;
2915	struct res_opts res_opts;
2916	int err;
2917
2918	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
2919	if (!adm_ctx.reply_skb)
2920		return retcode;
2921	if (retcode != NO_ERROR)
2922		goto fail;
2923
2924	res_opts = adm_ctx.resource->res_opts;
2925	if (should_set_defaults(info))
2926		set_res_opts_defaults(&res_opts);
2927
2928	err = res_opts_from_attrs(&res_opts, info);
2929	if (err && err != -ENOMSG) {
2930		retcode = ERR_MANDATORY_TAG;
2931		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
2932		goto fail;
2933	}
2934
2935	mutex_lock(&adm_ctx.resource->adm_mutex);
2936	err = set_resource_options(adm_ctx.resource, &res_opts);
2937	if (err) {
2938		retcode = ERR_INVALID_REQUEST;
2939		if (err == -ENOMEM)
2940			retcode = ERR_NOMEM;
2941	}
2942	mutex_unlock(&adm_ctx.resource->adm_mutex);
2943
2944fail:
2945	drbd_adm_finish(&adm_ctx, info, retcode);
2946	return 0;
2947}
2948
2949int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2950{
2951	struct drbd_config_context adm_ctx;
2952	struct drbd_device *device;
2953	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
2954
2955	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
2956	if (!adm_ctx.reply_skb)
2957		return retcode;
2958	if (retcode != NO_ERROR)
2959		goto out;
2960
2961	device = adm_ctx.device;
2962	if (!get_ldev(device)) {
2963		retcode = ERR_NO_DISK;
2964		goto out;
2965	}
2966
2967	mutex_lock(&adm_ctx.resource->adm_mutex);
2968
2969	/* If there is still bitmap IO pending, probably because of a previous
2970	 * resync just being finished, wait for it before requesting a new resync.
2971	 * Also wait for its after_state_ch(). */
2972	drbd_suspend_io(device);
2973	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
2974	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
2975
2976	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
2977	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
2978	 * try to start a resync handshake as sync target for full sync.
2979	 */
2980	if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
2981		retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
2982		if (retcode >= SS_SUCCESS) {
2983			if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
2984				"set_n_write from invalidate", BM_LOCKED_MASK, NULL))
2985				retcode = ERR_IO_MD_DISK;
2986		}
2987	} else
2988		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
2989	drbd_resume_io(device);
2990	mutex_unlock(&adm_ctx.resource->adm_mutex);
2991	put_ldev(device);
2992out:
2993	drbd_adm_finish(&adm_ctx, info, retcode);
2994	return 0;
2995}
2996
2997static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2998		union drbd_state mask, union drbd_state val)
2999{
3000	struct drbd_config_context adm_ctx;
3001	enum drbd_ret_code retcode;
3002
3003	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3004	if (!adm_ctx.reply_skb)
3005		return retcode;
3006	if (retcode != NO_ERROR)
3007		goto out;
3008
3009	mutex_lock(&adm_ctx.resource->adm_mutex);
3010	retcode = drbd_request_state(adm_ctx.device, mask, val);
3011	mutex_unlock(&adm_ctx.resource->adm_mutex);
3012out:
3013	drbd_adm_finish(&adm_ctx, info, retcode);
3014	return 0;
3015}
3016
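/* Bitmap IO helper used by invalidate-peer: set all bits (full sync) and
 * additionally suspend the activity log. */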
3017static int drbd_bmio_set_susp_al(struct drbd_device *device,
3018		struct drbd_peer_device *peer_device) __must_hold(local)
3019{
3020	int rv;
3021
3022	rv = drbd_bmio_set_n_write(device, peer_device);
3023	drbd_suspend_al(device);
3024	return rv;
3025}
3026
3027int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
3028{
3029	struct drbd_config_context adm_ctx;
3030	int retcode; /* drbd_ret_code, drbd_state_rv */
3031	struct drbd_device *device;
3032
3033	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3034	if (!adm_ctx.reply_skb)
3035		return retcode;
3036	if (retcode != NO_ERROR)
3037		goto out;
3038
3039	device = adm_ctx.device;
3040	if (!get_ldev(device)) {
3041		retcode = ERR_NO_DISK;
3042		goto out;
3043	}
3044
3045	mutex_lock(&adm_ctx.resource->adm_mutex);
3046
3047	/* If there is still bitmap IO pending, probably because of a previous
3048	 * resync just being finished, wait for it before requesting a new resync.
3049	 * Also wait for its after_state_ch(). */
3050	drbd_suspend_io(device);
3051	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
3052	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);
3053
3054	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3055	 * in the bitmap.  Otherwise, try to start a resync handshake
3056	 * as sync source for full sync.
3057	 */
3058	if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
3059		/* The peer will get a resync upon connect anyway. Just make that
3060		   into a full resync. */
3061		retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
3062		if (retcode >= SS_SUCCESS) {
3063			if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
3064				"set_n_write from invalidate_peer",
3065				BM_LOCKED_SET_ALLOWED, NULL))
3066				retcode = ERR_IO_MD_DISK;
3067		}
3068	} else
3069		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
3070	drbd_resume_io(device);
3071	mutex_unlock(&adm_ctx.resource->adm_mutex);
3072	put_ldev(device);
3073out:
3074	drbd_adm_finish(&adm_ctx, info, retcode);
3075	return 0;
3076}
3077
3078int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
3079{
3080	struct drbd_config_context adm_ctx;
3081	enum drbd_ret_code retcode;
3082
3083	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3084	if (!adm_ctx.reply_skb)
3085		return retcode;
3086	if (retcode != NO_ERROR)
3087		goto out;
3088
3089	mutex_lock(&adm_ctx.resource->adm_mutex);
3090	if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
3091		retcode = ERR_PAUSE_IS_SET;
3092	mutex_unlock(&adm_ctx.resource->adm_mutex);
3093out:
3094	drbd_adm_finish(&adm_ctx, info, retcode);
3095	return 0;
3096}
3097
3098int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
3099{
3100	struct drbd_config_context adm_ctx;
3101	union drbd_dev_state s;
3102	enum drbd_ret_code retcode;
3103
3104	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3105	if (!adm_ctx.reply_skb)
3106		return retcode;
3107	if (retcode != NO_ERROR)
3108		goto out;
3109
3110	mutex_lock(&adm_ctx.resource->adm_mutex);
3111	if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
3112		s = adm_ctx.device->state;
3113		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
3114			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
3115				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
3116		} else {
3117			retcode = ERR_PAUSE_IS_CLEAR;
3118		}
3119	}
3120	mutex_unlock(&adm_ctx.resource->adm_mutex);
3121out:
3122	drbd_adm_finish(&adm_ctx, info, retcode);
3123	return 0;
3124}
3125
3126int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
3127{
3128	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
3129}
3130
3131int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
3132{
3133	struct drbd_config_context adm_ctx;
3134	struct drbd_device *device;
3135	int retcode; /* enum drbd_ret_code resp. enum drbd_state_rv */
3136
3137	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3138	if (!adm_ctx.reply_skb)
3139		return retcode;
3140	if (retcode != NO_ERROR)
3141		goto out;
3142
3143	mutex_lock(&adm_ctx.resource->adm_mutex);
3144	device = adm_ctx.device;
3145	if (test_bit(NEW_CUR_UUID, &device->flags)) {
3146		if (get_ldev_if_state(device, D_ATTACHING)) {
3147			drbd_uuid_new_current(device);
3148			put_ldev(device);
3149		} else {
3150			/* This is effectively a multi-stage "forced down".
3151			 * The NEW_CUR_UUID bit is supposedly only set, if we
3152			 * lost the replication connection, and are configured
3153			 * to freeze IO and wait for some fence-peer handler.
3154			 * So we still don't have a replication connection.
3155			 * And now we don't have a local disk either.  After
3156			 * resume, we will fail all pending and new IO, because
3157			 * we don't have any data anymore.  Which means we will
3158			 * eventually be able to terminate all users of this
3159			 * device, and then take it down.  By bumping the
3160			 * "effective" data uuid, we make sure that you really
3161			 * need to tear down before you reconfigure: we will
3162			 * then refuse to re-connect or re-attach (because no
3163			 * matching real data uuid exists).
3164			 */
3165			u64 val;
3166			get_random_bytes(&val, sizeof(u64));
3167			drbd_set_ed_uuid(device, val);
3168			drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3169		}
3170		clear_bit(NEW_CUR_UUID, &device->flags);
3171	}
3172	drbd_suspend_io(device);
3173	retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
3174	if (retcode == SS_SUCCESS) {
3175		if (device->state.conn < C_CONNECTED)
3176			tl_clear(first_peer_device(device)->connection);
3177		if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
3178			tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
3179	}
3180	drbd_resume_io(device);
3181	mutex_unlock(&adm_ctx.resource->adm_mutex);
3182out:
3183	drbd_adm_finish(&adm_ctx, info, retcode);
3184	return 0;
3185}
3186
3187int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
3188{
3189	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
3190}
3191
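/* Emit a DRBD_NLA_CFG_CONTEXT nest into @skb with the resource name and, when
 * available, the volume number and the connection's local and peer addresses.
 * Used by the dump callbacks below. */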
3192static int nla_put_drbd_cfg_context(struct sk_buff *skb,
3193				    struct drbd_resource *resource,
3194				    struct drbd_connection *connection,
3195				    struct drbd_device *device)
3196{
3197	struct nlattr *nla;
3198	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_CONTEXT);
3199	if (!nla)
3200		goto nla_put_failure;
3201	if (device &&
3202	    nla_put_u32(skb, T_ctx_volume, device->vnr))
3203		goto nla_put_failure;
3204	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
3205		goto nla_put_failure;
3206	if (connection) {
3207		if (connection->my_addr_len &&
3208		    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
3209			goto nla_put_failure;
3210		if (connection->peer_addr_len &&
3211		    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
3212			goto nla_put_failure;
3213	}
3214	nla_nest_end(skb, nla);
3215	return 0;
3216
3217nla_put_failure:
3218	if (nla)
3219		nla_nest_cancel(skb, nla);
3220	return -EMSGSIZE;
3221}
3222
3223/*
3224 * The generic netlink dump callbacks are called outside the genl_lock(), so
3225 * they cannot use the simple attribute parsing code which uses global
3226 * attribute tables.
3227 */
3228static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
3229{
3230	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3231	const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
3232	struct nlattr *nla;
3233
3234	nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
3235		       DRBD_NLA_CFG_CONTEXT);
3236	if (!nla)
3237		return NULL;
3238	return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
3239}
3240
3241static void resource_to_info(struct resource_info *, struct drbd_resource *);
3242
3243int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
3244{
3245	struct drbd_genlmsghdr *dh;
3246	struct drbd_resource *resource;
3247	struct resource_info resource_info;
3248	struct resource_statistics resource_statistics;
3249	int err;
3250
3251	rcu_read_lock();
3252	if (cb->args[0]) {
3253		for_each_resource_rcu(resource, &drbd_resources)
3254			if (resource == (struct drbd_resource *)cb->args[0])
3255				goto found_resource;
3256		err = 0;  /* resource was probably deleted */
3257		goto out;
3258	}
3259	resource = list_entry(&drbd_resources,
3260			      struct drbd_resource, resources);
3261
3262found_resource:
3263	list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
3264		goto put_result;
3265	}
3266	err = 0;
3267	goto out;
3268
3269put_result:
3270	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3271			cb->nlh->nlmsg_seq, &drbd_genl_family,
3272			NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
3273	err = -ENOMEM;
3274	if (!dh)
3275		goto out;
3276	dh->minor = -1U;
3277	dh->ret_code = NO_ERROR;
3278	err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
3279	if (err)
3280		goto out;
3281	err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
3282	if (err)
3283		goto out;
3284	resource_to_info(&resource_info, resource);
3285	err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
3286	if (err)
3287		goto out;
3288	resource_statistics.res_stat_write_ordering = resource->write_ordering;
3289	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
3290	if (err)
3291		goto out;
3292	cb->args[0] = (long)resource;
3293	genlmsg_end(skb, dh);
3294	err = 0;
3295
3296out:
3297	rcu_read_unlock();
3298	if (err)
3299		return err;
3300	return skb->len;
3301}
3302
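/* Fill @s with a snapshot of the device counters and, if a local disk is
 * attached, the on-disk UUID history and meta data flags (taken under
 * uuid_lock). */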
3303static void device_to_statistics(struct device_statistics *s,
3304				 struct drbd_device *device)
3305{
3306	memset(s, 0, sizeof(*s));
3307	s->dev_upper_blocked = !may_inc_ap_bio(device);
3308	if (get_ldev(device)) {
3309		struct drbd_md *md = &device->ldev->md;
3310		u64 *history_uuids = (u64 *)s->history_uuids;
3311		int n;
3312
3313		spin_lock_irq(&md->uuid_lock);
3314		s->dev_current_uuid = md->uuid[UI_CURRENT];
3315		BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
3316		for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
3317			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
3318		for (; n < HISTORY_UUIDS; n++)
3319			history_uuids[n] = 0;
3320		s->history_uuids_len = HISTORY_UUIDS;
3321		spin_unlock_irq(&md->uuid_lock);
3322
3323		s->dev_disk_flags = md->flags;
3324		put_ldev(device);
3325	}
3326	s->dev_size = get_capacity(device->vdisk);
3327	s->dev_read = device->read_cnt;
3328	s->dev_write = device->writ_cnt;
3329	s->dev_al_writes = device->al_writ_cnt;
3330	s->dev_bm_writes = device->bm_writ_cnt;
3331	s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
3332	s->dev_lower_pending = atomic_read(&device->local_cnt);
3333	s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
3334	s->dev_exposed_data_uuid = device->ed_uuid;
3335}
3336
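/* Drop the resource reference that a dump callback stored in cb->args[0]
 * (taken via drbd_find_resource() or kref_get()). */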
3337static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
3338{
3339	if (cb->args[0]) {
3340		struct drbd_resource *resource =
3341			(struct drbd_resource *)cb->args[0];
3342		kref_put(&resource->kref, drbd_destroy_resource);
3343	}
3344
3345	return 0;
3346}
3347
3348int drbd_adm_dump_devices_done(struct netlink_callback *cb)
{
3349	return put_resource_in_arg0(cb, 7);
3350}
3351
3352static void device_to_info(struct device_info *, struct drbd_device *);
3353
3354int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
3355{
3356	struct nlattr *resource_filter;
3357	struct drbd_resource *resource;
3358	struct drbd_device *device;
3359	int minor, err, retcode;
3360	struct drbd_genlmsghdr *dh;
3361	struct device_info device_info;
3362	struct device_statistics device_statistics;
3363	struct idr *idr_to_search;
3364
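	/* cb->args[0]: resource to filter on (also holds a kref), or 0 for all;
	 * cb->args[1]: next minor to look at. */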
3365	resource = (struct drbd_resource *)cb->args[0];
3366	if (!cb->args[0] && !cb->args[1]) {
3367		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3368		if (resource_filter) {
3369			retcode = ERR_RES_NOT_KNOWN;
3370			resource = drbd_find_resource(nla_data(resource_filter));
3371			if (!resource)
3372				goto put_result;
3373			cb->args[0] = (long)resource;
3374		}
3375	}
3376
3377	rcu_read_lock();
3378	minor = cb->args[1];
3379	idr_to_search = resource ? &resource->devices : &drbd_devices;
3380	device = idr_get_next(idr_to_search, &minor);
3381	if (!device) {
3382		err = 0;
3383		goto out;
3384	}
3385	idr_for_each_entry_continue(idr_to_search, device, minor) {
3386		retcode = NO_ERROR;
3387		goto put_result;  /* only one iteration */
3388	}
3389	err = 0;
3390	goto out;  /* no more devices */
3391
3392put_result:
3393	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3394			cb->nlh->nlmsg_seq, &drbd_genl_family,
3395			NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
3396	err = -ENOMEM;
3397	if (!dh)
3398		goto out;
3399	dh->ret_code = retcode;
3400	dh->minor = -1U;
3401	if (retcode == NO_ERROR) {
3402		dh->minor = device->minor;
3403		err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
3404		if (err)
3405			goto out;
3406		if (get_ldev(device)) {
3407			struct disk_conf *disk_conf =
3408				rcu_dereference(device->ldev->disk_conf);
3409
3410			err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
3411			put_ldev(device);
3412			if (err)
3413				goto out;
3414		}
3415		device_to_info(&device_info, device);
3416		err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
3417		if (err)
3418			goto out;
3419
3420		device_to_statistics(&device_statistics, device);
3421		err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
3422		if (err)
3423			goto out;
3424		cb->args[1] = minor + 1;
3425	}
3426	genlmsg_end(skb, dh);
3427	err = 0;
3428
3429out:
3430	rcu_read_unlock();
3431	if (err)
3432		return err;
3433	return skb->len;
3434}
3435
3436int drbd_adm_dump_connections_done(struct netlink_callback *cb)
3437{
3438	return put_resource_in_arg0(cb, 6);
3439}
3440
3441enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3442
3443int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
3444{
3445	struct nlattr *resource_filter;
3446	struct drbd_resource *resource = NULL, *next_resource;
3447	struct drbd_connection *connection;
3448	int err = 0, retcode;
3449	struct drbd_genlmsghdr *dh;
3450	struct connection_info connection_info;
3451	struct connection_statistics connection_statistics;
3452
3453	rcu_read_lock();
3454	resource = (struct drbd_resource *)cb->args[0];
3455	if (!cb->args[0]) {
3456		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3457		if (resource_filter) {
3458			retcode = ERR_RES_NOT_KNOWN;
3459			resource = drbd_find_resource(nla_data(resource_filter));
3460			if (!resource)
3461				goto put_result;
3462			cb->args[0] = (long)resource;
3463			cb->args[1] = SINGLE_RESOURCE;
3464		}
3465	}
3466	if (!resource) {
3467		if (list_empty(&drbd_resources))
3468			goto out;
3469		resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
3470		kref_get(&resource->kref);
3471		cb->args[0] = (long)resource;
3472		cb->args[1] = ITERATE_RESOURCES;
3473	}
3474
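	/* conf_update is a mutex and may sleep, so drop the RCU read lock
	 * before taking it, then re-enter the RCU read section for the walk. */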
3475next_resource:
3476	rcu_read_unlock();
3477	mutex_lock(&resource->conf_update);
3478	rcu_read_lock();
3479	if (cb->args[2]) {
3480		for_each_connection_rcu(connection, resource)
3481			if (connection == (struct drbd_connection *)cb->args[2])
3482				goto found_connection;
3483		/* connection was probably deleted */
3484		goto no_more_connections;
3485	}
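	/* Make connection point to the list head (not the first entry). */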
3486	connection = list_entry(&resource->connections, struct drbd_connection, connections);
3487
3488found_connection:
3489	list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
3490		if (!has_net_conf(connection))
3491			continue;
3492		retcode = NO_ERROR;
3493		goto put_result;  /* only one iteration */
3494	}
3495
3496no_more_connections:
3497	if (cb->args[1] == ITERATE_RESOURCES) {
3498		for_each_resource_rcu(next_resource, &drbd_resources) {
3499			if (next_resource == resource)
3500				goto found_resource;
3501		}
3502		/* resource was probably deleted */
3503	}
3504	goto out;
3505
3506found_resource:
3507	list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
3508		mutex_unlock(&resource->conf_update);
3509		kref_put(&resource->kref, drbd_destroy_resource);
3510		resource = next_resource;
3511		kref_get(&resource->kref);
3512		cb->args[0] = (long)resource;
3513		cb->args[2] = 0;
3514		goto next_resource;
3515	}
3516	goto out;  /* no more resources */
3517
3518put_result:
3519	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3520			cb->nlh->nlmsg_seq, &drbd_genl_family,
3521			NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
3522	err = -ENOMEM;
3523	if (!dh)
3524		goto out;
3525	dh->ret_code = retcode;
3526	dh->minor = -1U;
3527	if (retcode == NO_ERROR) {
3528		struct net_conf *net_conf;
3529
3530		err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
3531		if (err)
3532			goto out;
3533		net_conf = rcu_dereference(connection->net_conf);
3534		if (net_conf) {
3535			err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
3536			if (err)
3537				goto out;
3538		}
3539		connection_to_info(&connection_info, connection);
3540		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
3541		if (err)
3542			goto out;
3543		connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
3544		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
3545		if (err)
3546			goto out;
3547		cb->args[2] = (long)connection;
3548	}
3549	genlmsg_end(skb, dh);
3550	err = 0;
3551
3552out:
3553	rcu_read_unlock();
3554	if (resource)
3555		mutex_unlock(&resource->conf_update);
3556	if (err)
3557		return err;
3558	return skb->len;
3559}
3560
3561enum mdf_peer_flag {
3562	MDF_PEER_CONNECTED =	1 << 0,
3563	MDF_PEER_OUTDATED =	1 << 1,
3564	MDF_PEER_FENCING =	1 << 2,
3565	MDF_PEER_FULL_SYNC =	1 << 3,
3566};
3567
3568static void peer_device_to_statistics(struct peer_device_statistics *s,
3569				      struct drbd_peer_device *peer_device)
3570{
3571	struct drbd_device *device = peer_device->device;
3572
3573	memset(s, 0, sizeof(*s));
3574	s->peer_dev_received = device->recv_cnt;
3575	s->peer_dev_sent = device->send_cnt;
3576	s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
3577			      atomic_read(&device->rs_pending_cnt);
3578	s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
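	/* convert from bitmap granularity (BM_BLOCK_SIZE) to 512-byte sectors */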
3579	s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
3580	s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
3581	if (get_ldev(device)) {
3582		struct drbd_md *md = &device->ldev->md;
3583
3584		spin_lock_irq(&md->uuid_lock);
3585		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
3586		spin_unlock_irq(&md->uuid_lock);
3587		s->peer_dev_flags =
3588			(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
3589				MDF_PEER_CONNECTED : 0) +
3590			(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
3591			 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
3592				MDF_PEER_OUTDATED : 0) +
3593			/* FIXME: MDF_PEER_FENCING? */
3594			(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
3595				MDF_PEER_FULL_SYNC : 0);
3596		put_ldev(device);
3597	}
3598}
3599
3600int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
3601{
3602	return put_resource_in_arg0(cb, 9);
3603}
3604
3605int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
3606{
3607	struct nlattr *resource_filter;
3608	struct drbd_resource *resource;
3609	struct drbd_device *device;
3610	struct drbd_peer_device *peer_device = NULL;
3611	int minor, err, retcode;
3612	struct drbd_genlmsghdr *dh;
3613	struct idr *idr_to_search;
3614
3615	resource = (struct drbd_resource *)cb->args[0];
3616	if (!cb->args[0] && !cb->args[1]) {
3617		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
3618		if (resource_filter) {
3619			retcode = ERR_RES_NOT_KNOWN;
3620			resource = drbd_find_resource(nla_data(resource_filter));
3621			if (!resource)
3622				goto put_result;
3623		}
3624		cb->args[0] = (long)resource;
3625	}
3626
3627	rcu_read_lock();
3628	minor = cb->args[1];
3629	idr_to_search = resource ? &resource->devices : &drbd_devices;
3630	device = idr_find(idr_to_search, minor);
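	/* The minor in cb->args[1] may have disappeared since the last call;
	 * next_device: is also reached from below once all peer devices of
	 * the current device have been reported. */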
3631	if (!device) {
3632next_device:
3633		minor++;
3634		cb->args[2] = 0;
3635		device = idr_get_next(idr_to_search, &minor);
3636		if (!device) {
3637			err = 0;
3638			goto out;
3639		}
3640	}
3641	if (cb->args[2]) {
3642		for_each_peer_device(peer_device, device)
3643			if (peer_device == (struct drbd_peer_device *)cb->args[2])
3644				goto found_peer_device;
3645		/* peer device was probably deleted */
3646		goto next_device;
3647	}
3648	/* Make peer_device point to the list head (not the first entry). */
3649	peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);
3650
3651found_peer_device:
3652	list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
3653		if (!has_net_conf(peer_device->connection))
3654			continue;
3655		retcode = NO_ERROR;
3656		goto put_result;  /* only one iteration */
3657	}
3658	goto next_device;
3659
3660put_result:
3661	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3662			cb->nlh->nlmsg_seq, &drbd_genl_family,
3663			NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
3664	err = -ENOMEM;
3665	if (!dh)
3666		goto out;
3667	dh->ret_code = retcode;
3668	dh->minor = -1U;
3669	if (retcode == NO_ERROR) {
3670		struct peer_device_info peer_device_info;
3671		struct peer_device_statistics peer_device_statistics;
3672
3673		dh->minor = minor;
3674		err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
3675		if (err)
3676			goto out;
3677		peer_device_to_info(&peer_device_info, peer_device);
3678		err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
3679		if (err)
3680			goto out;
3681		peer_device_to_statistics(&peer_device_statistics, peer_device);
3682		err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
3683		if (err)
3684			goto out;
3685		cb->args[1] = minor;
3686		cb->args[2] = (long)peer_device;
3687	}
3688	genlmsg_end(skb, dh);
3689	err = 0;
3690
3691out:
3692	rcu_read_unlock();
3693	if (err)
3694		return err;
3695	return skb->len;
3696}
3697/*
3698 * Return the connection of @resource if @resource has exactly one connection.
3699 */
3700static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
3701{
3702	struct list_head *connections = &resource->connections;
3703
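	/* exactly one entry: the single element links straight back to the head */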
3704	if (list_empty(connections) || connections->next->next != connections)
3705		return NULL;
3706	return list_first_entry(&resource->connections, struct drbd_connection, connections);
3707}
3708
3709static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
3710		const struct sib_info *sib)
3711{
3712	struct drbd_resource *resource = device->resource;
3713	struct state_info *si = NULL; /* for sizeof(si->member); */
3714	struct nlattr *nla;
3715	int got_ldev;
3716	int err = 0;
3717	int exclude_sensitive;
3718
3719	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
3720	 * to.  So we better exclude_sensitive information.
3721	 *
3722	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
3723	 * in the context of the requesting user process. Exclude sensitive
3724	 * information, unless the requesting process has CAP_SYS_ADMIN.
3725	 *
3726	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
3727	 * relies on the current implementation of netlink_dump(), which
3728	 * executes the dump callback successively from netlink_recvmsg(),
3729	 * always in the context of the receiving process */
3730	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
3731
3732	got_ldev = get_ldev(device);
3733
3734	/* We still need to add connection name and volume number information.
3735	 * Minor number is in drbd_genlmsghdr. */
3736	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
3737		goto nla_put_failure;
3738
3739	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
3740		goto nla_put_failure;
3741
3742	rcu_read_lock();
3743	if (got_ldev) {
3744		struct disk_conf *disk_conf;
3745
3746		disk_conf = rcu_dereference(device->ldev->disk_conf);
3747		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
3748	}
3749	if (!err) {
3750		struct net_conf *nc;
3751
3752		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
3753		if (nc)
3754			err = net_conf_to_skb(skb, nc, exclude_sensitive);
3755	}
3756	rcu_read_unlock();
3757	if (err)
3758		goto nla_put_failure;
3759
3760	nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
3761	if (!nla)
3762		goto nla_put_failure;
3763	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
3764	    nla_put_u32(skb, T_current_state, device->state.i) ||
3765	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
3766	    nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
3767	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
3768	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
3769	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
3770	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
3771	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
3772	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
3773	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
3774	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
3775	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
3776		goto nla_put_failure;
3777
3778	if (got_ldev) {
3779		int err;
3780
3781		spin_lock_irq(&device->ldev->md.uuid_lock);
3782		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
3783		spin_unlock_irq(&device->ldev->md.uuid_lock);
3784
3785		if (err)
3786			goto nla_put_failure;
3787
3788		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
3789		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
3790		    nla_put_u64_0pad(skb, T_bits_oos,
3791				     drbd_bm_total_weight(device)))
3792			goto nla_put_failure;
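		/* report resync/verify progress while it is running or paused */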
3793		if (C_SYNC_SOURCE <= device->state.conn &&
3794		    C_PAUSED_SYNC_T >= device->state.conn) {
3795			if (nla_put_u64_0pad(skb, T_bits_rs_total,
3796					     device->rs_total) ||
3797			    nla_put_u64_0pad(skb, T_bits_rs_failed,
3798					     device->rs_failed))
3799				goto nla_put_failure;
3800		}
3801	}
3802
3803	if (sib) {
3804		switch(sib->sib_reason) {
3805		case SIB_SYNC_PROGRESS:
3806		case SIB_GET_STATUS_REPLY:
3807			break;
3808		case SIB_STATE_CHANGE:
3809			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
3810			    nla_put_u32(skb, T_new_state, sib->ns.i))
3811				goto nla_put_failure;
3812			break;
3813		case SIB_HELPER_POST:
3814			if (nla_put_u32(skb, T_helper_exit_code,
3815					sib->helper_exit_code))
3816				goto nla_put_failure;
3817			fallthrough;
3818		case SIB_HELPER_PRE:
3819			if (nla_put_string(skb, T_helper, sib->helper_name))
3820				goto nla_put_failure;
3821			break;
3822		}
3823	}
3824	nla_nest_end(skb, nla);
3825
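	/* On success we fall past the "if (0)" below and err keeps its value;
	 * only a jump to nla_put_failure: sets err = -EMSGSIZE. */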
3826	if (0)
3827nla_put_failure:
3828		err = -EMSGSIZE;
3829	if (got_ldev)
3830		put_ldev(device);
3831	return err;
3832}
3833
3834int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
3835{
3836	struct drbd_config_context adm_ctx;
3837	enum drbd_ret_code retcode;
3838	int err;
3839
3840	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
3841	if (!adm_ctx.reply_skb)
3842		return retcode;
3843	if (retcode != NO_ERROR)
3844		goto out;
3845
3846	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
3847	if (err) {
3848		nlmsg_free(adm_ctx.reply_skb);
3849		return err;
3850	}
3851out:
3852	drbd_adm_finish(&adm_ctx, info, retcode);
3853	return 0;
3854}
3855
3856static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
3857{
3858	struct drbd_device *device;
3859	struct drbd_genlmsghdr *dh;
3860	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
3861	struct drbd_resource *resource = NULL;
3862	struct drbd_resource *tmp;
3863	unsigned volume = cb->args[1];
3864
3865	/* Open-coded, deferred iteration:
3866	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
3867	 *      connection = "first connection of resource or undefined";
3868	 *	idr_for_each_entry(&resource->devices, device, i) {
3869	 *	  ...
3870	 *	}
3871	 * }
3872	 * where resource is cb->args[0];
3873	 * and i is cb->args[1];
3874	 *
3875	 * cb->args[2] indicates if we shall loop over all resources,
3876	 * or just dump all volumes of a single resource.
3877	 *
3878	 * This may miss entries inserted after this dump started,
3879	 * or entries deleted before they are reached.
3880	 *
3881	 * We need to make sure the device won't disappear while
3882	 * we are looking at it, and revalidate our iterators
3883	 * on each iteration.
3884	 */
3885
3886	/* synchronize with conn_create()/drbd_destroy_connection() */
3887	rcu_read_lock();
3888	/* revalidate iterator position */
3889	for_each_resource_rcu(tmp, &drbd_resources) {
3890		if (pos == NULL) {
3891			/* first iteration */
3892			pos = tmp;
3893			resource = pos;
3894			break;
3895		}
3896		if (tmp == pos) {
3897			resource = pos;
3898			break;
3899		}
3900	}
3901	if (resource) {
3902next_resource:
3903		device = idr_get_next(&resource->devices, &volume);
3904		if (!device) {
3905			/* No more volumes to dump on this resource.
3906			 * Advance resource iterator. */
3907			pos = list_entry_rcu(resource->resources.next,
3908					     struct drbd_resource, resources);
3909			/* Did we dump any volume of this resource yet? */
3910			if (volume != 0) {
3911				/* If we reached the end of the list,
3912				 * or only a single resource dump was requested,
3913				 * we are done. */
3914				if (&pos->resources == &drbd_resources || cb->args[2])
3915					goto out;
3916				volume = 0;
3917				resource = pos;
3918				goto next_resource;
3919			}
3920		}
3921
3922		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
3923				cb->nlh->nlmsg_seq, &drbd_genl_family,
3924				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
3925		if (!dh)
3926			goto out;
3927
3928		if (!device) {
3929			/* This is a connection without a single volume.
3930			 * Surprisingly enough, it may have a network
3931			 * configuration. */
3932			struct drbd_connection *connection;
3933
3934			dh->minor = -1U;
3935			dh->ret_code = NO_ERROR;
3936			connection = the_only_connection(resource);
3937			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
3938				goto cancel;
3939			if (connection) {
3940				struct net_conf *nc;
3941
3942				nc = rcu_dereference(connection->net_conf);
3943				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
3944					goto cancel;
3945			}
3946			goto done;
3947		}
3948
3949		D_ASSERT(device, device->vnr == volume);
3950		D_ASSERT(device, device->resource == resource);
3951
3952		dh->minor = device_to_minor(device);
3953		dh->ret_code = NO_ERROR;
3954
3955		if (nla_put_status_info(skb, device, NULL)) {
3956cancel:
3957			genlmsg_cancel(skb, dh);
3958			goto out;
3959		}
3960done:
3961		genlmsg_end(skb, dh);
3962	}
3963
3964out:
3965	rcu_read_unlock();
3966	/* where to start the next iteration */
3967	cb->args[0] = (long)pos;
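	/* restart volume iteration unless we stayed on the same resource */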
3968	cb->args[1] = (pos == resource) ? volume + 1 : 0;
3969
3970	/* No more resources/volumes/minors found results in an empty skb.
3971	 * Which will terminate the dump. */
3972	return skb->len;
3973}
3974
3975/*
3976 * Request status of all resources, or of all volumes within a single resource.
3977 *
3978 * This is a dump, as the answer may not fit in a single reply skb otherwise.
3979 * Which means we cannot use the family->attrbuf or other such members, because
3980 * dump is NOT protected by the genl_lock().  During dump, we only have access
3981 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
3982 *
3983 * Once things are setup properly, we call into get_one_status().
3984 */
3985int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
3986{
3987	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
3988	struct nlattr *nla;
3989	const char *resource_name;
3990	struct drbd_resource *resource;
3991	int maxtype;
3992
3993	/* Is this a followup call? */
3994	if (cb->args[0]) {
3995		/* ... of a single resource dump,
3996		 * and the resource iterator has been advanced already? */
3997		if (cb->args[2] && cb->args[2] != cb->args[0])
3998			return 0; /* DONE. */
3999		goto dump;
4000	}
4001
4002	/* First call (from netlink_dump_start).  We need to figure out
4003	 * which resource(s) the user wants us to dump. */
4004	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
4005			nlmsg_attrlen(cb->nlh, hdrlen),
4006			DRBD_NLA_CFG_CONTEXT);
4007
4008	/* No explicit context given.  Dump all. */
4009	if (!nla)
4010		goto dump;
4011	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
4012	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
4013	if (IS_ERR(nla))
4014		return PTR_ERR(nla);
4015	/* context given, but no name present? */
4016	if (!nla)
4017		return -EINVAL;
4018	resource_name = nla_data(nla);
4019	if (!*resource_name)
4020		return -ENODEV;
4021	resource = drbd_find_resource(resource_name);
4022	if (!resource)
4023		return -ENODEV;
4024
4025	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
4026
4027	/* prime iterators, and set "filter" mode mark:
4028	 * only dump this resource. */
4029	cb->args[0] = (long)resource;
4030	/* cb->args[1] = 0; passed in this way. */
4031	cb->args[2] = (long)resource;
4032
4033dump:
4034	return get_one_status(skb, cb);
4035}
4036
4037int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
4038{
4039	struct drbd_config_context adm_ctx;
4040	enum drbd_ret_code retcode;
4041	struct timeout_parms tp;
4042	int err;
4043
4044	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4045	if (!adm_ctx.reply_skb)
4046		return retcode;
4047	if (retcode != NO_ERROR)
4048		goto out;
4049
4050	tp.timeout_type =
4051		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
4052		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
4053		UT_DEFAULT;
4054
4055	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
4056	if (err) {
4057		nlmsg_free(adm_ctx.reply_skb);
4058		return err;
4059	}
4060out:
4061	drbd_adm_finish(&adm_ctx, info, retcode);
4062	return 0;
4063}
4064
4065int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
4066{
4067	struct drbd_config_context adm_ctx;
4068	struct drbd_device *device;
4069	enum drbd_ret_code retcode;
4070	struct start_ov_parms parms;
4071
4072	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4073	if (!adm_ctx.reply_skb)
4074		return retcode;
4075	if (retcode != NO_ERROR)
4076		goto out;
4077
4078	device = adm_ctx.device;
4079
4080	/* resume from last known position, if possible */
4081	parms.ov_start_sector = device->ov_start_sector;
4082	parms.ov_stop_sector = ULLONG_MAX;
4083	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
4084		int err = start_ov_parms_from_attrs(&parms, info);
4085		if (err) {
4086			retcode = ERR_MANDATORY_TAG;
4087			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4088			goto out;
4089		}
4090	}
4091	mutex_lock(&adm_ctx.resource->adm_mutex);
4092
4093	/* w_make_ov_request expects position to be aligned */
4094	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
4095	device->ov_stop_sector = parms.ov_stop_sector;
4096
4097	/* If there is still bitmap IO pending, e.g. previous resync or verify
4098	 * just being finished, wait for it before requesting a new resync. */
4099	drbd_suspend_io(device);
4100	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
4101	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
4102	drbd_resume_io(device);
4103
4104	mutex_unlock(&adm_ctx.resource->adm_mutex);
4105out:
4106	drbd_adm_finish(&adm_ctx, info, retcode);
4107	return 0;
4108}
4109
4110
4111int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
4112{
4113	struct drbd_config_context adm_ctx;
4114	struct drbd_device *device;
4115	enum drbd_ret_code retcode;
4116	int skip_initial_sync = 0;
4117	int err;
4118	struct new_c_uuid_parms args;
4119
4120	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4121	if (!adm_ctx.reply_skb)
4122		return retcode;
4123	if (retcode != NO_ERROR)
4124		goto out_nolock;
4125
4126	device = adm_ctx.device;
4127	memset(&args, 0, sizeof(args));
4128	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
4129		err = new_c_uuid_parms_from_attrs(&args, info);
4130		if (err) {
4131			retcode = ERR_MANDATORY_TAG;
4132			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4133			goto out_nolock;
4134		}
4135	}
4136
4137	mutex_lock(&adm_ctx.resource->adm_mutex);
4138	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
4139
4140	if (!get_ldev(device)) {
4141		retcode = ERR_NO_DISK;
4142		goto out;
4143	}
4144
4145	/* this is "skip initial sync", assumed to be clean */
4146	if (device->state.conn == C_CONNECTED &&
4147	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
4148	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
4149		drbd_info(device, "Preparing to skip initial sync\n");
4150		skip_initial_sync = 1;
4151	} else if (device->state.conn != C_STANDALONE) {
4152		retcode = ERR_CONNECTED;
4153		goto out_dec;
4154	}
4155
4156	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
4157	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
4158
4159	if (args.clear_bm) {
4160		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
4161			"clear_n_write from new_c_uuid", BM_LOCKED_MASK, NULL);
4162		if (err) {
4163			drbd_err(device, "Writing bitmap failed with %d\n", err);
4164			retcode = ERR_IO_MD_DISK;
4165		}
4166		if (skip_initial_sync) {
4167			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
4168			_drbd_uuid_set(device, UI_BITMAP, 0);
4169			drbd_print_uuids(device, "cleared bitmap UUID");
4170			spin_lock_irq(&device->resource->req_lock);
4171			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
4172					CS_VERBOSE, NULL);
4173			spin_unlock_irq(&device->resource->req_lock);
4174		}
4175	}
4176
4177	drbd_md_sync(device);
4178out_dec:
4179	put_ldev(device);
4180out:
4181	mutex_unlock(device->state_mutex);
4182	mutex_unlock(&adm_ctx.resource->adm_mutex);
4183out_nolock:
4184	drbd_adm_finish(&adm_ctx, info, retcode);
4185	return 0;
4186}
4187
4188static enum drbd_ret_code
4189drbd_check_resource_name(struct drbd_config_context *adm_ctx)
4190{
4191	const char *name = adm_ctx->resource_name;
4192	if (!name || !name[0]) {
4193		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
4194		return ERR_MANDATORY_TAG;
4195	}
4196	/* if we want to use these in sysfs/configfs/debugfs some day,
4197	 * we must not allow slashes */
4198	if (strchr(name, '/')) {
4199		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
4200		return ERR_INVALID_REQUEST;
4201	}
4202	return NO_ERROR;
4203}
4204
4205static void resource_to_info(struct resource_info *info,
4206			     struct drbd_resource *resource)
4207{
4208	info->res_role = conn_highest_role(first_connection(resource));
4209	info->res_susp = resource->susp;
4210	info->res_susp_nod = resource->susp_nod;
4211	info->res_susp_fen = resource->susp_fen;
4212}
4213
4214int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
4215{
4216	struct drbd_connection *connection;
4217	struct drbd_config_context adm_ctx;
4218	enum drbd_ret_code retcode;
4219	struct res_opts res_opts;
4220	int err;
4221
4222	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
4223	if (!adm_ctx.reply_skb)
4224		return retcode;
4225	if (retcode != NO_ERROR)
4226		goto out;
4227
4228	set_res_opts_defaults(&res_opts);
4229	err = res_opts_from_attrs(&res_opts, info);
4230	if (err && err != -ENOMSG) {
4231		retcode = ERR_MANDATORY_TAG;
4232		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
4233		goto out;
4234	}
4235
4236	retcode = drbd_check_resource_name(&adm_ctx);
4237	if (retcode != NO_ERROR)
4238		goto out;
4239
4240	if (adm_ctx.resource) {
4241		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
4242			retcode = ERR_INVALID_REQUEST;
4243			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
4244		}
4245		/* else: still NO_ERROR */
4246		goto out;
4247	}
4248
4249	/* not yet safe for genl_family.parallel_ops */
4250	mutex_lock(&resources_mutex);
4251	connection = conn_create(adm_ctx.resource_name, &res_opts);
4252	mutex_unlock(&resources_mutex);
4253
4254	if (connection) {
4255		struct resource_info resource_info;
4256
4257		mutex_lock(&notification_mutex);
4258		resource_to_info(&resource_info, connection->resource);
4259		notify_resource_state(NULL, 0, connection->resource,
4260				      &resource_info, NOTIFY_CREATE);
4261		mutex_unlock(&notification_mutex);
4262	} else
4263		retcode = ERR_NOMEM;
4264
4265out:
4266	drbd_adm_finish(&adm_ctx, info, retcode);
4267	return 0;
4268}
4269
4270static void device_to_info(struct device_info *info,
4271			   struct drbd_device *device)
4272{
4273	info->dev_disk_state = device->state.disk;
4274}
4275
4276
4277int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
4278{
4279	struct drbd_config_context adm_ctx;
4280	struct drbd_genlmsghdr *dh = genl_info_userhdr(info);
4281	enum drbd_ret_code retcode;
4282
4283	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4284	if (!adm_ctx.reply_skb)
4285		return retcode;
4286	if (retcode != NO_ERROR)
4287		goto out;
4288
4289	if (dh->minor > MINORMASK) {
4290		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
4291		retcode = ERR_INVALID_REQUEST;
4292		goto out;
4293	}
4294	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
4295		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
4296		retcode = ERR_INVALID_REQUEST;
4297		goto out;
4298	}
4299
4300	/* drbd_adm_prepare has already made sure that
4301	 * first_peer_device(device)->connection and device->vnr match the request. */
4302	if (adm_ctx.device) {
4303		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
4304			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
4305		/* else: still NO_ERROR */
4306		goto out;
4307	}
4308
4309	mutex_lock(&adm_ctx.resource->adm_mutex);
4310	retcode = drbd_create_device(&adm_ctx, dh->minor);
4311	if (retcode == NO_ERROR) {
4312		struct drbd_device *device;
4313		struct drbd_peer_device *peer_device;
4314		struct device_info info;
4315		unsigned int peer_devices = 0;
4316		enum drbd_notification_type flags;
4317
4318		device = minor_to_device(dh->minor);
4319		for_each_peer_device(peer_device, device) {
4320			if (!has_net_conf(peer_device->connection))
4321				continue;
4322			peer_devices++;
4323		}
4324
4325		device_to_info(&info, device);
4326		mutex_lock(&notification_mutex);
4327		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4328		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
4329		for_each_peer_device(peer_device, device) {
4330			struct peer_device_info peer_device_info;
4331
4332			if (!has_net_conf(peer_device->connection))
4333				continue;
4334			peer_device_to_info(&peer_device_info, peer_device);
4335			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
4336			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
4337						 NOTIFY_CREATE | flags);
4338		}
4339		mutex_unlock(&notification_mutex);
4340	}
4341	mutex_unlock(&adm_ctx.resource->adm_mutex);
4342out:
4343	drbd_adm_finish(&adm_ctx, info, retcode);
4344	return 0;
4345}
4346
4347static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
4348{
4349	struct drbd_peer_device *peer_device;
4350
4351	if (device->state.disk == D_DISKLESS &&
4352	    /* no need to be device->state.conn == C_STANDALONE &&
4353	     * we may want to delete a minor from a live replication group.
4354	     */
4355	    device->state.role == R_SECONDARY) {
4356		struct drbd_connection *connection =
4357			first_connection(device->resource);
4358
4359		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
4360				    CS_VERBOSE + CS_WAIT_COMPLETE);
4361
4362		/* If the state engine hasn't stopped the sender thread yet, we
4363		 * need to flush the sender work queue before generating the
4364		 * DESTROY events here. */
4365		if (get_t_state(&connection->worker) == RUNNING)
4366			drbd_flush_workqueue(&connection->sender_work);
4367
4368		mutex_lock(&notification_mutex);
4369		for_each_peer_device(peer_device, device) {
4370			if (!has_net_conf(peer_device->connection))
4371				continue;
4372			notify_peer_device_state(NULL, 0, peer_device, NULL,
4373						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
4374		}
4375		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
4376		mutex_unlock(&notification_mutex);
4377
4378		drbd_delete_device(device);
4379		return NO_ERROR;
4380	} else
4381		return ERR_MINOR_CONFIGURED;
4382}
4383
4384int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
4385{
4386	struct drbd_config_context adm_ctx;
4387	enum drbd_ret_code retcode;
4388
4389	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
4390	if (!adm_ctx.reply_skb)
4391		return retcode;
4392	if (retcode != NO_ERROR)
4393		goto out;
4394
4395	mutex_lock(&adm_ctx.resource->adm_mutex);
4396	retcode = adm_del_minor(adm_ctx.device);
4397	mutex_unlock(&adm_ctx.resource->adm_mutex);
4398out:
4399	drbd_adm_finish(&adm_ctx, info, retcode);
4400	return 0;
4401}
4402
4403static int adm_del_resource(struct drbd_resource *resource)
4404{
4405	struct drbd_connection *connection;
4406
4407	for_each_connection(connection, resource) {
4408		if (connection->cstate > C_STANDALONE)
4409			return ERR_NET_CONFIGURED;
4410	}
4411	if (!idr_is_empty(&resource->devices))
4412		return ERR_RES_IN_USE;
4413
4414	/* The state engine has stopped the sender thread, so we don't
4415	 * need to flush the sender work queue before generating the
4416	 * DESTROY event here. */
4417	mutex_lock(&notification_mutex);
4418	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
4419	mutex_unlock(&notification_mutex);
4420
4421	mutex_lock(&resources_mutex);
4422	list_del_rcu(&resource->resources);
4423	mutex_unlock(&resources_mutex);
4424	/* Make sure all threads have actually stopped: state handling only
4425	 * does drbd_thread_stop_nowait(). */
4426	list_for_each_entry(connection, &resource->connections, connections)
4427		drbd_thread_stop(&connection->worker);
4428	synchronize_rcu();
4429	drbd_free_resource(resource);
4430	return NO_ERROR;
4431}
4432
4433int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
4434{
4435	struct drbd_config_context adm_ctx;
4436	struct drbd_resource *resource;
4437	struct drbd_connection *connection;
4438	struct drbd_device *device;
4439	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
4440	unsigned i;
4441
4442	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4443	if (!adm_ctx.reply_skb)
4444		return retcode;
4445	if (retcode != NO_ERROR)
4446		goto finish;
4447
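	/* Take everything down in order: demote to secondary, disconnect,
	 * detach, delete all volumes, and finally delete the resource. */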
4448	resource = adm_ctx.resource;
4449	mutex_lock(&resource->adm_mutex);
4450	/* demote */
4451	for_each_connection(connection, resource) {
4452		struct drbd_peer_device *peer_device;
4453
4454		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
4455			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
4456			if (retcode < SS_SUCCESS) {
4457				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
4458				goto out;
4459			}
4460		}
4461
4462		retcode = conn_try_disconnect(connection, 0);
4463		if (retcode < SS_SUCCESS) {
4464			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
4465			goto out;
4466		}
4467	}
4468
4469	/* detach */
4470	idr_for_each_entry(&resource->devices, device, i) {
4471		retcode = adm_detach(device, 0);
4472		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
4473			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
4474			goto out;
4475		}
4476	}
4477
4478	/* delete volumes */
4479	idr_for_each_entry(&resource->devices, device, i) {
4480		retcode = adm_del_minor(device);
4481		if (retcode != NO_ERROR) {
4482			/* "can not happen" */
4483			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
4484			goto out;
4485		}
4486	}
4487
4488	retcode = adm_del_resource(resource);
4489out:
4490	mutex_unlock(&resource->adm_mutex);
4491finish:
4492	drbd_adm_finish(&adm_ctx, info, retcode);
4493	return 0;
4494}
4495
4496int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
4497{
4498	struct drbd_config_context adm_ctx;
4499	struct drbd_resource *resource;
4500	enum drbd_ret_code retcode;
4501
4502	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
4503	if (!adm_ctx.reply_skb)
4504		return retcode;
4505	if (retcode != NO_ERROR)
4506		goto finish;
4507	resource = adm_ctx.resource;
4508
4509	mutex_lock(&resource->adm_mutex);
4510	retcode = adm_del_resource(resource);
4511	mutex_unlock(&resource->adm_mutex);
4512finish:
4513	drbd_adm_finish(&adm_ctx, info, retcode);
4514	return 0;
4515}
4516
4517void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
4518{
4519	struct sk_buff *msg;
4520	struct drbd_genlmsghdr *d_out;
4521	unsigned seq;
4522	int err = -ENOMEM;
4523
4524	seq = atomic_inc_return(&drbd_genl_seq);
4525	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4526	if (!msg)
4527		goto failed;
4528
4529	err = -EMSGSIZE;
4530	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
4531	if (!d_out) /* cannot happen, but anyway. */
4532		goto nla_put_failure;
4533	d_out->minor = device_to_minor(device);
4534	d_out->ret_code = NO_ERROR;
4535
4536	if (nla_put_status_info(msg, device, sib))
4537		goto nla_put_failure;
4538	genlmsg_end(msg, d_out);
4539	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
4540	/* msg has been consumed or freed in netlink_broadcast() */
4541	if (err && err != -ESRCH)
4542		goto failed;
4543
4544	return;
4545
4546nla_put_failure:
4547	nlmsg_free(msg);
4548failed:
4549	drbd_err(device, "Error %d while broadcasting event. "
4550			"Event seq:%u sib_reason:%u\n",
4551			err, seq, sib->sib_reason);
4552}
4553
4554static int nla_put_notification_header(struct sk_buff *msg,
4555				       enum drbd_notification_type type)
4556{
4557	struct drbd_notification_header nh = {
4558		.nh_type = type,
4559	};
4560
4561	return drbd_notification_header_to_skb(msg, &nh, true);
4562}
4563
4564int notify_resource_state(struct sk_buff *skb,
4565			   unsigned int seq,
4566			   struct drbd_resource *resource,
4567			   struct resource_info *resource_info,
4568			   enum drbd_notification_type type)
4569{
4570	struct resource_statistics resource_statistics;
4571	struct drbd_genlmsghdr *dh;
4572	bool multicast = false;
4573	int err;
4574
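	/* Without a caller-supplied skb (i.e. not called from a dump), build
	 * a new message and multicast it to the drbd events group below. */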
4575	if (!skb) {
4576		seq = atomic_inc_return(&notify_genl_seq);
4577		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4578		err = -ENOMEM;
4579		if (!skb)
4580			goto failed;
4581		multicast = true;
4582	}
4583
4584	err = -EMSGSIZE;
4585	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
4586	if (!dh)
4587		goto nla_put_failure;
4588	dh->minor = -1U;
4589	dh->ret_code = NO_ERROR;
4590	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
4591	    nla_put_notification_header(skb, type) ||
4592	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4593	     resource_info_to_skb(skb, resource_info, true)))
4594		goto nla_put_failure;
4595	resource_statistics.res_stat_write_ordering = resource->write_ordering;
4596	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
4597	if (err)
4598		goto nla_put_failure;
4599	genlmsg_end(skb, dh);
4600	if (multicast) {
4601		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4602		/* skb has been consumed or freed in netlink_broadcast() */
4603		if (err && err != -ESRCH)
4604			goto failed;
4605	}
4606	return 0;
4607
4608nla_put_failure:
4609	nlmsg_free(skb);
4610failed:
4611	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4612			err, seq);
4613	return err;
4614}
4615
4616int notify_device_state(struct sk_buff *skb,
4617			 unsigned int seq,
4618			 struct drbd_device *device,
4619			 struct device_info *device_info,
4620			 enum drbd_notification_type type)
4621{
4622	struct device_statistics device_statistics;
4623	struct drbd_genlmsghdr *dh;
4624	bool multicast = false;
4625	int err;
4626
4627	if (!skb) {
4628		seq = atomic_inc_return(&notify_genl_seq);
4629		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4630		err = -ENOMEM;
4631		if (!skb)
4632			goto failed;
4633		multicast = true;
4634	}
4635
4636	err = -EMSGSIZE;
4637	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
4638	if (!dh)
4639		goto nla_put_failure;
4640	dh->minor = device->minor;
4641	dh->ret_code = NO_ERROR;
4642	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
4643	    nla_put_notification_header(skb, type) ||
4644	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4645	     device_info_to_skb(skb, device_info, true)))
4646		goto nla_put_failure;
4647	device_to_statistics(&device_statistics, device);
4648	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
4649	genlmsg_end(skb, dh);
4650	if (multicast) {
4651		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4652		/* skb has been consumed or freed in netlink_broadcast() */
4653		if (err && err != -ESRCH)
4654			goto failed;
4655	}
4656	return 0;
4657
4658nla_put_failure:
4659	nlmsg_free(skb);
4660failed:
4661	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
4662		 err, seq);
4663	return err;
4664}
4665
4666int notify_connection_state(struct sk_buff *skb,
4667			     unsigned int seq,
4668			     struct drbd_connection *connection,
4669			     struct connection_info *connection_info,
4670			     enum drbd_notification_type type)
4671{
4672	struct connection_statistics connection_statistics;
4673	struct drbd_genlmsghdr *dh;
4674	bool multicast = false;
4675	int err;
4676
4677	if (!skb) {
4678		seq = atomic_inc_return(&notify_genl_seq);
4679		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4680		err = -ENOMEM;
4681		if (!skb)
4682			goto failed;
4683		multicast = true;
4684	}
4685
4686	err = -EMSGSIZE;
4687	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
4688	if (!dh)
4689		goto nla_put_failure;
4690	dh->minor = -1U;
4691	dh->ret_code = NO_ERROR;
4692	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
4693	    nla_put_notification_header(skb, type) ||
4694	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4695	     connection_info_to_skb(skb, connection_info, true)))
4696		goto nla_put_failure;
4697	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
4698	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
4699	genlmsg_end(skb, dh);
4700	if (multicast) {
4701		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4702		/* skb has been consumed or freed in netlink_broadcast() */
4703		if (err && err != -ESRCH)
4704			goto failed;
4705	}
4706	return 0;
4707
4708nla_put_failure:
4709	nlmsg_free(skb);
4710failed:
4711	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
4712		 err, seq);
4713	return err;
4714}
4715
4716int notify_peer_device_state(struct sk_buff *skb,
4717			      unsigned int seq,
4718			      struct drbd_peer_device *peer_device,
4719			      struct peer_device_info *peer_device_info,
4720			      enum drbd_notification_type type)
4721{
4722	struct peer_device_statistics peer_device_statistics;
4723	struct drbd_resource *resource = peer_device->device->resource;
4724	struct drbd_genlmsghdr *dh;
4725	bool multicast = false;
4726	int err;
4727
4728	if (!skb) {
4729		seq = atomic_inc_return(&notify_genl_seq);
4730		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4731		err = -ENOMEM;
4732		if (!skb)
4733			goto failed;
4734		multicast = true;
4735	}
4736
4737	err = -EMSGSIZE;
4738	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
4739	if (!dh)
4740		goto nla_put_failure;
4741	dh->minor = -1U;
4742	dh->ret_code = NO_ERROR;
4743	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
4744	    nla_put_notification_header(skb, type) ||
4745	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
4746	     peer_device_info_to_skb(skb, peer_device_info, true)))
4747		goto nla_put_failure;
4748	peer_device_to_statistics(&peer_device_statistics, peer_device);
4749	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
4750	genlmsg_end(skb, dh);
4751	if (multicast) {
4752		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4753		/* skb has been consumed or freed in netlink_broadcast() */
4754		if (err && err != -ESRCH)
4755			goto failed;
4756	}
4757	return 0;
4758
4759nla_put_failure:
4760	nlmsg_free(skb);
4761failed:
4762	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
4763		 err, seq);
4764	return err;
4765}
4766
4767void notify_helper(enum drbd_notification_type type,
4768		   struct drbd_device *device, struct drbd_connection *connection,
4769		   const char *name, int status)
4770{
4771	struct drbd_resource *resource = device ? device->resource : connection->resource;
4772	struct drbd_helper_info helper_info;
4773	unsigned int seq = atomic_inc_return(&notify_genl_seq);
4774	struct sk_buff *skb = NULL;
4775	struct drbd_genlmsghdr *dh;
4776	int err;
4777
4778	strscpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
4779	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
4780	helper_info.helper_status = status;
4781
4782	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
4783	err = -ENOMEM;
4784	if (!skb)
4785		goto fail;
4786
4787	err = -EMSGSIZE;
4788	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
4789	if (!dh)
4790		goto fail;
4791	dh->minor = device ? device->minor : -1;
4792	dh->ret_code = NO_ERROR;
4793	mutex_lock(&notification_mutex);
4794	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
4795	    nla_put_notification_header(skb, type) ||
4796	    drbd_helper_info_to_skb(skb, &helper_info, true))
4797		goto unlock_fail;
4798	genlmsg_end(skb, dh);
4799	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
4800	skb = NULL;
4801	/* skb has been consumed or freed in netlink_broadcast() */
4802	if (err && err != -ESRCH)
4803		goto unlock_fail;
4804	mutex_unlock(&notification_mutex);
4805	return;
4806
4807unlock_fail:
4808	mutex_unlock(&notification_mutex);
4809fail:
4810	nlmsg_free(skb);
4811	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
4812		 err, seq);
4813}
4814
4815static int notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
4816{
4817	struct drbd_genlmsghdr *dh;
4818	int err;
4819
4820	err = -EMSGSIZE;
4821	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
4822	if (!dh)
4823		goto nla_put_failure;
4824	dh->minor = -1U;
4825	dh->ret_code = NO_ERROR;
4826	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
4827		goto nla_put_failure;
4828	genlmsg_end(skb, dh);
4829	return 0;
4830
4831nla_put_failure:
4832	nlmsg_free(skb);
4833	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
4834	return err;
4835}
4836
4837static void free_state_changes(struct list_head *list)
4838{
4839	while (!list_empty(list)) {
4840		struct drbd_state_change *state_change =
4841			list_first_entry(list, struct drbd_state_change, list);
4842		list_del(&state_change->list);
4843		forget_state_change(state_change);
4844	}
4845}
4846
4847static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
4848{
4849	return 1 +
4850	       state_change->n_connections +
4851	       state_change->n_devices +
4852	       state_change->n_devices * state_change->n_connections;
4853}
4854
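/* get_initial_state() uses the netlink_callback args as follows:
 *   args[0]: current drbd_state_change in the list prepared by
 *            drbd_adm_get_initial_state()
 *   args[2]: netlink sequence number used for the notifications
 *   args[3]: number of notifications for the current state change
 *   args[4]: next notification index within the current state change
 *   args[5]: remaining dump iterations, including the final
 *            "initial state done" message
 */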
4855static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4856{
4857	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
4858	unsigned int seq = cb->args[2];
4859	unsigned int n;
4860	enum drbd_notification_type flags = 0;
4861	int err = 0;
4862
4863	/* There is no need for taking notification_mutex here: it doesn't
4864	   matter if the initial state events mix with later state change
4865	   events; we can always tell the events apart by the NOTIFY_EXISTS
4866	   flag. */
4867
4868	cb->args[5]--;
4869	if (cb->args[5] == 1) {
4870		err = notify_initial_state_done(skb, seq);
4871		goto out;
4872	}
4873	n = cb->args[4]++;
4874	if (cb->args[4] < cb->args[3])
4875		flags |= NOTIFY_CONTINUES;
4876	if (n < 1) {
4877		err = notify_resource_state_change(skb, seq, state_change->resource,
4878					     NOTIFY_EXISTS | flags);
4879		goto next;
4880	}
4881	n--;
4882	if (n < state_change->n_connections) {
4883		err = notify_connection_state_change(skb, seq, &state_change->connections[n],
4884					       NOTIFY_EXISTS | flags);
4885		goto next;
4886	}
4887	n -= state_change->n_connections;
4888	if (n < state_change->n_devices) {
4889		err = notify_device_state_change(skb, seq, &state_change->devices[n],
4890					   NOTIFY_EXISTS | flags);
4891		goto next;
4892	}
4893	n -= state_change->n_devices;
4894	if (n < state_change->n_devices * state_change->n_connections) {
4895		err = notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
4896						NOTIFY_EXISTS | flags);
4897		goto next;
4898	}
4899
4900next:
4901	if (cb->args[4] == cb->args[3]) {
4902		struct drbd_state_change *next_state_change =
4903			list_entry(state_change->list.next,
4904				   struct drbd_state_change, list);
4905		cb->args[0] = (long)next_state_change;
4906		cb->args[3] = notifications_for_state_change(next_state_change);
4907		cb->args[4] = 0;
4908	}
4909out:
4910	if (err)
4911		return err;
4912	else
4913		return skb->len;
4914}
4915
4916int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
4917{
4918	struct drbd_resource *resource;
4919	LIST_HEAD(head);
4920
4921	if (cb->args[5] >= 1) {
4922		if (cb->args[5] > 1)
4923			return get_initial_state(skb, cb);
4924		if (cb->args[0]) {
4925			struct drbd_state_change *state_change =
4926				(struct drbd_state_change *)cb->args[0];
4927
4928			/* connect list to head */
4929			list_add(&head, &state_change->list);
4930			free_state_changes(&head);
4931		}
4932		return 0;
4933	}
4934
4935	cb->args[5] = 2;  /* number of iterations */
4936	mutex_lock(&resources_mutex);
4937	for_each_resource(resource, &drbd_resources) {
4938		struct drbd_state_change *state_change;
4939
4940		state_change = remember_old_state(resource, GFP_KERNEL);
4941		if (!state_change) {
4942			if (!list_empty(&head))
4943				free_state_changes(&head);
4944			mutex_unlock(&resources_mutex);
4945			return -ENOMEM;
4946		}
4947		copy_old_to_new_state_change(state_change);
4948		list_add_tail(&state_change->list, &head);
4949		cb->args[5] += notifications_for_state_change(state_change);
4950	}
4951	mutex_unlock(&resources_mutex);
4952
4953	if (!list_empty(&head)) {
4954		struct drbd_state_change *state_change =
4955			list_entry(head.next, struct drbd_state_change, list);
4956		cb->args[0] = (long)state_change;
4957		cb->args[3] = notifications_for_state_change(state_change);
4958		list_del(&head);  /* detach list from head */
4959	}
4960
4961	cb->args[2] = cb->nlh->nlmsg_seq;
4962	return get_initial_state(skb, cb);
4963}
4964