1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3  drbd_int.h
4
5  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6
7  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10
11
12*/
13
14#ifndef _DRBD_INT_H
15#define _DRBD_INT_H
16
17#include <crypto/hash.h>
18#include <linux/compiler.h>
19#include <linux/types.h>
20#include <linux/list.h>
21#include <linux/sched/signal.h>
22#include <linux/bitops.h>
23#include <linux/slab.h>
24#include <linux/ratelimit.h>
25#include <linux/tcp.h>
26#include <linux/mutex.h>
27#include <linux/major.h>
28#include <linux/blkdev.h>
29#include <linux/backing-dev.h>
30#include <linux/idr.h>
31#include <linux/dynamic_debug.h>
32#include <net/tcp.h>
33#include <linux/lru_cache.h>
34#include <linux/prefetch.h>
35#include <linux/drbd_genl_api.h>
36#include <linux/drbd.h>
37#include <linux/drbd_config.h>
38#include "drbd_strings.h"
39#include "drbd_state.h"
40#include "drbd_protocol.h"
41#include "drbd_polymorph_printk.h"
42
43/* shared module parameters, defined in drbd_main.c */
44#ifdef CONFIG_DRBD_FAULT_INJECTION
45extern int drbd_enable_faults;
46extern int drbd_fault_rate;
47#endif
48
49extern unsigned int drbd_minor_count;
50extern char drbd_usermode_helper[];
51extern int drbd_proc_details;
52
53
54/* This is used to stop/restart our threads.
55 * Cannot use SIGTERM nor SIGKILL, since these
56 * are sent out by init on runlevel changes
57 * I choose SIGHUP for now.
58 */
59#define DRBD_SIGKILL SIGHUP
60
61#define ID_IN_SYNC      (4711ULL)
62#define ID_OUT_OF_SYNC  (4712ULL)
63#define ID_SYNCER (-1ULL)
64
65#define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
66
67struct drbd_device;
68struct drbd_connection;
69struct drbd_peer_device;
70
71/* Defines to control fault insertion */
72enum {
73	DRBD_FAULT_MD_WR = 0,	/* meta data write */
74	DRBD_FAULT_MD_RD = 1,	/*           read  */
75	DRBD_FAULT_RS_WR = 2,	/* resync          */
76	DRBD_FAULT_RS_RD = 3,
77	DRBD_FAULT_DT_WR = 4,	/* data            */
78	DRBD_FAULT_DT_RD = 5,
79	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
80	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
81	DRBD_FAULT_AL_EE = 8,	/* alloc ee */
82	DRBD_FAULT_RECEIVE = 9, /* Changes some bytes upon receiving a [rs]data block */
83
84	DRBD_FAULT_MAX,
85};
86
87extern unsigned int
88_drbd_insert_fault(struct drbd_device *device, unsigned int type);
89
90static inline int
91drbd_insert_fault(struct drbd_device *device, unsigned int type) {
92#ifdef CONFIG_DRBD_FAULT_INJECTION
93	return drbd_fault_rate &&
94		(drbd_enable_faults & (1<<type)) &&
95		_drbd_insert_fault(device, type);
96#else
97	return 0;
98#endif
99}
100
101/* integer division, round _UP_ to the next integer */
102#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
103/* usual integer division */
104#define div_floor(A, B) ((A)/(B))
105
106extern struct ratelimit_state drbd_ratelimit_state;
107extern struct idr drbd_devices; /* RCU, updates: genl_lock() */
108extern struct list_head drbd_resources; /* RCU, updates: genl_lock() */
109
110extern const char *cmdname(enum drbd_packet cmd);
111
112/* for sending/receiving the bitmap,
113 * possibly in some encoding scheme */
114struct bm_xfer_ctx {
115	/* "const"
116	 * stores total bits and long words
117	 * of the bitmap, so we don't need to
118	 * call the accessor functions over and again. */
119	unsigned long bm_bits;
120	unsigned long bm_words;
121	/* during xfer, current position within the bitmap */
122	unsigned long bit_offset;
123	unsigned long word_offset;
124
125	/* statistics; index: (h->command == P_BITMAP) */
126	unsigned packets[2];
127	unsigned bytes[2];
128};
129
130extern void INFO_bm_xfer_stats(struct drbd_peer_device *peer_device,
131			       const char *direction, struct bm_xfer_ctx *c);
132
133static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
134{
135	/* word_offset counts "native long words" (32 or 64 bit),
136	 * aligned at 64 bit.
137	 * Encoded packet may end at an unaligned bit offset.
138	 * In case a fallback clear text packet is transmitted in
139	 * between, we adjust this offset back to the last 64bit
140	 * aligned "native long word", which makes coding and decoding
141	 * the plain text bitmap much more convenient.  */
142#if BITS_PER_LONG == 64
143	c->word_offset = c->bit_offset >> 6;
144#elif BITS_PER_LONG == 32
145	c->word_offset = c->bit_offset >> 5;
146	c->word_offset &= ~(1UL);
147#else
148# error "unsupported BITS_PER_LONG"
149#endif
150}
151
152extern unsigned int drbd_header_size(struct drbd_connection *connection);
153
154/**********************************************************************/
155enum drbd_thread_state {
156	NONE,
157	RUNNING,
158	EXITING,
159	RESTARTING
160};
161
162struct drbd_thread {
163	spinlock_t t_lock;
164	struct task_struct *task;
165	struct completion stop;
166	enum drbd_thread_state t_state;
167	int (*function) (struct drbd_thread *);
168	struct drbd_resource *resource;
169	struct drbd_connection *connection;
170	int reset_cpu_mask;
171	const char *name;
172};
173
174static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
175{
176	/* THINK testing the t_state seems to be uncritical in all cases
177	 * (but thread_{start,stop}), so we can read it *without* the lock.
178	 *	--lge */
179
180	smp_rmb();
181	return thi->t_state;
182}
183
184struct drbd_work {
185	struct list_head list;
186	int (*cb)(struct drbd_work *, int cancel);
187};
188
189struct drbd_device_work {
190	struct drbd_work w;
191	struct drbd_device *device;
192};
193
194#include "drbd_interval.h"
195
196extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
197
198extern void lock_all_resources(void);
199extern void unlock_all_resources(void);
200
201struct drbd_request {
202	struct drbd_work w;
203	struct drbd_device *device;
204
205	/* if local IO is not allowed, will be NULL.
206	 * if local IO _is_ allowed, holds the locally submitted bio clone,
207	 * or, after local IO completion, the ERR_PTR(error).
208	 * see drbd_request_endio(). */
209	struct bio *private_bio;
210
211	struct drbd_interval i;
212
213	/* epoch: used to check on "completion" whether this req was in
214	 * the current epoch, and we therefore have to close it,
215	 * causing a p_barrier packet to be send, starting a new epoch.
216	 *
217	 * This corresponds to "barrier" in struct p_barrier[_ack],
218	 * and to "barrier_nr" in struct drbd_epoch (and various
219	 * comments/function parameters/local variable names).
220	 */
221	unsigned int epoch;
222
223	struct list_head tl_requests; /* ring list in the transfer log */
224	struct bio *master_bio;       /* master bio pointer */
225
226	/* see struct drbd_device */
227	struct list_head req_pending_master_completion;
228	struct list_head req_pending_local;
229
230	/* for generic IO accounting */
231	unsigned long start_jif;
232
233	/* for DRBD internal statistics */
234
235	/* Minimal set of time stamps to determine if we wait for activity log
236	 * transactions, local disk or peer.  32 bit "jiffies" are good enough,
237	 * we don't expect a DRBD request to be stalled for several month.
238	 */
239
240	/* before actual request processing */
241	unsigned long in_actlog_jif;
242
243	/* local disk */
244	unsigned long pre_submit_jif;
245
246	/* per connection */
247	unsigned long pre_send_jif;
248	unsigned long acked_jif;
249	unsigned long net_done_jif;
250
251	/* Possibly even more detail to track each phase:
252	 *  master_completion_jif
253	 *      how long did it take to complete the master bio
254	 *      (application visible latency)
255	 *  allocated_jif
256	 *      how long the master bio was blocked until we finally allocated
257	 *      a tracking struct
258	 *  in_actlog_jif
259	 *      how long did we wait for activity log transactions
260	 *
261	 *  net_queued_jif
262	 *      when did we finally queue it for sending
263	 *  pre_send_jif
264	 *      when did we start sending it
265	 *  post_send_jif
266	 *      how long did we block in the network stack trying to send it
267	 *  acked_jif
268	 *      when did we receive (or fake, in protocol A) a remote ACK
269	 *  net_done_jif
270	 *      when did we receive final acknowledgement (P_BARRIER_ACK),
271	 *      or decide, e.g. on connection loss, that we do no longer expect
272	 *      anything from this peer for this request.
273	 *
274	 *  pre_submit_jif
275	 *  post_sub_jif
276	 *      when did we start submiting to the lower level device,
277	 *      and how long did we block in that submit function
278	 *  local_completion_jif
279	 *      how long did it take the lower level device to complete this request
280	 */
281
282
283	/* once it hits 0, we may complete the master_bio */
284	atomic_t completion_ref;
285	/* once it hits 0, we may destroy this drbd_request object */
286	struct kref kref;
287
288	unsigned rq_state; /* see comments above _req_mod() */
289};
290
291struct drbd_epoch {
292	struct drbd_connection *connection;
293	struct list_head list;
294	unsigned int barrier_nr;
295	atomic_t epoch_size; /* increased on every request added. */
296	atomic_t active;     /* increased on every req. added, and dec on every finished. */
297	unsigned long flags;
298};
299
300/* Prototype declaration of function defined in drbd_receiver.c */
301int drbdd_init(struct drbd_thread *);
302int drbd_asender(struct drbd_thread *);
303
304/* drbd_epoch flag bits */
305enum {
306	DE_HAVE_BARRIER_NUMBER,
307};
308
309enum epoch_event {
310	EV_PUT,
311	EV_GOT_BARRIER_NR,
312	EV_BECAME_LAST,
313	EV_CLEANUP = 32, /* used as flag */
314};
315
316struct digest_info {
317	int digest_size;
318	void *digest;
319};
320
321struct drbd_peer_request {
322	struct drbd_work w;
323	struct drbd_peer_device *peer_device;
324	struct drbd_epoch *epoch; /* for writes */
325	struct page *pages;
326	blk_opf_t opf;
327	atomic_t pending_bios;
328	struct drbd_interval i;
329	/* see comments on ee flag bits below */
330	unsigned long flags;
331	unsigned long submit_jif;
332	union {
333		u64 block_id;
334		struct digest_info *digest;
335	};
336};
337
338/* Equivalent to bio_op and req_op. */
339#define peer_req_op(peer_req) \
340	((peer_req)->opf & REQ_OP_MASK)
341
342/* ee flag bits.
343 * While corresponding bios are in flight, the only modification will be
344 * set_bit WAS_ERROR, which has to be atomic.
345 * If no bios are in flight yet, or all have been completed,
346 * non-atomic modification to ee->flags is ok.
347 */
348enum {
349	__EE_CALL_AL_COMPLETE_IO,
350	__EE_MAY_SET_IN_SYNC,
351
352	/* is this a TRIM aka REQ_OP_DISCARD? */
353	__EE_TRIM,
354	/* explicit zero-out requested, or
355	 * our lower level cannot handle trim,
356	 * and we want to fall back to zeroout instead */
357	__EE_ZEROOUT,
358
359	/* In case a barrier failed,
360	 * we need to resubmit without the barrier flag. */
361	__EE_RESUBMITTED,
362
363	/* we may have several bios per peer request.
364	 * if any of those fail, we set this flag atomically
365	 * from the endio callback */
366	__EE_WAS_ERROR,
367
368	/* This ee has a pointer to a digest instead of a block id */
369	__EE_HAS_DIGEST,
370
371	/* Conflicting local requests need to be restarted after this request */
372	__EE_RESTART_REQUESTS,
373
374	/* The peer wants a write ACK for this (wire proto C) */
375	__EE_SEND_WRITE_ACK,
376
377	/* Is set when net_conf had two_primaries set while creating this peer_req */
378	__EE_IN_INTERVAL_TREE,
379
380	/* for debugfs: */
381	/* has this been submitted, or does it still wait for something else? */
382	__EE_SUBMITTED,
383
384	/* this is/was a write request */
385	__EE_WRITE,
386
387	/* this is/was a write same request */
388	__EE_WRITE_SAME,
389
390	/* this originates from application on peer
391	 * (not some resync or verify or other DRBD internal request) */
392	__EE_APPLICATION,
393
394	/* If it contains only 0 bytes, send back P_RS_DEALLOCATED */
395	__EE_RS_THIN_REQ,
396};
397#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
398#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
399#define EE_TRIM                (1<<__EE_TRIM)
400#define EE_ZEROOUT             (1<<__EE_ZEROOUT)
401#define EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
402#define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
403#define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
404#define EE_RESTART_REQUESTS	(1<<__EE_RESTART_REQUESTS)
405#define EE_SEND_WRITE_ACK	(1<<__EE_SEND_WRITE_ACK)
406#define EE_IN_INTERVAL_TREE	(1<<__EE_IN_INTERVAL_TREE)
407#define EE_SUBMITTED		(1<<__EE_SUBMITTED)
408#define EE_WRITE		(1<<__EE_WRITE)
409#define EE_WRITE_SAME		(1<<__EE_WRITE_SAME)
410#define EE_APPLICATION		(1<<__EE_APPLICATION)
411#define EE_RS_THIN_REQ		(1<<__EE_RS_THIN_REQ)
412
413/* flag bits per device */
414enum {
415	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
416	MD_DIRTY,		/* current uuids and flags not yet on disk */
417	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
418	CL_ST_CHG_SUCCESS,
419	CL_ST_CHG_FAIL,
420	CRASHED_PRIMARY,	/* This node was a crashed primary.
421				 * Gets cleared when the state.conn
422				 * goes into C_CONNECTED state. */
423	CONSIDER_RESYNC,
424
425	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
426
427	BITMAP_IO,		/* suspend application io;
428				   once no more io in flight, start bitmap io */
429	BITMAP_IO_QUEUED,       /* Started bitmap IO */
430	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
431	WAS_READ_ERROR,		/* Local disk READ failed (set additionally to the above) */
432	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
433	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
434	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
435				 * the peer, if it changed there as well. */
436	NEW_CUR_UUID,		/* Create new current UUID when thawing IO */
437	AL_SUSPENDED,		/* Activity logging is currently suspended. */
438	AHEAD_TO_SYNC_SOURCE,   /* Ahead -> SyncSource queued */
439	B_RS_H_DONE,		/* Before resync handler done (already executed) */
440	DISCARD_MY_DATA,	/* discard_my_data flag per volume */
441	READ_BALANCE_RR,
442
443	FLUSH_PENDING,		/* if set, device->flush_jif is when we submitted that flush
444				 * from drbd_flush_after_epoch() */
445
446	/* cleared only after backing device related structures have been destroyed. */
447	GOING_DISKLESS,		/* Disk is being detached, because of io-error, or admin request. */
448
449	/* to be used in drbd_device_post_work() */
450	GO_DISKLESS,		/* tell worker to schedule cleanup before detach */
451	DESTROY_DISK,		/* tell worker to close backing devices and destroy related structures. */
452	MD_SYNC,		/* tell worker to call drbd_md_sync() */
453	RS_START,		/* tell worker to start resync/OV */
454	RS_PROGRESS,		/* tell worker that resync made significant progress */
455	RS_DONE,		/* tell worker that resync is done */
456};
457
458struct drbd_bitmap; /* opaque for drbd_device */
459
460/* definition of bits in bm_flags to be used in drbd_bm_lock
461 * and drbd_bitmap_io and friends. */
462enum bm_flag {
463	/* currently locked for bulk operation */
464	BM_LOCKED_MASK = 0xf,
465
466	/* in detail, that is: */
467	BM_DONT_CLEAR = 0x1,
468	BM_DONT_SET   = 0x2,
469	BM_DONT_TEST  = 0x4,
470
471	/* so we can mark it locked for bulk operation,
472	 * and still allow all non-bulk operations */
473	BM_IS_LOCKED  = 0x8,
474
475	/* (test bit, count bit) allowed (common case) */
476	BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
477
478	/* testing bits, as well as setting new bits allowed, but clearing bits
479	 * would be unexpected.  Used during bitmap receive.  Setting new bits
480	 * requires sending of "out-of-sync" information, though. */
481	BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
482
483	/* for drbd_bm_write_copy_pages, everything is allowed,
484	 * only concurrent bulk operations are locked out. */
485	BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
486};
487
488struct drbd_work_queue {
489	struct list_head q;
490	spinlock_t q_lock;  /* to protect the list. */
491	wait_queue_head_t q_wait;
492};
493
494struct drbd_socket {
495	struct mutex mutex;
496	struct socket    *socket;
497	/* this way we get our
498	 * send/receive buffers off the stack */
499	void *sbuf;
500	void *rbuf;
501};
502
503struct drbd_md {
504	u64 md_offset;		/* sector offset to 'super' block */
505
506	u64 la_size_sect;	/* last agreed size, unit sectors */
507	spinlock_t uuid_lock;
508	u64 uuid[UI_SIZE];
509	u64 device_uuid;
510	u32 flags;
511	u32 md_size_sect;
512
513	s32 al_offset;	/* signed relative sector offset to activity log */
514	s32 bm_offset;	/* signed relative sector offset to bitmap */
515
516	/* cached value of bdev->disk_conf->meta_dev_idx (see below) */
517	s32 meta_dev_idx;
518
519	/* see al_tr_number_to_on_disk_sector() */
520	u32 al_stripes;
521	u32 al_stripe_size_4k;
522	u32 al_size_4k; /* cached product of the above */
523};
524
525struct drbd_backing_dev {
526	struct block_device *backing_bdev;
527	struct block_device *md_bdev;
528	struct drbd_md md;
529	struct disk_conf *disk_conf; /* RCU, for updates: resource->conf_update */
530	sector_t known_size; /* last known size of that backing device */
531};
532
533struct drbd_md_io {
534	struct page *page;
535	unsigned long start_jif;	/* last call to drbd_md_get_buffer */
536	unsigned long submit_jif;	/* last _drbd_md_sync_page_io() submit */
537	const char *current_use;
538	atomic_t in_use;
539	unsigned int done;
540	int error;
541};
542
543struct bm_io_work {
544	struct drbd_work w;
545	struct drbd_peer_device *peer_device;
546	char *why;
547	enum bm_flag flags;
548	int (*io_fn)(struct drbd_device *device, struct drbd_peer_device *peer_device);
549	void (*done)(struct drbd_device *device, int rv);
550};
551
552struct fifo_buffer {
553	unsigned int head_index;
554	unsigned int size;
555	int total; /* sum of all values */
556	int values[];
557};
558extern struct fifo_buffer *fifo_alloc(unsigned int fifo_size);
559
560/* flag bits per connection */
561enum {
562	NET_CONGESTED,		/* The data socket is congested */
563	RESOLVE_CONFLICTS,	/* Set on one node, cleared on the peer! */
564	SEND_PING,
565	GOT_PING_ACK,		/* set when we receive a ping_ack packet, ping_wait gets woken */
566	CONN_WD_ST_CHG_REQ,	/* A cluster wide state change on the connection is active */
567	CONN_WD_ST_CHG_OKAY,
568	CONN_WD_ST_CHG_FAIL,
569	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
570	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
571	STATE_SENT,		/* Do not change state/UUIDs while this is set */
572	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
573				 * pending, from drbd worker context.
574				 */
575	DISCONNECT_SENT,
576
577	DEVICE_WORK_PENDING,	/* tell worker that some device has pending work */
578};
579
580enum which_state { NOW, OLD = NOW, NEW };
581
582struct drbd_resource {
583	char *name;
584#ifdef CONFIG_DEBUG_FS
585	struct dentry *debugfs_res;
586	struct dentry *debugfs_res_volumes;
587	struct dentry *debugfs_res_connections;
588	struct dentry *debugfs_res_in_flight_summary;
589#endif
590	struct kref kref;
591	struct idr devices;		/* volume number to device mapping */
592	struct list_head connections;
593	struct list_head resources;
594	struct res_opts res_opts;
595	struct mutex conf_update;	/* mutex for ready-copy-update of net_conf and disk_conf */
596	struct mutex adm_mutex;		/* mutex to serialize administrative requests */
597	spinlock_t req_lock;
598
599	unsigned susp:1;		/* IO suspended by user */
600	unsigned susp_nod:1;		/* IO suspended because no data */
601	unsigned susp_fen:1;		/* IO suspended because fence peer handler runs */
602
603	enum write_ordering_e write_ordering;
604
605	cpumask_var_t cpu_mask;
606};
607
608struct drbd_thread_timing_details
609{
610	unsigned long start_jif;
611	void *cb_addr;
612	const char *caller_fn;
613	unsigned int line;
614	unsigned int cb_nr;
615};
616
617struct drbd_connection {
618	struct list_head connections;
619	struct drbd_resource *resource;
620#ifdef CONFIG_DEBUG_FS
621	struct dentry *debugfs_conn;
622	struct dentry *debugfs_conn_callback_history;
623	struct dentry *debugfs_conn_oldest_requests;
624#endif
625	struct kref kref;
626	struct idr peer_devices;	/* volume number to peer device mapping */
627	enum drbd_conns cstate;		/* Only C_STANDALONE to C_WF_REPORT_PARAMS */
628	struct mutex cstate_mutex;	/* Protects graceful disconnects */
629	unsigned int connect_cnt;	/* Inc each time a connection is established */
630
631	unsigned long flags;
632	struct net_conf *net_conf;	/* content protected by rcu */
633	wait_queue_head_t ping_wait;	/* Woken upon reception of a ping, and a state change */
634
635	struct sockaddr_storage my_addr;
636	int my_addr_len;
637	struct sockaddr_storage peer_addr;
638	int peer_addr_len;
639
640	struct drbd_socket data;	/* data/barrier/cstate/parameter packets */
641	struct drbd_socket meta;	/* ping/ack (metadata) packets */
642	int agreed_pro_version;		/* actually used protocol version */
643	u32 agreed_features;
644	unsigned long last_received;	/* in jiffies, either socket */
645	unsigned int ko_count;
646
647	struct list_head transfer_log;	/* all requests not yet fully processed */
648
649	struct crypto_shash *cram_hmac_tfm;
650	struct crypto_shash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
651	struct crypto_shash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
652	struct crypto_shash *csums_tfm;
653	struct crypto_shash *verify_tfm;
654	void *int_dig_in;
655	void *int_dig_vv;
656
657	/* receiver side */
658	struct drbd_epoch *current_epoch;
659	spinlock_t epoch_lock;
660	unsigned int epochs;
661	atomic_t current_tle_nr;	/* transfer log epoch number */
662	unsigned current_tle_writes;	/* writes seen within this tl epoch */
663
664	unsigned long last_reconnect_jif;
665	/* empty member on older kernels without blk_start_plug() */
666	struct blk_plug receiver_plug;
667	struct drbd_thread receiver;
668	struct drbd_thread worker;
669	struct drbd_thread ack_receiver;
670	struct workqueue_struct *ack_sender;
671
672	/* cached pointers,
673	 * so we can look up the oldest pending requests more quickly.
674	 * protected by resource->req_lock */
675	struct drbd_request *req_next; /* DRBD 9: todo.req_next */
676	struct drbd_request *req_ack_pending;
677	struct drbd_request *req_not_net_done;
678
679	/* sender side */
680	struct drbd_work_queue sender_work;
681
682#define DRBD_THREAD_DETAILS_HIST	16
683	unsigned int w_cb_nr; /* keeps counting up */
684	unsigned int r_cb_nr; /* keeps counting up */
685	struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
686	struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];
687
688	struct {
689		unsigned long last_sent_barrier_jif;
690
691		/* whether this sender thread
692		 * has processed a single write yet. */
693		bool seen_any_write_yet;
694
695		/* Which barrier number to send with the next P_BARRIER */
696		int current_epoch_nr;
697
698		/* how many write requests have been sent
699		 * with req->epoch == current_epoch_nr.
700		 * If none, no P_BARRIER will be sent. */
701		unsigned current_epoch_writes;
702	} send;
703};
704
705static inline bool has_net_conf(struct drbd_connection *connection)
706{
707	bool has_net_conf;
708
709	rcu_read_lock();
710	has_net_conf = rcu_dereference(connection->net_conf);
711	rcu_read_unlock();
712
713	return has_net_conf;
714}
715
716void __update_timing_details(
717		struct drbd_thread_timing_details *tdp,
718		unsigned int *cb_nr,
719		void *cb,
720		const char *fn, const unsigned int line);
721
722#define update_worker_timing_details(c, cb) \
723	__update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
724#define update_receiver_timing_details(c, cb) \
725	__update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
726
727struct submit_worker {
728	struct workqueue_struct *wq;
729	struct work_struct worker;
730
731	/* protected by ..->resource->req_lock */
732	struct list_head writes;
733};
734
735struct drbd_peer_device {
736	struct list_head peer_devices;
737	struct drbd_device *device;
738	struct drbd_connection *connection;
739	struct work_struct send_acks_work;
740#ifdef CONFIG_DEBUG_FS
741	struct dentry *debugfs_peer_dev;
742#endif
743};
744
745struct drbd_device {
746	struct drbd_resource *resource;
747	struct list_head peer_devices;
748	struct list_head pending_bitmap_io;
749
750	unsigned long flush_jif;
751#ifdef CONFIG_DEBUG_FS
752	struct dentry *debugfs_minor;
753	struct dentry *debugfs_vol;
754	struct dentry *debugfs_vol_oldest_requests;
755	struct dentry *debugfs_vol_act_log_extents;
756	struct dentry *debugfs_vol_resync_extents;
757	struct dentry *debugfs_vol_data_gen_id;
758	struct dentry *debugfs_vol_ed_gen_id;
759#endif
760
761	unsigned int vnr;	/* volume number within the connection */
762	unsigned int minor;	/* device minor number */
763
764	struct kref kref;
765
766	/* things that are stored as / read from meta data on disk */
767	unsigned long flags;
768
769	/* configured by drbdsetup */
770	struct drbd_backing_dev *ldev;
771
772	sector_t p_size;     /* partner's disk size */
773	struct request_queue *rq_queue;
774	struct gendisk	    *vdisk;
775
776	unsigned long last_reattach_jif;
777	struct drbd_work resync_work;
778	struct drbd_work unplug_work;
779	struct timer_list resync_timer;
780	struct timer_list md_sync_timer;
781	struct timer_list start_resync_timer;
782	struct timer_list request_timer;
783
784	/* Used after attach while negotiating new disk state. */
785	union drbd_state new_state_tmp;
786
787	union drbd_dev_state state;
788	wait_queue_head_t misc_wait;
789	wait_queue_head_t state_wait;  /* upon each state change. */
790	unsigned int send_cnt;
791	unsigned int recv_cnt;
792	unsigned int read_cnt;
793	unsigned int writ_cnt;
794	unsigned int al_writ_cnt;
795	unsigned int bm_writ_cnt;
796	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
797	atomic_t ap_actlog_cnt;  /* Requests waiting for activity log */
798	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
799	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
800	atomic_t unacked_cnt;	 /* Need to send replies for */
801	atomic_t local_cnt;	 /* Waiting for local completion */
802	atomic_t suspend_cnt;
803
804	/* Interval tree of pending local requests */
805	struct rb_root read_requests;
806	struct rb_root write_requests;
807
808	/* for statistics and timeouts */
809	/* [0] read, [1] write */
810	struct list_head pending_master_completion[2];
811	struct list_head pending_completion[2];
812
813	/* use checksums for *this* resync */
814	bool use_csums;
815	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
816	unsigned long rs_total;
817	/* number of resync blocks that failed in this run */
818	unsigned long rs_failed;
819	/* Syncer's start time [unit jiffies] */
820	unsigned long rs_start;
821	/* cumulated time in PausedSyncX state [unit jiffies] */
822	unsigned long rs_paused;
823	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
824	unsigned long rs_same_csum;
825#define DRBD_SYNC_MARKS 8
826#define DRBD_SYNC_MARK_STEP (3*HZ)
827	/* block not up-to-date at mark [unit BM_BLOCK_SIZE] */
828	unsigned long rs_mark_left[DRBD_SYNC_MARKS];
829	/* marks's time [unit jiffies] */
830	unsigned long rs_mark_time[DRBD_SYNC_MARKS];
831	/* current index into rs_mark_{left,time} */
832	int rs_last_mark;
833	unsigned long rs_last_bcast; /* [unit jiffies] */
834
835	/* where does the admin want us to start? (sector) */
836	sector_t ov_start_sector;
837	sector_t ov_stop_sector;
838	/* where are we now? (sector) */
839	sector_t ov_position;
840	/* Start sector of out of sync range (to merge printk reporting). */
841	sector_t ov_last_oos_start;
842	/* size of out-of-sync range in sectors. */
843	sector_t ov_last_oos_size;
844	unsigned long ov_left; /* in bits */
845
846	struct drbd_bitmap *bitmap;
847	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */
848
849	/* Used to track operations of resync... */
850	struct lru_cache *resync;
851	/* Number of locked elements in resync LRU */
852	unsigned int resync_locked;
853	/* resync extent number waiting for application requests */
854	unsigned int resync_wenr;
855
856	int open_cnt;
857	u64 *p_uuid;
858
859	struct list_head active_ee; /* IO in progress (P_DATA gets written to disk) */
860	struct list_head sync_ee;   /* IO in progress (P_RS_DATA_REPLY gets written to disk) */
861	struct list_head done_ee;   /* need to send P_WRITE_ACK */
862	struct list_head read_ee;   /* [RS]P_DATA_REQUEST being read */
863	struct list_head net_ee;    /* zero-copy network send in progress */
864
865	int next_barrier_nr;
866	struct list_head resync_reads;
867	atomic_t pp_in_use;		/* allocated from page pool */
868	atomic_t pp_in_use_by_net;	/* sendpage()d, still referenced by tcp */
869	wait_queue_head_t ee_wait;
870	struct drbd_md_io md_io;
871	spinlock_t al_lock;
872	wait_queue_head_t al_wait;
873	struct lru_cache *act_log;	/* activity log */
874	unsigned int al_tr_number;
875	int al_tr_cycle;
876	wait_queue_head_t seq_wait;
877	atomic_t packet_seq;
878	unsigned int peer_seq;
879	spinlock_t peer_seq_lock;
880	unsigned long comm_bm_set; /* communicated number of set bits. */
881	struct bm_io_work bm_io_work;
882	u64 ed_uuid; /* UUID of the exposed data */
883	struct mutex own_state_mutex;
884	struct mutex *state_mutex; /* either own_state_mutex or first_peer_device(device)->connection->cstate_mutex */
885	char congestion_reason;  /* Why we where congested... */
886	atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
887	atomic_t rs_sect_ev; /* for submitted resync data rate, both */
888	int rs_last_sect_ev; /* counter to compare with */
889	int rs_last_events;  /* counter of read or write "events" (unit sectors)
890			      * on the lower level device when we last looked. */
891	int c_sync_rate; /* current resync rate after syncer throttle magic */
892	struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, connection->conn_update) */
893	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
894	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
895	unsigned int peer_max_bio_size;
896	unsigned int local_max_bio_size;
897
898	/* any requests that would block in drbd_make_request()
899	 * are deferred to this single-threaded work queue */
900	struct submit_worker submit;
901};
902
903struct drbd_bm_aio_ctx {
904	struct drbd_device *device;
905	struct list_head list; /* on device->pending_bitmap_io */;
906	unsigned long start_jif;
907	atomic_t in_flight;
908	unsigned int done;
909	unsigned flags;
910#define BM_AIO_COPY_PAGES	1
911#define BM_AIO_WRITE_HINTED	2
912#define BM_AIO_WRITE_ALL_PAGES	4
913#define BM_AIO_READ		8
914	int error;
915	struct kref kref;
916};
917
918struct drbd_config_context {
919	/* assigned from drbd_genlmsghdr */
920	unsigned int minor;
921	/* assigned from request attributes, if present */
922	unsigned int volume;
923#define VOLUME_UNSPECIFIED		(-1U)
924	/* pointer into the request skb,
925	 * limited lifetime! */
926	char *resource_name;
927	struct nlattr *my_addr;
928	struct nlattr *peer_addr;
929
930	/* reply buffer */
931	struct sk_buff *reply_skb;
932	/* pointer into reply buffer */
933	struct drbd_genlmsghdr *reply_dh;
934	/* resolved from attributes, if possible */
935	struct drbd_device *device;
936	struct drbd_resource *resource;
937	struct drbd_connection *connection;
938};
939
940static inline struct drbd_device *minor_to_device(unsigned int minor)
941{
942	return (struct drbd_device *)idr_find(&drbd_devices, minor);
943}
944
945static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
946{
947	return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
948}
949
950static inline struct drbd_peer_device *
951conn_peer_device(struct drbd_connection *connection, int volume_number)
952{
953	return idr_find(&connection->peer_devices, volume_number);
954}
955
956#define for_each_resource(resource, _resources) \
957	list_for_each_entry(resource, _resources, resources)
958
959#define for_each_resource_rcu(resource, _resources) \
960	list_for_each_entry_rcu(resource, _resources, resources)
961
962#define for_each_resource_safe(resource, tmp, _resources) \
963	list_for_each_entry_safe(resource, tmp, _resources, resources)
964
965#define for_each_connection(connection, resource) \
966	list_for_each_entry(connection, &resource->connections, connections)
967
968#define for_each_connection_rcu(connection, resource) \
969	list_for_each_entry_rcu(connection, &resource->connections, connections)
970
971#define for_each_connection_safe(connection, tmp, resource) \
972	list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
973
974#define for_each_peer_device(peer_device, device) \
975	list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
976
977#define for_each_peer_device_rcu(peer_device, device) \
978	list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
979
980#define for_each_peer_device_safe(peer_device, tmp, device) \
981	list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
982
983static inline unsigned int device_to_minor(struct drbd_device *device)
984{
985	return device->minor;
986}
987
988/*
989 * function declarations
990 *************************/
991
992/* drbd_main.c */
993
994enum dds_flags {
995	DDSF_FORCED    = 1,
996	DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */
997};
998
999extern void drbd_init_set_defaults(struct drbd_device *device);
1000extern int  drbd_thread_start(struct drbd_thread *thi);
1001extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1002#ifdef CONFIG_SMP
1003extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1004#else
1005#define drbd_thread_current_set_cpu(A) ({})
1006#endif
1007extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
1008		       unsigned int set_size);
1009extern void tl_clear(struct drbd_connection *);
1010extern void drbd_free_sock(struct drbd_connection *connection);
1011extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
1012		     void *buf, size_t size, unsigned msg_flags);
1013extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
1014			 unsigned);
1015
1016extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
1017extern int drbd_send_protocol(struct drbd_connection *connection);
1018extern int drbd_send_uuids(struct drbd_peer_device *);
1019extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
1020extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
1021extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
1022extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
1023extern int drbd_send_current_state(struct drbd_peer_device *);
1024extern int drbd_send_sync_param(struct drbd_peer_device *);
1025extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
1026			    u32 set_size);
1027extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
1028			 struct drbd_peer_request *);
1029extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
1030			     struct p_block_req *rp);
1031extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
1032			     struct p_data *dp, int data_size);
1033extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
1034			    sector_t sector, int blksize, u64 block_id);
1035extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
1036extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
1037			   struct drbd_peer_request *);
1038extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
1039extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
1040			      sector_t sector, int size, u64 block_id);
1041extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1042				   int size, void *digest, int digest_size,
1043				   enum drbd_packet cmd);
1044extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1045
1046extern int drbd_send_bitmap(struct drbd_device *device, struct drbd_peer_device *peer_device);
1047extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
1048extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
1049extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
1050extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1051extern void drbd_device_cleanup(struct drbd_device *device);
1052extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1053extern void drbd_queue_unplug(struct drbd_device *device);
1054
1055extern void conn_md_sync(struct drbd_connection *connection);
1056extern void drbd_md_write(struct drbd_device *device, void *buffer);
1057extern void drbd_md_sync(struct drbd_device *device);
1058extern int  drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1059extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1060extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1061extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1062extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1063extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1064extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1065extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1066extern void drbd_md_clear_flag(struct drbd_device *device, int flags)__must_hold(local);
1067extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1068extern void drbd_md_mark_dirty(struct drbd_device *device);
1069extern void drbd_queue_bitmap_io(struct drbd_device *device,
1070				 int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
1071				 void (*done)(struct drbd_device *, int),
1072				 char *why, enum bm_flag flags,
1073				 struct drbd_peer_device *peer_device);
1074extern int drbd_bitmap_io(struct drbd_device *device,
1075		int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
1076		char *why, enum bm_flag flags,
1077		struct drbd_peer_device *peer_device);
1078extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1079		int (*io_fn)(struct drbd_device *, struct drbd_peer_device *),
1080		char *why, enum bm_flag flags,
1081		struct drbd_peer_device *peer_device);
1082extern int drbd_bmio_set_n_write(struct drbd_device *device,
1083		struct drbd_peer_device *peer_device) __must_hold(local);
1084extern int drbd_bmio_clear_n_write(struct drbd_device *device,
1085		struct drbd_peer_device *peer_device) __must_hold(local);
1086
1087/* Meta data layout
1088 *
1089 * We currently have two possible layouts.
1090 * Offsets in (512 byte) sectors.
1091 * external:
1092 *   |----------- md_size_sect ------------------|
1093 *   [ 4k superblock ][ activity log ][  Bitmap  ]
1094 *   | al_offset == 8 |
1095 *   | bm_offset = al_offset + X      |
1096 *  ==> bitmap sectors = md_size_sect - bm_offset
1097 *
1098 *  Variants:
1099 *     old, indexed fixed size meta data:
1100 *
1101 * internal:
1102 *            |----------- md_size_sect ------------------|
1103 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ][padding*]
1104 *                        | al_offset < 0 |
1105 *            | bm_offset = al_offset - Y |
1106 *  ==> bitmap sectors = Y = al_offset - bm_offset
1107 *
1108 *  [padding*] are zero or up to 7 unused 512 Byte sectors to the
1109 *  end of the device, so that the [4k superblock] will be 4k aligned.
1110 *
1111 *  The activity log consists of 4k transaction blocks,
1112 *  which are written in a ring-buffer, or striped ring-buffer like fashion,
1113 *  which are writtensize used to be fixed 32kB,
1114 *  but is about to become configurable.
1115 */
1116
1117/* Our old fixed size meta data layout
1118 * allows up to about 3.8TB, so if you want more,
1119 * you need to use the "flexible" meta data format. */
1120#define MD_128MB_SECT (128LLU << 11)  /* 128 MB, unit sectors */
1121#define MD_4kB_SECT	 8
1122#define MD_32kB_SECT	64
1123
1124/* One activity log extent represents 4M of storage */
1125#define AL_EXTENT_SHIFT 22
1126#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1127
1128/* We could make these currently hardcoded constants configurable
1129 * variables at create-md time (or even re-configurable at runtime?).
1130 * Which will require some more changes to the DRBD "super block"
1131 * and attach code.
1132 *
1133 * updates per transaction:
1134 *   This many changes to the active set can be logged with one transaction.
1135 *   This number is arbitrary.
1136 * context per transaction:
1137 *   This many context extent numbers are logged with each transaction.
1138 *   This number is resulting from the transaction block size (4k), the layout
1139 *   of the transaction header, and the number of updates per transaction.
1140 *   See drbd_actlog.c:struct al_transaction_on_disk
1141 * */
1142#define AL_UPDATES_PER_TRANSACTION	 64	// arbitrary
1143#define AL_CONTEXT_PER_TRANSACTION	919	// (4096 - 36 - 6*64)/4
1144
1145#if BITS_PER_LONG == 32
1146#define LN2_BPL 5
1147#define cpu_to_lel(A) cpu_to_le32(A)
1148#define lel_to_cpu(A) le32_to_cpu(A)
1149#elif BITS_PER_LONG == 64
1150#define LN2_BPL 6
1151#define cpu_to_lel(A) cpu_to_le64(A)
1152#define lel_to_cpu(A) le64_to_cpu(A)
1153#else
1154#error "LN2 of BITS_PER_LONG unknown!"
1155#endif
1156
1157/* resync bitmap */
1158/* 16MB sized 'bitmap extent' to track syncer usage */
1159struct bm_extent {
1160	int rs_left; /* number of bits set (out of sync) in this extent. */
1161	int rs_failed; /* number of failed resync requests in this extent. */
1162	unsigned long flags;
1163	struct lc_element lce;
1164};
1165
1166#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
1167#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */
1168#define BME_PRIORITY   2  /* finish resync IO on this extent ASAP! App IO waiting! */
1169
1170/* drbd_bitmap.c */
1171/*
1172 * We need to store one bit for a block.
1173 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
1174 * Bit 0 ==> local node thinks this block is binary identical on both nodes
1175 * Bit 1 ==> local node thinks this block needs to be synced.
1176 */
1177
1178#define SLEEP_TIME (HZ/10)
1179
1180/* We do bitmap IO in units of 4k blocks.
1181 * We also still have a hardcoded 4k per bit relation. */
1182#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
1183#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
1184/* mostly arbitrarily set the represented size of one bitmap extent,
1185 * aka resync extent, to 16 MiB (which is also 512 Byte worth of bitmap
1186 * at 4k per bit resolution) */
1187#define BM_EXT_SHIFT	 24	/* 16 MiB per resync extent */
1188#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)
1189
1190#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1191#error "HAVE YOU FIXED drbdmeta AS WELL??"
1192#endif
1193
1194/* thus many _storage_ sectors are described by one bit */
1195#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
1196#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1197#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)
1198
1199/* bit to represented kilo byte conversion */
1200#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1201
1202/* in which _bitmap_ extent (resp. sector) the bit for a certain
1203 * _storage_ sector is located in */
1204#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))
1205#define BM_BIT_TO_EXT(x)    ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1206
1207/* first storage sector a bitmap extent corresponds to */
1208#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
1209/* how much _storage_ sectors we have per bitmap extent */
1210#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)
1211/* how many bits are covered by one bitmap extent (resync extent) */
1212#define BM_BITS_PER_EXT     (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1213
1214#define BM_BLOCKS_PER_BM_EXT_MASK  (BM_BITS_PER_EXT - 1)
1215
1216
1217/* in one sector of the bitmap, we have this many activity_log extents. */
1218#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1219
1220/* the extent in "PER_EXTENT" below is an activity log extent
1221 * we need that many (long words/bytes) to store the bitmap
1222 *		     of one AL_EXTENT_SIZE chunk of storage.
1223 * we can store the bitmap for that many AL_EXTENTS within
1224 * one sector of the _on_disk_ bitmap:
1225 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
1226 *	     ...|........|........|.. // ..|........|
1227 * sect. 0	 `296	  `304			   ^(512*8*8)-1
1228 *
1229#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
1230#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
1231#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )	 //   4
1232 */
1233
1234#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1235/* we have a certain meta data variant that has a fixed on-disk size of 128
1236 * MiB, of which 4k are our "superblock", and 32k are the fixed size activity
1237 * log, leaving this many sectors for the bitmap.
1238 */
1239
1240#define DRBD_MAX_SECTORS_FIXED_BM \
1241	  ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1242#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_FIXED_BM
1243/* 16 TB in units of sectors */
1244#if BITS_PER_LONG == 32
1245/* adjust by one page worth of bitmap,
1246 * so we won't wrap around in drbd_bm_find_next_bit.
1247 * you should use 64bit OS for that much storage, anyways. */
1248#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1249#else
1250/* we allow up to 1 PiB now on 64bit architecture with "flexible" meta data */
1251#define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1252/* corresponds to (1UL << 38) bits right now. */
1253#endif
1254
1255/* Estimate max bio size as 256 * PAGE_SIZE,
1256 * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
1257 * Since we may live in a mixed-platform cluster,
1258 * we limit us to a platform agnostic constant here for now.
1259 * A followup commit may allow even bigger BIO sizes,
1260 * once we thought that through. */
1261#define DRBD_MAX_BIO_SIZE (1U << 20)
1262#if DRBD_MAX_BIO_SIZE > (BIO_MAX_VECS << PAGE_SHIFT)
1263#error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1264#endif
1265#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)       /* Works always = 4k */
1266
1267#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* Header 80 only allows packets up to 32KiB data */
1268#define DRBD_MAX_BIO_SIZE_P95    (1U << 17) /* Protocol 95 to 99 allows bios up to 128KiB */
1269
1270/* For now, don't allow more than half of what we can "activate" in one
1271 * activity log transaction to be discarded in one go. We may need to rework
1272 * drbd_al_begin_io() to allow for even larger discard ranges */
1273#define DRBD_MAX_BATCH_BIO_SIZE	 (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
1274#define DRBD_MAX_BBIO_SECTORS    (DRBD_MAX_BATCH_BIO_SIZE >> 9)
1275
1276extern int  drbd_bm_init(struct drbd_device *device);
1277extern int  drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1278extern void drbd_bm_cleanup(struct drbd_device *device);
1279extern void drbd_bm_set_all(struct drbd_device *device);
1280extern void drbd_bm_clear_all(struct drbd_device *device);
1281/* set/clear/test only a few bits at a time */
1282extern int  drbd_bm_set_bits(
1283		struct drbd_device *device, unsigned long s, unsigned long e);
1284extern int  drbd_bm_clear_bits(
1285		struct drbd_device *device, unsigned long s, unsigned long e);
1286extern int drbd_bm_count_bits(
1287	struct drbd_device *device, const unsigned long s, const unsigned long e);
1288/* bm_set_bits variant for use while holding drbd_bm_lock,
1289 * may process the whole bitmap in one go */
1290extern void _drbd_bm_set_bits(struct drbd_device *device,
1291		const unsigned long s, const unsigned long e);
1292extern int  drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1293extern int  drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1294extern int  drbd_bm_read(struct drbd_device *device,
1295		struct drbd_peer_device *peer_device) __must_hold(local);
1296extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1297extern int  drbd_bm_write(struct drbd_device *device,
1298		struct drbd_peer_device *peer_device) __must_hold(local);
1299extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1300extern int  drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1301extern int  drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1302extern int drbd_bm_write_all(struct drbd_device *device,
1303		struct drbd_peer_device *peer_device) __must_hold(local);
1304extern int  drbd_bm_write_copy_pages(struct drbd_device *device,
1305		struct drbd_peer_device *peer_device) __must_hold(local);
1306extern size_t	     drbd_bm_words(struct drbd_device *device);
1307extern unsigned long drbd_bm_bits(struct drbd_device *device);
1308extern sector_t      drbd_bm_capacity(struct drbd_device *device);
1309
1310#define DRBD_END_OF_BITMAP	(~(unsigned long)0)
1311extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1312/* bm_find_next variants for use while you hold drbd_bm_lock() */
1313extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1314extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1315extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1316extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1317/* for receive_bitmap */
1318extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1319		size_t number, unsigned long *buffer);
1320/* for _drbd_send_bitmap */
1321extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1322		size_t number, unsigned long *buffer);
1323
1324extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1325extern void drbd_bm_unlock(struct drbd_device *device);
1326/* drbd_main.c */
1327
1328extern struct kmem_cache *drbd_request_cache;
1329extern struct kmem_cache *drbd_ee_cache;	/* peer requests */
1330extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
1331extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
1332extern mempool_t drbd_request_mempool;
1333extern mempool_t drbd_ee_mempool;
1334
1335/* drbd's page pool, used to buffer data received from the peer,
1336 * or data requested by the peer.
1337 *
1338 * This does not have an emergency reserve.
1339 *
1340 * When allocating from this pool, it first takes pages from the pool.
1341 * Only if the pool is depleted will try to allocate from the system.
1342 *
1343 * The assumption is that pages taken from this pool will be processed,
1344 * and given back, "quickly", and then can be recycled, so we can avoid
1345 * frequent calls to alloc_page(), and still will be able to make progress even
1346 * under memory pressure.
1347 */
1348extern struct page *drbd_pp_pool;
1349extern spinlock_t   drbd_pp_lock;
1350extern int	    drbd_pp_vacant;
1351extern wait_queue_head_t drbd_pp_wait;
1352
1353/* We also need a standard (emergency-reserve backed) page pool
1354 * for meta data IO (activity log, bitmap).
1355 * We can keep it global, as long as it is used as "N pages at a time".
1356 * 128 should be plenty, currently we probably can get away with as few as 1.
1357 */
1358#define DRBD_MIN_POOL_PAGES	128
1359extern mempool_t drbd_md_io_page_pool;
1360
1361/* We also need to make sure we get a bio
1362 * when we need it for housekeeping purposes */
1363extern struct bio_set drbd_md_io_bio_set;
1364
1365/* And a bio_set for cloning */
1366extern struct bio_set drbd_io_bio_set;
1367
1368extern struct mutex resources_mutex;
1369
1370extern int conn_lowest_minor(struct drbd_connection *connection);
1371extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1372extern void drbd_destroy_device(struct kref *kref);
1373extern void drbd_delete_device(struct drbd_device *device);
1374
1375extern struct drbd_resource *drbd_create_resource(const char *name);
1376extern void drbd_free_resource(struct drbd_resource *resource);
1377
1378extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1379extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1380extern void drbd_destroy_connection(struct kref *kref);
1381extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1382					    void *peer_addr, int peer_addr_len);
1383extern struct drbd_resource *drbd_find_resource(const char *name);
1384extern void drbd_destroy_resource(struct kref *kref);
1385extern void conn_free_crypto(struct drbd_connection *connection);
1386
1387/* drbd_req */
1388extern void do_submit(struct work_struct *ws);
1389extern void __drbd_make_request(struct drbd_device *, struct bio *);
1390void drbd_submit_bio(struct bio *bio);
1391extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1392extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1393
1394
1395/* drbd_nl.c */
1396
1397extern struct mutex notification_mutex;
1398
1399extern void drbd_suspend_io(struct drbd_device *device);
1400extern void drbd_resume_io(struct drbd_device *device);
1401extern char *ppsize(char *buf, unsigned long long size);
1402extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1403enum determine_dev_size {
1404	DS_ERROR_SHRINK = -3,
1405	DS_ERROR_SPACE_MD = -2,
1406	DS_ERROR = -1,
1407	DS_UNCHANGED = 0,
1408	DS_SHRUNK = 1,
1409	DS_GREW = 2,
1410	DS_GREW_FROM_ZERO = 3,
1411};
1412extern enum determine_dev_size
1413drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
1414extern void resync_after_online_grow(struct drbd_device *);
1415extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1416			struct drbd_backing_dev *bdev, struct o_qlim *o);
1417extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1418					enum drbd_role new_role,
1419					int force);
1420extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1421extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1422extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1423extern int drbd_khelper(struct drbd_device *device, char *cmd);
1424
1425/* drbd_worker.c */
1426/* bi_end_io handlers */
1427extern void drbd_md_endio(struct bio *bio);
1428extern void drbd_peer_request_endio(struct bio *bio);
1429extern void drbd_request_endio(struct bio *bio);
1430extern int drbd_worker(struct drbd_thread *thi);
1431enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1432void drbd_resync_after_changed(struct drbd_device *device);
1433extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1434extern void resume_next_sg(struct drbd_device *device);
1435extern void suspend_other_sg(struct drbd_device *device);
1436extern int drbd_resync_finished(struct drbd_peer_device *peer_device);
1437/* maybe rather drbd_main.c ? */
1438extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1439extern void drbd_md_put_buffer(struct drbd_device *device);
1440extern int drbd_md_sync_page_io(struct drbd_device *device,
1441		struct drbd_backing_dev *bdev, sector_t sector, enum req_op op);
1442extern void drbd_ov_out_of_sync_found(struct drbd_peer_device *peer_device,
1443		sector_t sector, int size);
1444extern void wait_until_done_or_force_detached(struct drbd_device *device,
1445		struct drbd_backing_dev *bdev, unsigned int *done);
1446extern void drbd_rs_controller_reset(struct drbd_peer_device *peer_device);
1447
1448static inline void ov_out_of_sync_print(struct drbd_peer_device *peer_device)
1449{
1450	struct drbd_device *device = peer_device->device;
1451
1452	if (device->ov_last_oos_size) {
1453		drbd_err(peer_device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1454		     (unsigned long long)device->ov_last_oos_start,
1455		     (unsigned long)device->ov_last_oos_size);
1456	}
1457	device->ov_last_oos_size = 0;
1458}
1459
1460
1461extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
1462extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
1463			 void *);
1464/* worker callbacks */
1465extern int w_e_end_data_req(struct drbd_work *, int);
1466extern int w_e_end_rsdata_req(struct drbd_work *, int);
1467extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1468extern int w_e_end_ov_reply(struct drbd_work *, int);
1469extern int w_e_end_ov_req(struct drbd_work *, int);
1470extern int w_ov_finished(struct drbd_work *, int);
1471extern int w_resync_timer(struct drbd_work *, int);
1472extern int w_send_write_hint(struct drbd_work *, int);
1473extern int w_send_dblock(struct drbd_work *, int);
1474extern int w_send_read_req(struct drbd_work *, int);
1475extern int w_e_reissue(struct drbd_work *, int);
1476extern int w_restart_disk_io(struct drbd_work *, int);
1477extern int w_send_out_of_sync(struct drbd_work *, int);
1478
1479extern void resync_timer_fn(struct timer_list *t);
1480extern void start_resync_timer_fn(struct timer_list *t);
1481
1482extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1483
1484/* drbd_receiver.c */
1485extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1486		sector_t start, unsigned int nr_sectors, int flags);
1487extern int drbd_receiver(struct drbd_thread *thi);
1488extern int drbd_ack_receiver(struct drbd_thread *thi);
1489extern void drbd_send_ping_wf(struct work_struct *ws);
1490extern void drbd_send_acks_wf(struct work_struct *ws);
1491extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1492extern bool drbd_rs_should_slow_down(struct drbd_peer_device *peer_device, sector_t sector,
1493		bool throttle_if_app_is_waiting);
1494extern int drbd_submit_peer_request(struct drbd_peer_request *peer_req);
1495extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1496extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1497						     sector_t, unsigned int,
1498						     unsigned int,
1499						     gfp_t) __must_hold(local);
1500extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1501				 int);
1502#define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1503#define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1504extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1505extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1506extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1507extern int drbd_connected(struct drbd_peer_device *);
1508
1509/* sets the number of 512 byte sectors of our virtual device */
1510void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1511
1512/*
1513 * used to submit our private bio
1514 */
1515static inline void drbd_submit_bio_noacct(struct drbd_device *device,
1516					     int fault_type, struct bio *bio)
1517{
1518	__release(local);
1519	if (!bio->bi_bdev) {
1520		drbd_err(device, "drbd_submit_bio_noacct: bio->bi_bdev == NULL\n");
1521		bio->bi_status = BLK_STS_IOERR;
1522		bio_endio(bio);
1523		return;
1524	}
1525
1526	if (drbd_insert_fault(device, fault_type))
1527		bio_io_error(bio);
1528	else
1529		submit_bio_noacct(bio);
1530}
1531
1532void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1533			      enum write_ordering_e wo);
1534
1535/* drbd_proc.c */
1536extern struct proc_dir_entry *drbd_proc;
1537int drbd_seq_show(struct seq_file *seq, void *v);
1538
1539/* drbd_actlog.c */
1540extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1541extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1542extern void drbd_al_begin_io_commit(struct drbd_device *device);
1543extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1544extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1545extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1546extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1547extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1548extern int drbd_try_rs_begin_io(struct drbd_peer_device *peer_device, sector_t sector);
1549extern void drbd_rs_cancel_all(struct drbd_device *device);
1550extern int drbd_rs_del_all(struct drbd_device *device);
1551extern void drbd_rs_failed_io(struct drbd_peer_device *peer_device,
1552		sector_t sector, int size);
1553extern void drbd_advance_rs_marks(struct drbd_peer_device *peer_device, unsigned long still_to_go);
1554
1555enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1556extern int __drbd_change_sync(struct drbd_peer_device *peer_device, sector_t sector, int size,
1557		enum update_sync_bits_mode mode);
1558#define drbd_set_in_sync(peer_device, sector, size) \
1559	__drbd_change_sync(peer_device, sector, size, SET_IN_SYNC)
1560#define drbd_set_out_of_sync(peer_device, sector, size) \
1561	__drbd_change_sync(peer_device, sector, size, SET_OUT_OF_SYNC)
1562#define drbd_rs_failed_io(peer_device, sector, size) \
1563	__drbd_change_sync(peer_device, sector, size, RECORD_RS_FAILED)
1564extern void drbd_al_shrink(struct drbd_device *device);
1565extern int drbd_al_initialize(struct drbd_device *, void *);
1566
1567/* drbd_nl.c */
1568/* state info broadcast */
1569struct sib_info {
1570	enum drbd_state_info_bcast_reason sib_reason;
1571	union {
1572		struct {
1573			char *helper_name;
1574			unsigned helper_exit_code;
1575		};
1576		struct {
1577			union drbd_state os;
1578			union drbd_state ns;
1579		};
1580	};
1581};
1582void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
1583
1584extern int notify_resource_state(struct sk_buff *,
1585				  unsigned int,
1586				  struct drbd_resource *,
1587				  struct resource_info *,
1588				  enum drbd_notification_type);
1589extern int notify_device_state(struct sk_buff *,
1590				unsigned int,
1591				struct drbd_device *,
1592				struct device_info *,
1593				enum drbd_notification_type);
1594extern int notify_connection_state(struct sk_buff *,
1595				    unsigned int,
1596				    struct drbd_connection *,
1597				    struct connection_info *,
1598				    enum drbd_notification_type);
1599extern int notify_peer_device_state(struct sk_buff *,
1600				     unsigned int,
1601				     struct drbd_peer_device *,
1602				     struct peer_device_info *,
1603				     enum drbd_notification_type);
1604extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1605			  struct drbd_connection *, const char *, int);
1606
1607/*
1608 * inline helper functions
1609 *************************/
1610
1611/* see also page_chain_add and friends in drbd_receiver.c */
1612static inline struct page *page_chain_next(struct page *page)
1613{
1614	return (struct page *)page_private(page);
1615}
1616#define page_chain_for_each(page) \
1617	for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1618			page = page_chain_next(page))
1619#define page_chain_for_each_safe(page, n) \
1620	for (; page && ({ n = page_chain_next(page); 1; }); page = n)
1621
1622
1623static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1624{
1625	struct page *page = peer_req->pages;
1626	page_chain_for_each(page) {
1627		if (page_count(page) > 1)
1628			return 1;
1629	}
1630	return 0;
1631}
1632
1633static inline union drbd_state drbd_read_state(struct drbd_device *device)
1634{
1635	struct drbd_resource *resource = device->resource;
1636	union drbd_state rv;
1637
1638	rv.i = device->state.i;
1639	rv.susp = resource->susp;
1640	rv.susp_nod = resource->susp_nod;
1641	rv.susp_fen = resource->susp_fen;
1642
1643	return rv;
1644}
1645
1646enum drbd_force_detach_flags {
1647	DRBD_READ_ERROR,
1648	DRBD_WRITE_ERROR,
1649	DRBD_META_IO_ERROR,
1650	DRBD_FORCE_DETACH,
1651};
1652
1653#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1654static inline void __drbd_chk_io_error_(struct drbd_device *device,
1655		enum drbd_force_detach_flags df,
1656		const char *where)
1657{
1658	enum drbd_io_error_p ep;
1659
1660	rcu_read_lock();
1661	ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1662	rcu_read_unlock();
1663	switch (ep) {
1664	case EP_PASS_ON: /* FIXME would this be better named "Ignore"? */
1665		if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1666			if (drbd_ratelimit())
1667				drbd_err(device, "Local IO failed in %s.\n", where);
1668			if (device->state.disk > D_INCONSISTENT)
1669				_drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1670			break;
1671		}
1672		fallthrough;	/* for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1673	case EP_DETACH:
1674	case EP_CALL_HELPER:
1675		/* Remember whether we saw a READ or WRITE error.
1676		 *
1677		 * Recovery of the affected area for WRITE failure is covered
1678		 * by the activity log.
1679		 * READ errors may fall outside that area though. Certain READ
1680		 * errors can be "healed" by writing good data to the affected
1681		 * blocks, which triggers block re-allocation in lower layers.
1682		 *
1683		 * If we can not write the bitmap after a READ error,
1684		 * we may need to trigger a full sync (see w_go_diskless()).
1685		 *
1686		 * Force-detach is not really an IO error, but rather a
1687		 * desperate measure to try to deal with a completely
1688		 * unresponsive lower level IO stack.
1689		 * Still it should be treated as a WRITE error.
1690		 *
1691		 * Meta IO error is always WRITE error:
1692		 * we read meta data only once during attach,
1693		 * which will fail in case of errors.
1694		 */
1695		set_bit(WAS_IO_ERROR, &device->flags);
1696		if (df == DRBD_READ_ERROR)
1697			set_bit(WAS_READ_ERROR, &device->flags);
1698		if (df == DRBD_FORCE_DETACH)
1699			set_bit(FORCE_DETACH, &device->flags);
1700		if (device->state.disk > D_FAILED) {
1701			_drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1702			drbd_err(device,
1703				"Local IO failed in %s. Detaching...\n", where);
1704		}
1705		break;
1706	}
1707}
1708
1709/**
1710 * drbd_chk_io_error: Handle the on_io_error setting, should be called from all io completion handlers
1711 * @device:	 DRBD device.
1712 * @error:	 Error code passed to the IO completion callback
1713 * @forcedetach: Force detach. I.e. the error happened while accessing the meta data
1714 *
1715 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
1716 */
1717#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1718static inline void drbd_chk_io_error_(struct drbd_device *device,
1719	int error, enum drbd_force_detach_flags forcedetach, const char *where)
1720{
1721	if (error) {
1722		unsigned long flags;
1723		spin_lock_irqsave(&device->resource->req_lock, flags);
1724		__drbd_chk_io_error_(device, forcedetach, where);
1725		spin_unlock_irqrestore(&device->resource->req_lock, flags);
1726	}
1727}
1728
1729
1730/**
1731 * drbd_md_first_sector() - Returns the first sector number of the meta data area
1732 * @bdev:	Meta data block device.
1733 *
1734 * BTW, for internal meta data, this happens to be the maximum capacity
1735 * we could agree upon with our peer node.
1736 */
1737static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1738{
1739	switch (bdev->md.meta_dev_idx) {
1740	case DRBD_MD_INDEX_INTERNAL:
1741	case DRBD_MD_INDEX_FLEX_INT:
1742		return bdev->md.md_offset + bdev->md.bm_offset;
1743	case DRBD_MD_INDEX_FLEX_EXT:
1744	default:
1745		return bdev->md.md_offset;
1746	}
1747}
1748
1749/**
1750 * drbd_md_last_sector() - Return the last sector number of the meta data area
1751 * @bdev:	Meta data block device.
1752 */
1753static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1754{
1755	switch (bdev->md.meta_dev_idx) {
1756	case DRBD_MD_INDEX_INTERNAL:
1757	case DRBD_MD_INDEX_FLEX_INT:
1758		return bdev->md.md_offset + MD_4kB_SECT -1;
1759	case DRBD_MD_INDEX_FLEX_EXT:
1760	default:
1761		return bdev->md.md_offset + bdev->md.md_size_sect -1;
1762	}
1763}
1764
1765/* Returns the number of 512 byte sectors of the device */
1766static inline sector_t drbd_get_capacity(struct block_device *bdev)
1767{
1768	return bdev ? bdev_nr_sectors(bdev) : 0;
1769}
1770
1771/**
1772 * drbd_get_max_capacity() - Returns the capacity we announce to out peer
1773 * @bdev:	Meta data block device.
1774 *
1775 * returns the capacity we announce to out peer.  we clip ourselves at the
1776 * various MAX_SECTORS, because if we don't, current implementation will
1777 * oops sooner or later
1778 */
1779static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1780{
1781	sector_t s;
1782
1783	switch (bdev->md.meta_dev_idx) {
1784	case DRBD_MD_INDEX_INTERNAL:
1785	case DRBD_MD_INDEX_FLEX_INT:
1786		s = drbd_get_capacity(bdev->backing_bdev)
1787			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1788				drbd_md_first_sector(bdev))
1789			: 0;
1790		break;
1791	case DRBD_MD_INDEX_FLEX_EXT:
1792		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1793				drbd_get_capacity(bdev->backing_bdev));
1794		/* clip at maximum size the meta device can support */
1795		s = min_t(sector_t, s,
1796			BM_EXT_TO_SECT(bdev->md.md_size_sect
1797				     - bdev->md.bm_offset));
1798		break;
1799	default:
1800		s = min_t(sector_t, DRBD_MAX_SECTORS,
1801				drbd_get_capacity(bdev->backing_bdev));
1802	}
1803	return s;
1804}
1805
1806/**
1807 * drbd_md_ss() - Return the sector number of our meta data super block
1808 * @bdev:	Meta data block device.
1809 */
1810static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1811{
1812	const int meta_dev_idx = bdev->md.meta_dev_idx;
1813
1814	if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1815		return 0;
1816
1817	/* Since drbd08, internal meta data is always "flexible".
1818	 * position: last 4k aligned block of 4k size */
1819	if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1820	    meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1821		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1822
1823	/* external, some index; this is the old fixed size layout */
1824	return MD_128MB_SECT * bdev->md.meta_dev_idx;
1825}
1826
1827static inline void
1828drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1829{
1830	unsigned long flags;
1831	spin_lock_irqsave(&q->q_lock, flags);
1832	list_add_tail(&w->list, &q->q);
1833	spin_unlock_irqrestore(&q->q_lock, flags);
1834	wake_up(&q->q_wait);
1835}
1836
1837static inline void
1838drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1839{
1840	unsigned long flags;
1841	spin_lock_irqsave(&q->q_lock, flags);
1842	if (list_empty_careful(&w->list))
1843		list_add_tail(&w->list, &q->q);
1844	spin_unlock_irqrestore(&q->q_lock, flags);
1845	wake_up(&q->q_wait);
1846}
1847
1848static inline void
1849drbd_device_post_work(struct drbd_device *device, int work_bit)
1850{
1851	if (!test_and_set_bit(work_bit, &device->flags)) {
1852		struct drbd_connection *connection =
1853			first_peer_device(device)->connection;
1854		struct drbd_work_queue *q = &connection->sender_work;
1855		if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1856			wake_up(&q->q_wait);
1857	}
1858}
1859
1860extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1861
1862/* To get the ack_receiver out of the blocking network stack,
1863 * so it can change its sk_rcvtimeo from idle- to ping-timeout,
1864 * and send a ping, we need to send a signal.
1865 * Which signal we send is irrelevant. */
1866static inline void wake_ack_receiver(struct drbd_connection *connection)
1867{
1868	struct task_struct *task = connection->ack_receiver.task;
1869	if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1870		send_sig(SIGXCPU, task, 1);
1871}
1872
1873static inline void request_ping(struct drbd_connection *connection)
1874{
1875	set_bit(SEND_PING, &connection->flags);
1876	wake_ack_receiver(connection);
1877}
1878
1879extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1880extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1881extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1882			     enum drbd_packet, unsigned int, void *,
1883			     unsigned int);
1884extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1885			     enum drbd_packet, unsigned int, void *,
1886			     unsigned int);
1887
1888extern int drbd_send_ping(struct drbd_connection *connection);
1889extern int drbd_send_ping_ack(struct drbd_connection *connection);
1890extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1891extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1892
1893static inline void drbd_thread_stop(struct drbd_thread *thi)
1894{
1895	_drbd_thread_stop(thi, false, true);
1896}
1897
1898static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1899{
1900	_drbd_thread_stop(thi, false, false);
1901}
1902
1903static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1904{
1905	_drbd_thread_stop(thi, true, false);
1906}
1907
1908/* counts how many answer packets packets we expect from our peer,
1909 * for either explicit application requests,
1910 * or implicit barrier packets as necessary.
1911 * increased:
1912 *  w_send_barrier
1913 *  _req_mod(req, QUEUE_FOR_NET_WRITE or QUEUE_FOR_NET_READ);
1914 *    it is much easier and equally valid to count what we queue for the
1915 *    worker, even before it actually was queued or send.
1916 *    (drbd_make_request_common; recovery path on read io-error)
1917 * decreased:
1918 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
1919 *  _req_mod(req, DATA_RECEIVED)
1920 *     [from receive_DataReply]
1921 *  _req_mod(req, WRITE_ACKED_BY_PEER or RECV_ACKED_BY_PEER or NEG_ACKED)
1922 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
1923 *     for some reason it is NOT decreased in got_NegAck,
1924 *     but in the resulting cleanup code from report_params.
1925 *     we should try to remember the reason for that...
1926 *  _req_mod(req, SEND_FAILED or SEND_CANCELED)
1927 *  _req_mod(req, CONNECTION_LOST_WHILE_PENDING)
1928 *     [from tl_clear_barrier]
1929 */
1930static inline void inc_ap_pending(struct drbd_device *device)
1931{
1932	atomic_inc(&device->ap_pending_cnt);
1933}
1934
1935#define dec_ap_pending(device) ((void)expect((device), __dec_ap_pending(device) >= 0))
1936static inline int __dec_ap_pending(struct drbd_device *device)
1937{
1938	int ap_pending_cnt = atomic_dec_return(&device->ap_pending_cnt);
1939
1940	if (ap_pending_cnt == 0)
1941		wake_up(&device->misc_wait);
1942	return ap_pending_cnt;
1943}
1944
1945/* counts how many resync-related answers we still expect from the peer
1946 *		     increase			decrease
1947 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
1948 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
1949 *					   (or P_NEG_ACK with ID_SYNCER)
1950 */
1951static inline void inc_rs_pending(struct drbd_peer_device *peer_device)
1952{
1953	atomic_inc(&peer_device->device->rs_pending_cnt);
1954}
1955
1956#define dec_rs_pending(peer_device) \
1957	((void)expect((peer_device), __dec_rs_pending(peer_device) >= 0))
1958static inline int __dec_rs_pending(struct drbd_peer_device *peer_device)
1959{
1960	return atomic_dec_return(&peer_device->device->rs_pending_cnt);
1961}
1962
1963/* counts how many answers we still need to send to the peer.
1964 * increased on
1965 *  receive_Data	unless protocol A;
1966 *			we need to send a P_RECV_ACK (proto B)
1967 *			or P_WRITE_ACK (proto C)
1968 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
1969 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
1970 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
1971 */
1972static inline void inc_unacked(struct drbd_device *device)
1973{
1974	atomic_inc(&device->unacked_cnt);
1975}
1976
1977#define dec_unacked(device) ((void)expect(device, __dec_unacked(device) >= 0))
1978static inline int __dec_unacked(struct drbd_device *device)
1979{
1980	return atomic_dec_return(&device->unacked_cnt);
1981}
1982
1983#define sub_unacked(device, n) ((void)expect(device, __sub_unacked(device) >= 0))
1984static inline int __sub_unacked(struct drbd_device *device, int n)
1985{
1986	return atomic_sub_return(n, &device->unacked_cnt);
1987}
1988
1989static inline bool is_sync_target_state(enum drbd_conns connection_state)
1990{
1991	return	connection_state == C_SYNC_TARGET ||
1992		connection_state == C_PAUSED_SYNC_T;
1993}
1994
1995static inline bool is_sync_source_state(enum drbd_conns connection_state)
1996{
1997	return	connection_state == C_SYNC_SOURCE ||
1998		connection_state == C_PAUSED_SYNC_S;
1999}
2000
2001static inline bool is_sync_state(enum drbd_conns connection_state)
2002{
2003	return	is_sync_source_state(connection_state) ||
2004		is_sync_target_state(connection_state);
2005}
2006
2007/**
2008 * get_ldev() - Increase the ref count on device->ldev. Returns 0 if there is no ldev
2009 * @_device:		DRBD device.
2010 * @_min_state:		Minimum device state required for success.
2011 *
2012 * You have to call put_ldev() when finished working with device->ldev.
2013 */
2014#define get_ldev_if_state(_device, _min_state)				\
2015	(_get_ldev_if_state((_device), (_min_state)) ?			\
2016	 ({ __acquire(x); true; }) : false)
2017#define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2018
2019static inline void put_ldev(struct drbd_device *device)
2020{
2021	enum drbd_disk_state disk_state = device->state.disk;
2022	/* We must check the state *before* the atomic_dec becomes visible,
2023	 * or we have a theoretical race where someone hitting zero,
2024	 * while state still D_FAILED, will then see D_DISKLESS in the
2025	 * condition below and calling into destroy, where he must not, yet. */
2026	int i = atomic_dec_return(&device->local_cnt);
2027
2028	/* This may be called from some endio handler,
2029	 * so we must not sleep here. */
2030
2031	__release(local);
2032	D_ASSERT(device, i >= 0);
2033	if (i == 0) {
2034		if (disk_state == D_DISKLESS)
2035			/* even internal references gone, safe to destroy */
2036			drbd_device_post_work(device, DESTROY_DISK);
2037		if (disk_state == D_FAILED)
2038			/* all application IO references gone. */
2039			if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2040				drbd_device_post_work(device, GO_DISKLESS);
2041		wake_up(&device->misc_wait);
2042	}
2043}
2044
2045#ifndef __CHECKER__
2046static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2047{
2048	int io_allowed;
2049
2050	/* never get a reference while D_DISKLESS */
2051	if (device->state.disk == D_DISKLESS)
2052		return 0;
2053
2054	atomic_inc(&device->local_cnt);
2055	io_allowed = (device->state.disk >= mins);
2056	if (!io_allowed)
2057		put_ldev(device);
2058	return io_allowed;
2059}
2060#else
2061extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2062#endif
2063
2064/* this throttles on-the-fly application requests
2065 * according to max_buffers settings;
2066 * maybe re-implement using semaphores? */
2067static inline int drbd_get_max_buffers(struct drbd_device *device)
2068{
2069	struct net_conf *nc;
2070	int mxb;
2071
2072	rcu_read_lock();
2073	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2074	mxb = nc ? nc->max_buffers : 1000000;  /* arbitrary limit on open requests */
2075	rcu_read_unlock();
2076
2077	return mxb;
2078}
2079
2080static inline int drbd_state_is_stable(struct drbd_device *device)
2081{
2082	union drbd_dev_state s = device->state;
2083
2084	/* DO NOT add a default clause, we want the compiler to warn us
2085	 * for any newly introduced state we may have forgotten to add here */
2086
2087	switch ((enum drbd_conns)s.conn) {
2088	/* new io only accepted when there is no connection, ... */
2089	case C_STANDALONE:
2090	case C_WF_CONNECTION:
2091	/* ... or there is a well established connection. */
2092	case C_CONNECTED:
2093	case C_SYNC_SOURCE:
2094	case C_SYNC_TARGET:
2095	case C_VERIFY_S:
2096	case C_VERIFY_T:
2097	case C_PAUSED_SYNC_S:
2098	case C_PAUSED_SYNC_T:
2099	case C_AHEAD:
2100	case C_BEHIND:
2101		/* transitional states, IO allowed */
2102	case C_DISCONNECTING:
2103	case C_UNCONNECTED:
2104	case C_TIMEOUT:
2105	case C_BROKEN_PIPE:
2106	case C_NETWORK_FAILURE:
2107	case C_PROTOCOL_ERROR:
2108	case C_TEAR_DOWN:
2109	case C_WF_REPORT_PARAMS:
2110	case C_STARTING_SYNC_S:
2111	case C_STARTING_SYNC_T:
2112		break;
2113
2114		/* Allow IO in BM exchange states with new protocols */
2115	case C_WF_BITMAP_S:
2116		if (first_peer_device(device)->connection->agreed_pro_version < 96)
2117			return 0;
2118		break;
2119
2120		/* no new io accepted in these states */
2121	case C_WF_BITMAP_T:
2122	case C_WF_SYNC_UUID:
2123	case C_MASK:
2124		/* not "stable" */
2125		return 0;
2126	}
2127
2128	switch ((enum drbd_disk_state)s.disk) {
2129	case D_DISKLESS:
2130	case D_INCONSISTENT:
2131	case D_OUTDATED:
2132	case D_CONSISTENT:
2133	case D_UP_TO_DATE:
2134	case D_FAILED:
2135		/* disk state is stable as well. */
2136		break;
2137
2138	/* no new io accepted during transitional states */
2139	case D_ATTACHING:
2140	case D_NEGOTIATING:
2141	case D_UNKNOWN:
2142	case D_MASK:
2143		/* not "stable" */
2144		return 0;
2145	}
2146
2147	return 1;
2148}
2149
2150static inline int drbd_suspended(struct drbd_device *device)
2151{
2152	struct drbd_resource *resource = device->resource;
2153
2154	return resource->susp || resource->susp_fen || resource->susp_nod;
2155}
2156
2157static inline bool may_inc_ap_bio(struct drbd_device *device)
2158{
2159	int mxb = drbd_get_max_buffers(device);
2160
2161	if (drbd_suspended(device))
2162		return false;
2163	if (atomic_read(&device->suspend_cnt))
2164		return false;
2165
2166	/* to avoid potential deadlock or bitmap corruption,
2167	 * in various places, we only allow new application io
2168	 * to start during "stable" states. */
2169
2170	/* no new io accepted when attaching or detaching the disk */
2171	if (!drbd_state_is_stable(device))
2172		return false;
2173
2174	/* since some older kernels don't have atomic_add_unless,
2175	 * and we are within the spinlock anyways, we have this workaround.  */
2176	if (atomic_read(&device->ap_bio_cnt) > mxb)
2177		return false;
2178	if (test_bit(BITMAP_IO, &device->flags))
2179		return false;
2180	return true;
2181}
2182
2183static inline bool inc_ap_bio_cond(struct drbd_device *device)
2184{
2185	bool rv = false;
2186
2187	spin_lock_irq(&device->resource->req_lock);
2188	rv = may_inc_ap_bio(device);
2189	if (rv)
2190		atomic_inc(&device->ap_bio_cnt);
2191	spin_unlock_irq(&device->resource->req_lock);
2192
2193	return rv;
2194}
2195
2196static inline void inc_ap_bio(struct drbd_device *device)
2197{
2198	/* we wait here
2199	 *    as long as the device is suspended
2200	 *    until the bitmap is no longer on the fly during connection
2201	 *    handshake as long as we would exceed the max_buffer limit.
2202	 *
2203	 * to avoid races with the reconnect code,
2204	 * we need to atomic_inc within the spinlock. */
2205
2206	wait_event(device->misc_wait, inc_ap_bio_cond(device));
2207}
2208
2209static inline void dec_ap_bio(struct drbd_device *device)
2210{
2211	int mxb = drbd_get_max_buffers(device);
2212	int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2213
2214	D_ASSERT(device, ap_bio >= 0);
2215
2216	if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2217		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2218			drbd_queue_work(&first_peer_device(device)->
2219				connection->sender_work,
2220				&device->bm_io_work.w);
2221	}
2222
2223	/* this currently does wake_up for every dec_ap_bio!
2224	 * maybe rather introduce some type of hysteresis?
2225	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
2226	if (ap_bio < mxb)
2227		wake_up(&device->misc_wait);
2228}
2229
2230static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2231{
2232	return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2233		first_peer_device(device)->connection->agreed_pro_version != 100;
2234}
2235
2236static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2237{
2238	int changed = device->ed_uuid != val;
2239	device->ed_uuid = val;
2240	return changed;
2241}
2242
2243static inline int drbd_queue_order_type(struct drbd_device *device)
2244{
2245	/* sorry, we currently have no working implementation
2246	 * of distributed TCQ stuff */
2247#ifndef QUEUE_ORDERED_NONE
2248#define QUEUE_ORDERED_NONE 0
2249#endif
2250	return QUEUE_ORDERED_NONE;
2251}
2252
2253static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2254{
2255	return list_first_entry_or_null(&resource->connections,
2256				struct drbd_connection, connections);
2257}
2258
2259#endif
2260