xref: /kernel/linux/linux-6.6/fs/xfs/xfs_log_priv.h (revision 62306a36)
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6#ifndef	__XFS_LOG_PRIV_H__
7#define __XFS_LOG_PRIV_H__
8
9#include "xfs_extent_busy.h"	/* for struct xfs_busy_extents */
10
11struct xfs_buf;
12struct xlog;
13struct xlog_ticket;
14struct xfs_mount;
15
16/*
17 * get client id from packed copy.
18 *
19 * this hack is here because the xlog_pack code copies four bytes
20 * of xlog_op_header containing the fields oh_clientid, oh_flags
21 * and oh_res2 into the packed copy.
22 *
23 * later on this four byte chunk is treated as an int and the
24 * client id is pulled out.
25 *
26 * this has endian issues, of course.
27 */
28static inline uint xlog_get_client_id(__be32 i)
29{
30	return be32_to_cpu(i) >> 24;
31}
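
/*
 * Illustrative sketch (the 0x69 value here is just an example client id, not
 * taken from the log format definitions): because oh_clientid is the most
 * significant byte of the packed big-endian word, unpacking reduces to the
 * shift above:
 *
 *	__be32 packed = cpu_to_be32(0x69 << 24);
 *
 *	ASSERT(xlog_get_client_id(packed) == 0x69);
 */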
32
33/*
34 * In core log state
35 */
36enum xlog_iclog_state {
37	XLOG_STATE_ACTIVE,	/* Current IC log being written to */
38	XLOG_STATE_WANT_SYNC,	/* Want to sync this iclog; no more writes */
39	XLOG_STATE_SYNCING,	/* This IC log is syncing */
40	XLOG_STATE_DONE_SYNC,	/* Done syncing to disk */
41	XLOG_STATE_CALLBACK,	/* Callback functions now */
42	XLOG_STATE_DIRTY,	/* Dirty IC log, not ready for ACTIVE status */
43};
44
45#define XLOG_STATE_STRINGS \
46	{ XLOG_STATE_ACTIVE,	"XLOG_STATE_ACTIVE" }, \
47	{ XLOG_STATE_WANT_SYNC,	"XLOG_STATE_WANT_SYNC" }, \
48	{ XLOG_STATE_SYNCING,	"XLOG_STATE_SYNCING" }, \
49	{ XLOG_STATE_DONE_SYNC,	"XLOG_STATE_DONE_SYNC" }, \
50	{ XLOG_STATE_CALLBACK,	"XLOG_STATE_CALLBACK" }, \
51	{ XLOG_STATE_DIRTY,	"XLOG_STATE_DIRTY" }
52
53/*
54 * In core log flags
55 */
56#define XLOG_ICL_NEED_FLUSH	(1u << 0)	/* iclog needs REQ_PREFLUSH */
57#define XLOG_ICL_NEED_FUA	(1u << 1)	/* iclog needs REQ_FUA */
58
59#define XLOG_ICL_STRINGS \
60	{ XLOG_ICL_NEED_FLUSH,	"XLOG_ICL_NEED_FLUSH" }, \
61	{ XLOG_ICL_NEED_FUA,	"XLOG_ICL_NEED_FUA" }
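
/*
 * These { value, "name" } tables follow the tracepoint pretty-printer
 * convention. A minimal sketch of how such a table is typically consumed,
 * assuming a trace event that records the state in an int field:
 *
 *	TP_printk("state %s",
 *		  __print_symbolic(__entry->state, XLOG_STATE_STRINGS))
 *
 * Bit masks such as XLOG_ICL_STRINGS would go through __print_flags()
 * instead.
 */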
62
63
64/*
65 * Log ticket flags
66 */
67#define XLOG_TIC_PERM_RESERV	(1u << 0)	/* permanent reservation */
68
69#define XLOG_TIC_FLAGS \
70	{ XLOG_TIC_PERM_RESERV,	"XLOG_TIC_PERM_RESERV" }
71
72/*
73 * Below are states for covering allocation transactions.
74 * By covering, we mean changing the h_tail_lsn in the last on-disk
75 * log write such that no allocation transactions will be re-done during
76 * recovery after a system crash. Recovery starts at the last on-disk
77 * log write.
78 *
79 * These states are used to insert dummy log entries to cover
80 * space allocation transactions which can undo non-transactional changes
81 * after a crash. Writes to a file with space
82 * already allocated do not result in any transactions. Allocations
83 * might include space beyond the EOF. So if we just push the EOF a
84 * little, the last transaction for the file could contain the wrong
85 * size. If there is no file system activity, after an allocation
86 * transaction, and the system crashes, the allocation transaction
87 * will get replayed and the file will be truncated. This could
88 * be hours/days/... after the allocation occurred.
89 *
90 * The fix for this is to do two dummy transactions when the
91 * system is idle. We need two dummy transactions because the h_tail_lsn
92 * in the log record header needs to point beyond the last possible
93 * non-dummy transaction. The first dummy changes the h_tail_lsn to
94 * the first transaction before the dummy. The second dummy causes
95 * h_tail_lsn to point to the first dummy. Recovery starts at h_tail_lsn.
96 *
97 * These dummy transactions get committed when everything
98 * is idle (after there has been some activity).
99 *
100 * There are 5 states used to control this.
101 *
102 *  IDLE -- no logging has been done on the file system or
103 *		we are done covering previous transactions.
104 *  NEED -- logging has occurred and we need a dummy transaction
105 *		when the log becomes idle.
106 *  DONE -- we were in the NEED state and have committed a dummy
107 *		transaction.
108 *  NEED2 -- we detected that a dummy transaction has gone to the
109 *		on disk log with no other transactions.
110 *  DONE2 -- we committed a dummy transaction when in the NEED2 state.
111 *
112 * There are two places where we switch states:
113 *
114 * 1.) In xfs_sync, when we detect an idle log and are in NEED or NEED2.
115 *	We commit the dummy transaction and switch to DONE or DONE2,
116 *	respectively. In all other states, we don't do anything.
117 *
118 * 2.) When we finish writing the on-disk log (xlog_state_clean_log).
119 *
120 *	No matter what state we are in, if this isn't the dummy
121 *	transaction going out, the next state is NEED.
122 *	So, if we aren't in the DONE or DONE2 states, the next state
123 *	is NEED. We can't be finishing a write of the dummy record
124 *	unless it was committed and the state switched to DONE or DONE2.
125 *
126 *	If we are in the DONE state and this was a write of the
127 *		dummy transaction, we move to NEED2.
128 *
129 *	If we are in the DONE2 state and this was a write of the
130 *		dummy transaction, we move to IDLE.
131 *
132 *
133 * A single dummy transaction can get appended to
134 * one file space allocation. When this happens, the log recovery
135 * code replays the space allocation and a file could be truncated.
136 * This is why we have the NEED2 and DONE2 states before going idle.
137 */
138
139#define XLOG_STATE_COVER_IDLE	0
140#define XLOG_STATE_COVER_NEED	1
141#define XLOG_STATE_COVER_DONE	2
142#define XLOG_STATE_COVER_NEED2	3
143#define XLOG_STATE_COVER_DONE2	4
144
145#define XLOG_COVER_OPS		5
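
/*
 * A minimal sketch of transition point (2) above (hypothetical helper; the
 * real covering state machine lives in xfs_log.c), keyed off whether the
 * record that just hit the disk was the dummy:
 *
 *	static int xlog_covered_next_state(int state, bool wrote_dummy)
 *	{
 *		if (!wrote_dummy)
 *			return XLOG_STATE_COVER_NEED;
 *		if (state == XLOG_STATE_COVER_DONE)
 *			return XLOG_STATE_COVER_NEED2;
 *		if (state == XLOG_STATE_COVER_DONE2)
 *			return XLOG_STATE_COVER_IDLE;
 *		return XLOG_STATE_COVER_NEED;
 *	}
 */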
146
147typedef struct xlog_ticket {
148	struct list_head	t_queue;	/* reserve/write queue */
149	struct task_struct	*t_task;	/* task that owns this ticket */
150	xlog_tid_t		t_tid;		/* transaction identifier */
151	atomic_t		t_ref;		/* ticket reference count */
152	int			t_curr_res;	/* current reservation */
153	int			t_unit_res;	/* unit reservation */
154	char			t_ocnt;		/* original unit count */
155	char			t_cnt;		/* current unit count */
156	uint8_t			t_flags;	/* properties of reservation */
157	int			t_iclog_hdrs;	/* iclog hdrs in t_curr_res */
158} xlog_ticket_t;
159
160/*
161 * - A log record header is 512 bytes.  There is plenty of room to grow the
162 *	xlog_rec_header_t into the reserved space.
163 * - ic_data follows, so a write to disk can start at the beginning of
164 *	the iclog.
165 * - ic_force_wait is used to implement synchronous forcing of the iclog to disk.
166 * - ic_next is the pointer to the next iclog in the ring.
167 * - ic_log is a pointer back to the global log structure.
168 * - ic_size is the full size of the log buffer, minus the cycle headers.
169 * - ic_offset is the current number of bytes written to in this iclog.
170 * - ic_refcnt is bumped when someone is writing to the log.
171 * - ic_state is the state of the iclog.
172 *
173 * Because of cacheline contention on large machines, we need to separate
174 * various resources onto different cachelines. To start with, make the
175 * structure cacheline aligned. The following fields can be contended on
176 * by independent processes:
177 *
178 *	- ic_callbacks
179 *	- ic_refcnt
180 *	- fields protected by the global l_icloglock
181 *
182 * so we need to ensure that these fields are located in separate cachelines.
183 * We'll put all the read-only and l_icloglock fields in the first cacheline,
184 * and move everything else out to subsequent cachelines.
185 */
186typedef struct xlog_in_core {
187	wait_queue_head_t	ic_force_wait;
188	wait_queue_head_t	ic_write_wait;
189	struct xlog_in_core	*ic_next;
190	struct xlog_in_core	*ic_prev;
191	struct xlog		*ic_log;
192	u32			ic_size;
193	u32			ic_offset;
194	enum xlog_iclog_state	ic_state;
195	unsigned int		ic_flags;
196	void			*ic_datap;	/* pointer to iclog data */
197	struct list_head	ic_callbacks;
198
199	/* reference counts need their own cacheline */
200	atomic_t		ic_refcnt ____cacheline_aligned_in_smp;
201	xlog_in_core_2_t	*ic_data;
202#define ic_header	ic_data->hic_header
203#ifdef DEBUG
204	bool			ic_fail_crc : 1;
205#endif
206	struct semaphore	ic_sema;
207	struct work_struct	ic_end_io_work;
208	struct bio		ic_bio;
209	struct bio_vec		ic_bvec[];
210} xlog_in_core_t;
211
212/*
213 * The CIL context is used to aggregate per-transaction details as well as to be
214 * passed to the iclog for checkpoint post-commit processing.  After being
215 * passed to the iclog, another context needs to be allocated for tracking the
216 * next set of transactions to be aggregated into a checkpoint.
217 */
218struct xfs_cil;
219
220struct xfs_cil_ctx {
221	struct xfs_cil		*cil;
222	xfs_csn_t		sequence;	/* chkpt sequence # */
223	xfs_lsn_t		start_lsn;	/* first LSN of chkpt commit */
224	xfs_lsn_t		commit_lsn;	/* chkpt commit record lsn */
225	struct xlog_in_core	*commit_iclog;
226	struct xlog_ticket	*ticket;	/* chkpt ticket */
227	atomic_t		space_used;	/* aggregate size of regions */
228	struct xfs_busy_extents	busy_extents;
229	struct list_head	log_items;	/* log items in chkpt */
230	struct list_head	lv_chain;	/* logvecs being pushed */
231	struct list_head	iclog_entry;
232	struct list_head	committing;	/* ctx committing list */
233	struct work_struct	push_work;
234	atomic_t		order_id;
235
236	/*
237	 * CPUs that could have added items to the percpu CIL data.  Access is
238	 * coordinated with xc_ctx_lock.
239	 */
240	struct cpumask		cil_pcpmask;
241};
242
243/*
244 * Per-cpu CIL tracking items
245 */
246struct xlog_cil_pcp {
247	int32_t			space_used;
248	uint32_t		space_reserved;
249	struct list_head	busy_extents;
250	struct list_head	log_items;
251};
252
253/*
254 * Committed Item List structure
255 *
256 * This structure is used to track log items that have been committed but not
257 * yet written into the log. It is used only when the delayed logging mount
258 * option is enabled.
259 *
260 * This structure tracks the list of committing checkpoint contexts so
261 * we can avoid the problem of having to hold out new transactions during a
262 * flush until we have the commit record LSN of the checkpoint. We can
263 * traverse the list of committing contexts in xlog_cil_push_lsn() to find a
264 * sequence match and extract the commit LSN directly from there. If the
265 * checkpoint is still in the process of committing, we can block waiting for
266 * the commit LSN to be determined as well. This should make synchronous
267 * operations almost as efficient as the old logging methods.
268 */
269struct xfs_cil {
270	struct xlog		*xc_log;
271	unsigned long		xc_flags;
272	atomic_t		xc_iclog_hdrs;
273	struct workqueue_struct	*xc_push_wq;
274
275	struct rw_semaphore	xc_ctx_lock ____cacheline_aligned_in_smp;
276	struct xfs_cil_ctx	*xc_ctx;
277
278	spinlock_t		xc_push_lock ____cacheline_aligned_in_smp;
279	xfs_csn_t		xc_push_seq;
280	bool			xc_push_commit_stable;
281	struct list_head	xc_committing;
282	wait_queue_head_t	xc_commit_wait;
283	wait_queue_head_t	xc_start_wait;
284	xfs_csn_t		xc_current_sequence;
285	wait_queue_head_t	xc_push_wait;	/* background push throttle */
286
287	void __percpu		*xc_pcp;	/* percpu CIL structures */
288} ____cacheline_aligned_in_smp;
289
290/* xc_flags bit values */
291#define	XLOG_CIL_EMPTY		1
292#define XLOG_CIL_PCP_SPACE	2
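
/*
 * A rough sketch of the lookup described above (not the actual
 * implementation, which lives in xfs_log_cil.c and also handles waiting for
 * an in-progress commit and for shutdown): walk xc_committing under
 * xc_push_lock looking for a sequence match and take its commit_lsn.
 *
 *	struct xfs_cil_ctx *ctx;
 *
 *	spin_lock(&cil->xc_push_lock);
 *	list_for_each_entry(ctx, &cil->xc_committing, committing) {
 *		if (ctx->sequence != sequence)
 *			continue;
 *		commit_lsn = ctx->commit_lsn;
 *		break;
 *	}
 *	spin_unlock(&cil->xc_push_lock);
 *
 * If the commit LSN has not been set yet, the checkpoint is still
 * committing and the caller would sleep on xc_commit_wait and retry.
 */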
293
294/*
295 * The amount of log space we allow the CIL to aggregate is difficult to size.
296 * Whatever we choose, we have to make sure we can get a reservation for the
297 * log space effectively: it must be large enough to capture sufficient relogging
298 * to reduce log buffer IO significantly, but not so large that it overruns the
299 * log or induces too much latency when writing out through the iclogs. We
300 * track both space consumed and the number of vectors in the checkpoint
301 * context, so we need to decide which to use for limiting.
302 *
303 * Every log buffer we write out during a push needs a header reserved, which
304 * is at least one sector and more for v2 logs. Hence we need a reservation of
305 * at least 512 bytes per 32k of log space just for the LR headers. That means
306 * 16KB of reservation per megabyte of delayed logging space we will consume,
307 * plus various headers.  The number of headers will vary based on the number of
308 * io vectors, so limiting on a specific number of vectors is going to result
309 * in transactions of varying size. IOWs, it is more consistent to track and
310 * limit space consumed in the log rather than by the number of objects being
311 * logged in order to prevent checkpoint ticket overruns.
312 *
313 * Further, use of static reservations through the log grant mechanism is
314 * problematic. It introduces a lot of complexity (e.g. reserve grant vs write
315 * grant) and a significant deadlock potential because regranting write space
316 * can block on log pushes. Hence if we have to regrant log space during a log
317 * push, we can deadlock.
318 *
319 * However, we can avoid this by use of a dynamic "reservation stealing"
320 * technique during transaction commit whereby unused reservation space in the
321 * transaction ticket is transferred to the CIL ctx commit ticket to cover the
322 * space needed by the checkpoint transaction. This means that we never need to
323 * specifically reserve space for the CIL checkpoint transaction, nor do we
324 * need to regrant space once the checkpoint completes. This also means the
325 * checkpoint transaction ticket is specific to the checkpoint context, rather
326 * than the CIL itself.
327 *
328 * With dynamic reservations, we can effectively make up arbitrary limits for
329 * the checkpoint size so long as they don't violate any other size rules.
330 * Recovery imposes a rule that no transaction exceed half the log, so we are
331 * limited by that.  Furthermore, the log transaction reservation subsystem
332 * tries to keep 25% of the log free, so we need to keep below that limit or we
333 * risk running out of free log space to start any new transactions.
334 *
335 * In order to keep background CIL push efficient, we only need to ensure the
336 * CIL is large enough to maintain sufficient in-memory relogging to avoid
337 * repeated physical writes of frequently modified metadata. If we allow the CIL
338 * to grow to a substantial fraction of the log, then we may be pinning hundreds
339 * of megabytes of metadata in memory until the CIL flushes. This can cause
340 * issues when we are running low on memory - pinned memory cannot be reclaimed,
341 * and the CIL consumes a lot of memory. Hence we need to set an upper physical
342 * size limit for the CIL that limits the maximum amount of memory pinned by the
343 * CIL but does not limit performance by reducing relogging efficiency
344 * significantly.
345 *
346 * As such, the CIL push threshold ends up being the smaller of two thresholds:
347 * - a threshold large enough that it allows the CIL to be pushed and progress to be
348 *   made without excessive blocking of incoming transaction commits. This is
349 *   defined to be 12.5% of the log space - half the 25% push threshold of the
350 *   AIL.
351 * - small enough that it doesn't pin excessive amounts of memory but maintains
352 *   close to peak relogging efficiency. This is defined to be 16x the iclog
353 *   buffer window (32MB) as measurements have shown this to be roughly the
354 *   point of diminishing performance increases under highly concurrent
355 *   modification workloads.
356 *
357 * To prevent the CIL from overflowing upper commit size bounds, we introduce a
358 * new threshold at which we block committing transactions until the background
359 * CIL commit commences and switches to a new context. While this is not a hard
360 * limit, it forces the process committing a transaction to the CIL to block and
361 * yield the CPU, giving the CIL push work a chance to be scheduled and start
362 * work. This prevents a process running lots of transactions from overfilling
363 * the CIL because it is not yielding the CPU. We set the blocking limit at
364 * twice the background push space threshold so we keep in line with the AIL
365 * push thresholds.
366 *
367 * Note: this is not a -hard- limit as blocking is applied after the transaction
368 * is inserted into the CIL and the push has been triggered. It is largely a
369 * throttling mechanism that allows the CIL push to be scheduled and run. A hard
370 * limit will be difficult to implement without introducing global serialisation
371 * in the CIL commit fast path, and it's not at all clear that we actually need
372 * such hard limits given the ~7 years we've run without a hard limit before
373 * finding the first situation where a checkpoint size overflow actually
374 * occurred. Hence the simple throttle, and an ASSERT check to tell us that
375 * we've overrun the max size.
376 */
377#define XLOG_CIL_SPACE_LIMIT(log)	\
378	min_t(int, (log)->l_logsize >> 3, BBTOB(XLOG_TOTAL_REC_SHIFT(log)) << 4)
379
380#define XLOG_CIL_BLOCKING_SPACE_LIMIT(log)	\
381	(XLOG_CIL_SPACE_LIMIT(log) * 2)
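
/*
 * Worked example under the assumptions in the comment above (32MB as the
 * "16x the iclog buffer window" cap): for a 2GB log, 12.5% of the log is
 * 256MB, so the 32MB cap wins and XLOG_CIL_SPACE_LIMIT() is 32MB with a
 * 64MB blocking limit; for a 64MB log, 12.5% is 8MB, which is below the
 * cap, so the limits become 8MB and 16MB respectively.
 */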
382
383/*
384 * ticket grant locks, queues and accounting have their own cachelines
385 * as these are quite hot and can be operated on concurrently.
386 */
387struct xlog_grant_head {
388	spinlock_t		lock ____cacheline_aligned_in_smp;
389	struct list_head	waiters;
390	atomic64_t		grant;
391};
392
393/*
394 * The reservation head lsn is not made up of a cycle number and block number.
395 * Instead, it uses a cycle number and byte number.  Logs don't expect to
396 * overflow 31 bits worth of byte offset, so using a byte number will mean
397 * that round off problems won't occur when releasing partial reservations.
398 */
399struct xlog {
400	/* The following fields don't need locking */
401	struct xfs_mount	*l_mp;	        /* mount point */
402	struct xfs_ail		*l_ailp;	/* AIL log is working with */
403	struct xfs_cil		*l_cilp;	/* CIL log is working with */
404	struct xfs_buftarg	*l_targ;        /* buftarg of log */
405	struct workqueue_struct	*l_ioend_workqueue; /* for I/O completions */
406	struct delayed_work	l_work;		/* background flush work */
407	long			l_opstate;	/* operational state */
408	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
409	struct list_head	*l_buf_cancel_table;
410	int			l_iclog_hsize;  /* size of iclog header */
411	int			l_iclog_heads;  /* # of iclog header sectors */
412	uint			l_sectBBsize;   /* sector size in BBs (2^n) */
413	int			l_iclog_size;	/* size of log in bytes */
414	int			l_iclog_bufs;	/* number of iclog buffers */
415	xfs_daddr_t		l_logBBstart;   /* start block of log */
416	int			l_logsize;      /* size of log in bytes */
417	int			l_logBBsize;    /* size of log in BB chunks */
418
419	/* The following block of fields are changed while holding icloglock */
420	wait_queue_head_t	l_flush_wait ____cacheline_aligned_in_smp;
421						/* waiting for iclog flush */
422	int			l_covered_state;/* state of "covering disk
423						 * log entries" */
424	xlog_in_core_t		*l_iclog;       /* head log queue	*/
425	spinlock_t		l_icloglock;    /* grab to change iclog state */
426	int			l_curr_cycle;   /* Cycle number of log writes */
427	int			l_prev_cycle;   /* Cycle number before last
428						 * block increment */
429	int			l_curr_block;   /* current logical log block */
430	int			l_prev_block;   /* previous logical log block */
431
432	/*
433	 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
434	 * read without needing to hold specific locks. To avoid operations
435	 * contending with other hot objects, place each of them on a separate
436	 * cacheline.
437	 */
438	/* lsn of last LR on disk */
439	atomic64_t		l_last_sync_lsn ____cacheline_aligned_in_smp;
440	/* lsn of 1st LR with unflushed buffers */
441	atomic64_t		l_tail_lsn ____cacheline_aligned_in_smp;
442
443	struct xlog_grant_head	l_reserve_head;
444	struct xlog_grant_head	l_write_head;
445
446	struct xfs_kobj		l_kobj;
447
448	/* log recovery lsn tracking (for buffer submission) */
449	xfs_lsn_t		l_recovery_lsn;
450
451	uint32_t		l_iclog_roundoff;/* padding roundoff */
452
453	/* Users of log incompat features should take a read lock. */
454	struct rw_semaphore	l_incompat_users;
455};
456
457/*
458 * Bits for operational state
459 */
460#define XLOG_ACTIVE_RECOVERY	0	/* in the middle of recovery */
461#define XLOG_RECOVERY_NEEDED	1	/* log was recovered */
462#define XLOG_IO_ERROR		2	/* log hit an I/O error and is being
463				   shut down */
464#define XLOG_TAIL_WARN		3	/* log tail verify warning issued */
465
466static inline bool
467xlog_recovery_needed(struct xlog *log)
468{
469	return test_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
470}
471
472static inline bool
473xlog_in_recovery(struct xlog *log)
474{
475	return test_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
476}
477
478static inline bool
479xlog_is_shutdown(struct xlog *log)
480{
481	return test_bit(XLOG_IO_ERROR, &log->l_opstate);
482}
483
484/*
485 * Wait until xlog_force_shutdown() has marked the log as shut down
486 * so xlog_is_shutdown() will always return true.
487 */
488static inline void
489xlog_shutdown_wait(
490	struct xlog	*log)
491{
492	wait_var_event(&log->l_opstate, xlog_is_shutdown(log));
493}
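
/*
 * The wake-up side is presumed to live in xlog_force_shutdown(): once
 * XLOG_IO_ERROR has been set in l_opstate, something along the lines of
 *
 *	wake_up_var(&log->l_opstate);
 *
 * is what lets the wait_var_event() above proceed.
 */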
494
495/* common routines */
496extern int
497xlog_recover(
498	struct xlog		*log);
499extern int
500xlog_recover_finish(
501	struct xlog		*log);
502extern void
503xlog_recover_cancel(struct xlog *);
504
505extern __le32	 xlog_cksum(struct xlog *log, struct xlog_rec_header *rhead,
506			    char *dp, int size);
507
508extern struct kmem_cache *xfs_log_ticket_cache;
509struct xlog_ticket *xlog_ticket_alloc(struct xlog *log, int unit_bytes,
510		int count, bool permanent);
511
512void	xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
513void	xlog_print_trans(struct xfs_trans *);
514int	xlog_write(struct xlog *log, struct xfs_cil_ctx *ctx,
515		struct list_head *lv_chain, struct xlog_ticket *tic,
516		uint32_t len);
517void	xfs_log_ticket_ungrant(struct xlog *log, struct xlog_ticket *ticket);
518void	xfs_log_ticket_regrant(struct xlog *log, struct xlog_ticket *ticket);
519
520void xlog_state_switch_iclogs(struct xlog *log, struct xlog_in_core *iclog,
521		int eventual_size);
522int xlog_state_release_iclog(struct xlog *log, struct xlog_in_core *iclog,
523		struct xlog_ticket *ticket);
524
525/*
526 * When we crack an atomic LSN, we sample it first so that the value will not
527 * change while we are cracking it into the component values. This means we
528 * will always get consistent component values to work from. This should always
529 * be used to sample and crack LSNs that are stored and updated in atomic
530 * variables.
531 */
532static inline void
533xlog_crack_atomic_lsn(atomic64_t *lsn, uint *cycle, uint *block)
534{
535	xfs_lsn_t val = atomic64_read(lsn);
536
537	*cycle = CYCLE_LSN(val);
538	*block = BLOCK_LSN(val);
539}
540
541/*
542 * Calculate and assign a value to an atomic LSN variable from component pieces.
543 */
544static inline void
545xlog_assign_atomic_lsn(atomic64_t *lsn, uint cycle, uint block)
546{
547	atomic64_set(lsn, xlog_assign_lsn(cycle, block));
548}
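
/*
 * A small usage sketch (hypothetical values): assignment and cracking are
 * inverses, and the single atomic64_read() in xlog_crack_atomic_lsn()
 * guarantees the cycle/block pair comes from one consistent LSN rather than
 * two separate reads that could straddle an update.
 *
 *	atomic64_t lsn;
 *	uint cycle, block;
 *
 *	xlog_assign_atomic_lsn(&lsn, 5, 1024);
 *	xlog_crack_atomic_lsn(&lsn, &cycle, &block);
 *	ASSERT(cycle == 5 && block == 1024);
 */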
549
550/*
551 * When we crack the grant head, we sample it first so that the value will not
552 * change while we are cracking it into the component values. This means we
553 * will always get consistent component values to work from.
554 */
555static inline void
556xlog_crack_grant_head_val(int64_t val, int *cycle, int *space)
557{
558	*cycle = val >> 32;
559	*space = val & 0xffffffff;
560}
561
562static inline void
563xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
564{
565	xlog_crack_grant_head_val(atomic64_read(head), cycle, space);
566}
567
568static inline int64_t
569xlog_assign_grant_head_val(int cycle, int space)
570{
571	return ((int64_t)cycle << 32) | space;
572}
573
574static inline void
575xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
576{
577	atomic64_set(head, xlog_assign_grant_head_val(cycle, space));
578}
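
/*
 * Example of the cycle/bytes encoding (illustrative numbers): cycle 3 with a
 * byte offset of 123456 packs into the upper and lower halves of the 64-bit
 * grant value, and cracking it recovers the same pair.
 *
 *	int cycle, space;
 *	int64_t val = xlog_assign_grant_head_val(3, 123456);
 *
 *	xlog_crack_grant_head_val(val, &cycle, &space);
 *	ASSERT(cycle == 3 && space == 123456);
 */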
579
580/*
581 * Committed Item List interfaces
582 */
583int	xlog_cil_init(struct xlog *log);
584void	xlog_cil_init_post_recovery(struct xlog *log);
585void	xlog_cil_destroy(struct xlog *log);
586bool	xlog_cil_empty(struct xlog *log);
587void	xlog_cil_commit(struct xlog *log, struct xfs_trans *tp,
588			xfs_csn_t *commit_seq, bool regrant);
589void	xlog_cil_set_ctx_write_state(struct xfs_cil_ctx *ctx,
590			struct xlog_in_core *iclog);
591
592
593/*
594 * CIL force routines
595 */
596void xlog_cil_flush(struct xlog *log);
597xfs_lsn_t xlog_cil_force_seq(struct xlog *log, xfs_csn_t sequence);
598
599static inline void
600xlog_cil_force(struct xlog *log)
601{
602	xlog_cil_force_seq(log, log->l_cilp->xc_current_sequence);
603}
604
605/*
606 * Wrapper function for waiting on a wait queue serialised against wakeups
607 * by a spinlock. This matches the semantics of all the wait queues used in the
608 * log code.
609 */
610static inline void
611xlog_wait(
612	struct wait_queue_head	*wq,
613	struct spinlock		*lock)
614		__releases(lock)
615{
616	DECLARE_WAITQUEUE(wait, current);
617
618	add_wait_queue_exclusive(wq, &wait);
619	__set_current_state(TASK_UNINTERRUPTIBLE);
620	spin_unlock(lock);
621	schedule();
622	remove_wait_queue(wq, &wait);
623}
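
/*
 * Typical usage sketch (field names as used elsewhere in this file): the
 * waiter checks its condition and sleeps with the lock held, and xlog_wait()
 * drops the lock for it; the waker holds the same lock across the wakeup so
 * the two cannot race.
 *
 *	spin_lock(&log->l_icloglock);
 *	if (iclog->ic_state != XLOG_STATE_ACTIVE)
 *		xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
 *	else
 *		spin_unlock(&log->l_icloglock);
 *
 * and on the wake-up side:
 *
 *	spin_lock(&log->l_icloglock);
 *	wake_up_all(&iclog->ic_force_wait);
 *	spin_unlock(&log->l_icloglock);
 */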
624
625int xlog_wait_on_iclog(struct xlog_in_core *iclog);
626
627/*
628 * The LSN is valid so long as it is behind the current LSN. If it isn't, this
629 * means that the next log record that includes this metadata could have a
630 * smaller LSN. In turn, this means that the modification in the log would not
631 * replay.
632 */
633static inline bool
634xlog_valid_lsn(
635	struct xlog	*log,
636	xfs_lsn_t	lsn)
637{
638	int		cur_cycle;
639	int		cur_block;
640	bool		valid = true;
641
642	/*
643	 * First, sample the current lsn without locking to avoid added
644	 * contention from metadata I/O. The current cycle and block are updated
645	 * (in xlog_state_switch_iclogs()) and read here in a particular order
646	 * to avoid false negatives (e.g., thinking the metadata LSN is valid
647	 * when it is not).
648	 *
649	 * The current block is always rewound before the cycle is bumped in
650	 * xlog_state_switch_iclogs() to ensure the current LSN is never seen in
651	 * a transiently forward state. Instead, we can see the LSN in a
652	 * transiently behind state if we happen to race with a cycle wrap.
653	 */
654	cur_cycle = READ_ONCE(log->l_curr_cycle);
655	smp_rmb();
656	cur_block = READ_ONCE(log->l_curr_block);
657
658	if ((CYCLE_LSN(lsn) > cur_cycle) ||
659	    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block)) {
660		/*
661		 * If the metadata LSN appears invalid, it's possible the check
662		 * above raced with a wrap to the next log cycle. Grab the lock
663		 * to check for sure.
664		 */
665		spin_lock(&log->l_icloglock);
666		cur_cycle = log->l_curr_cycle;
667		cur_block = log->l_curr_block;
668		spin_unlock(&log->l_icloglock);
669
670		if ((CYCLE_LSN(lsn) > cur_cycle) ||
671		    (CYCLE_LSN(lsn) == cur_cycle && BLOCK_LSN(lsn) > cur_block))
672			valid = false;
673	}
674
675	return valid;
676}
677
678/*
679 * Log vector and shadow buffers can be large, so we need to use kvmalloc() here
680 * to ensure success. Unfortunately, kvmalloc() only allows GFP_KERNEL contexts
681 * to fall back to vmalloc, so we can't actually do anything useful with gfp
682 * flags to control the kmalloc() behaviour within kvmalloc(). Hence kmalloc()
683 * will do direct reclaim and compaction in the slow path, both of which are
684 * horrendously expensive. We just want kmalloc to fail fast and fall back to
685 * vmalloc if it can't get something straight away from the free lists or
686 * buddy allocator. Hence we have to open code kvmalloc ourselves here.
687 *
688 * This assumes that the caller uses memalloc_nofs_save task context here, so
689 * despite the use of GFP_KERNEL here, we are going to be doing GFP_NOFS
690 * allocations. This is actually the only way to make vmalloc() do GFP_NOFS
691 * allocations, so let's just all pretend this is a GFP_KERNEL context
692 * operation....
693 */
694static inline void *
695xlog_kvmalloc(
696	size_t		buf_size)
697{
698	gfp_t		flags = GFP_KERNEL;
699	void		*p;
700
701	flags &= ~__GFP_DIRECT_RECLAIM;
702	flags |= __GFP_NOWARN | __GFP_NORETRY;
703	do {
704		p = kmalloc(buf_size, flags);
705		if (!p)
706			p = vmalloc(buf_size);
707	} while (!p);
708
709	return p;
710}
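
/*
 * Usage sketch (caller-side names such as buf_size and lv_buf are
 * hypothetical): per the comment above, the caller is expected to be inside
 * a memalloc_nofs_save() section, and buffers allocated here are freed with
 * kvfree() since they may have come from either allocator.
 *
 *	unsigned int nofs_flags = memalloc_nofs_save();
 *	void *lv_buf = xlog_kvmalloc(buf_size);
 *
 *	...
 *
 *	memalloc_nofs_restore(nofs_flags);
 *	kvfree(lv_buf);
 */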
711
712#endif	/* __XFS_LOG_PRIV_H__ */
713