// SPDX-License-Identifier: GPL-2.0+
/*
 * Buffer/page management specific to NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi and Seiji Kihara.
 */

#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/page-flags.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/gfp.h>
#include "nilfs.h"
#include "page.h"
#include "mdt.h"


#define NILFS_BUFFER_INHERENT_BITS					\
	(BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) |	\
	 BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))

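/*
 * __nilfs_get_page_block - return the buffer head covering a block in a page
 *
 * Creates empty buffers on @page if it has none, then looks up the buffer
 * head whose position within the page corresponds to the file block number
 * @block.  The buffer is touched (marked accessed) and waited on, so any
 * I/O in flight on it has completed by the time this returns.
 */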
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
		       int blkbits, unsigned long b_state)
{
	unsigned long first_block;
	struct buffer_head *bh;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, b_state);

	first_block = (unsigned long)index << (PAGE_SHIFT - blkbits);
	bh = nilfs_page_get_nth_block(page, block - first_block);

	touch_buffer(bh);
	wait_on_buffer(bh);
	return bh;
}

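/**
 * nilfs_grab_buffer - get the buffer head covering a block, creating it if needed
 * @inode: inode that determines the block size
 * @mapping: page cache to look up or insert the page in
 * @blkoff: file block number
 * @b_state: initial buffer state bits for newly created buffers
 *
 * Return: the buffer head covering @blkoff, with the underlying page locked
 * and an extra page reference held, or NULL on failure.  The caller must
 * unlock and release the page and drop the buffer reference when done.
 */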
struct buffer_head *nilfs_grab_buffer(struct inode *inode,
				      struct address_space *mapping,
				      unsigned long blkoff,
				      unsigned long b_state)
{
	int blkbits = inode->i_blkbits;
	pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits);
	struct page *page;
	struct buffer_head *bh;

	page = grab_cache_page(mapping, index);
	if (unlikely(!page))
		return NULL;

	bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state);
	if (unlikely(!bh)) {
		unlock_page(page);
		put_page(page);
		return NULL;
	}
	return bh;
}
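
/*
 * Illustrative usage sketch (example only, not part of the original file):
 * a typical caller obtains the buffer with nilfs_grab_buffer(), fills or
 * reads it, and then drops the page lock, the page reference, and the
 * buffer reference, in the style of nilfs_mdt_submit_block().  The
 * function below is hypothetical.
 */
#if 0	/* example sketch only */
static int example_get_block(struct inode *inode, unsigned long blkoff)
{
	struct buffer_head *bh;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;

	/* ... map the buffer and/or submit I/O on it here ... */

	unlock_page(bh->b_page);
	put_page(bh->b_page);
	brelse(bh);
	return 0;
}
#endif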

/**
 * nilfs_forget_buffer - discard dirty state
 * @bh: buffer head of the buffer to be discarded
 */
void nilfs_forget_buffer(struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	const unsigned long clear_bits =
		(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
		 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
		 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

	lock_buffer(bh);
	set_mask_bits(&bh->b_state, clear_bits, 0);
	if (nilfs_page_buffers_clean(page))
		__nilfs_clear_page_dirty(page);

	bh->b_blocknr = -1;
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	unlock_buffer(bh);
	brelse(bh);
}

/**
 * nilfs_copy_buffer - copy buffer data and flags
 * @dbh: destination buffer
 * @sbh: source buffer
 */
void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
{
	void *kaddr0, *kaddr1;
	unsigned long bits;
	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
	struct buffer_head *bh;

	kaddr0 = kmap_atomic(spage);
	kaddr1 = kmap_atomic(dpage);
	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
	kunmap_atomic(kaddr1);
	kunmap_atomic(kaddr0);

	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
	dbh->b_blocknr = sbh->b_blocknr;
	dbh->b_bdev = sbh->b_bdev;

	bh = dbh;
	bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
	while ((bh = bh->b_this_page) != dbh) {
		lock_buffer(bh);
		bits &= bh->b_state;
		unlock_buffer(bh);
	}
	if (bits & BIT(BH_Uptodate))
		SetPageUptodate(dpage);
	else
		ClearPageUptodate(dpage);
	if (bits & BIT(BH_Mapped))
		SetPageMappedToDisk(dpage);
	else
		ClearPageMappedToDisk(dpage);
}

/**
 * nilfs_page_buffers_clean - check whether a page has dirty buffers
 * @page: page to be checked
 *
 * nilfs_page_buffers_clean() returns zero if the page has dirty buffers.
 * Otherwise, it returns a non-zero value.
 */
int nilfs_page_buffers_clean(struct page *page)
{
	struct buffer_head *bh, *head;

	bh = head = page_buffers(page);
	do {
		if (buffer_dirty(bh))
			return 0;
		bh = bh->b_this_page;
	} while (bh != head);
	return 1;
}

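/**
 * nilfs_page_bug - dump diagnostic information about a broken page
 * @page: page to examine (may be NULL)
 *
 * Prints the reference count, index, flags, mapping, and owner inode number
 * of @page, followed by the state of each buffer head attached to it, to
 * help diagnose page/buffer inconsistencies.
 */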
void nilfs_page_bug(struct page *page)
{
	struct address_space *m;
	unsigned long ino;

	if (unlikely(!page)) {
		printk(KERN_CRIT "NILFS_PAGE_BUG(NULL)\n");
		return;
	}

	m = page->mapping;
	ino = m ? m->host->i_ino : 0;

	printk(KERN_CRIT "NILFS_PAGE_BUG(%p): cnt=%d index#=%llu flags=0x%lx mapping=%p ino=%lu\n",
	       page, page_ref_count(page),
	       (unsigned long long)page->index, page->flags, m, ino);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		int i = 0;

		bh = head = page_buffers(page);
		do {
			printk(KERN_CRIT
			       " BH[%d] %p: cnt=%d block#=%llu state=0x%lx\n",
			       i++, bh, atomic_read(&bh->b_count),
			       (unsigned long long)bh->b_blocknr, bh->b_state);
			bh = bh->b_this_page;
		} while (bh != head);
	}
}

/**
 * nilfs_copy_page - copy the page with buffers
 * @dst: destination page
 * @src: source page
 * @copy_dirty: flag whether to copy dirty states on the page's buffer heads.
 *
 * This function is for both data pages and btnode pages.  The dirty flag
 * must be handled by the caller.  The page must not be under I/O.
 * Both the source and destination pages must be locked.
 */
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
	struct buffer_head *dbh, *dbufs, *sbh;
	unsigned long mask = NILFS_BUFFER_INHERENT_BITS;

	BUG_ON(PageWriteback(dst));

	sbh = page_buffers(src);
	if (!page_has_buffers(dst))
		create_empty_buffers(dst, sbh->b_size, 0);

	if (copy_dirty)
		mask |= BIT(BH_Dirty);

	dbh = dbufs = page_buffers(dst);
	do {
		lock_buffer(sbh);
		lock_buffer(dbh);
		dbh->b_state = sbh->b_state & mask;
		dbh->b_blocknr = sbh->b_blocknr;
		dbh->b_bdev = sbh->b_bdev;
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);

	copy_highpage(dst, src);

	if (PageUptodate(src) && !PageUptodate(dst))
		SetPageUptodate(dst);
	else if (!PageUptodate(src) && PageUptodate(dst))
		ClearPageUptodate(dst);
	if (PageMappedToDisk(src) && !PageMappedToDisk(dst))
		SetPageMappedToDisk(dst);
	else if (!PageMappedToDisk(src) && PageMappedToDisk(dst))
		ClearPageMappedToDisk(dst);

	do {
		unlock_buffer(sbh);
		unlock_buffer(dbh);
		sbh = sbh->b_this_page;
		dbh = dbh->b_this_page;
	} while (dbh != dbufs);
}

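/**
 * nilfs_copy_dirty_pages - copy dirty pages to another page cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * Walks the folios tagged dirty in @smap, grabs a folio at the same index
 * in @dmap, copies buffer contents and states across, and marks the
 * destination folio dirty.
 *
 * Return: 0 on success, or a negative error code if grabbing a destination
 * folio fails.
 */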
int nilfs_copy_dirty_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;
	int err = 0;

	folio_batch_init(&fbatch);
repeat:
	if (!filemap_get_folios_tag(smap, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch))
		return 0;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;

		folio_lock(folio);
		if (unlikely(!folio_test_dirty(folio)))
			NILFS_PAGE_BUG(&folio->page, "inconsistent dirty state");

		dfolio = filemap_grab_folio(dmap, folio->index);
		if (unlikely(IS_ERR(dfolio))) {
			/* No empty page is added to the page cache */
			folio_unlock(folio);
			err = PTR_ERR(dfolio);
			break;
		}
		if (unlikely(!folio_buffers(folio)))
			NILFS_PAGE_BUG(&folio->page,
				       "found empty page in dat page cache");

		nilfs_copy_page(&dfolio->page, &folio->page, 1);
		filemap_dirty_folio(folio_mapping(dfolio), dfolio);

		folio_unlock(dfolio);
		folio_put(dfolio);
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	if (likely(!err))
		goto repeat;
	return err;
}

/**
 * nilfs_copy_back_pages - copy back pages to original cache from shadow cache
 * @dmap: destination page cache
 * @smap: source page cache
 *
 * No pages must be added to the cache during this process.
 * This must be ensured by the caller.
 */
void nilfs_copy_back_pages(struct address_space *dmap,
			   struct address_space *smap)
{
	struct folio_batch fbatch;
	unsigned int i, n;
	pgoff_t start = 0;

	folio_batch_init(&fbatch);
repeat:
	n = filemap_get_folios(smap, &start, ~0UL, &fbatch);
	if (!n)
		return;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i], *dfolio;
		pgoff_t index = folio->index;

		folio_lock(folio);
		dfolio = filemap_lock_folio(dmap, index);
		if (!IS_ERR(dfolio)) {
			/* overwrite existing folio in the destination cache */
			WARN_ON(folio_test_dirty(dfolio));
			nilfs_copy_page(&dfolio->page, &folio->page, 0);
			folio_unlock(dfolio);
			folio_put(dfolio);
			/* Do we not need to remove folio from smap here? */
		} else {
			struct folio *f;

			/* move the folio to the destination cache */
			xa_lock_irq(&smap->i_pages);
			f = __xa_erase(&smap->i_pages, index);
			WARN_ON(folio != f);
			smap->nrpages--;
			xa_unlock_irq(&smap->i_pages);

			xa_lock_irq(&dmap->i_pages);
			f = __xa_store(&dmap->i_pages, index, folio, GFP_NOFS);
			if (unlikely(f)) {
				/* Probably -ENOMEM */
				folio->mapping = NULL;
				folio_put(folio);
			} else {
				folio->mapping = dmap;
				dmap->nrpages++;
				if (folio_test_dirty(folio))
					__xa_set_mark(&dmap->i_pages, index,
							PAGECACHE_TAG_DIRTY);
			}
			xa_unlock_irq(&dmap->i_pages);
		}
		folio_unlock(folio);
	}
	folio_batch_release(&fbatch);
	cond_resched();

	goto repeat;
}

/**
 * nilfs_clear_dirty_pages - discard dirty pages in address space
 * @mapping: address space with dirty pages for discarding
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
{
	struct folio_batch fbatch;
	unsigned int i;
	pgoff_t index = 0;

	folio_batch_init(&fbatch);

	while (filemap_get_folios_tag(mapping, &index, (pgoff_t)-1,
				PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);

			/*
			 * This folio may have been removed from the address
			 * space by truncation or invalidation when the lock
			 * was acquired.  Skip processing in that case.
			 */
			if (likely(folio->mapping == mapping))
				nilfs_clear_dirty_page(&folio->page, silent);

			folio_unlock(folio);
		}
		folio_batch_release(&fbatch);
		cond_resched();
	}
}

/**
 * nilfs_clear_dirty_page - discard dirty page
 * @page: dirty page that will be discarded
 * @silent: suppress [true] or print [false] warning messages
 */
void nilfs_clear_dirty_page(struct page *page, bool silent)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;

	BUG_ON(!PageLocked(page));

	if (!silent)
		nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu",
			   page_offset(page), inode->i_ino);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);

	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;
		const unsigned long clear_bits =
			(BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
			 BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
			 BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));

		bh = head = page_buffers(page);
		do {
			lock_buffer(bh);
			if (!silent)
				nilfs_warn(sb,
					   "discard dirty block: blocknr=%llu, size=%zu",
					   (u64)bh->b_blocknr, bh->b_size);

			set_mask_bits(&bh->b_state, clear_bits, 0);
			unlock_buffer(bh);
		} while (bh = bh->b_this_page, bh != head);
	}

	__nilfs_clear_page_dirty(page);
}

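/**
 * nilfs_page_count_clean_buffers - count clean buffers in a byte range
 * @page: page whose buffers are counted
 * @from: start offset within the page, in bytes
 * @to: end offset within the page, in bytes
 *
 * Return: the number of buffers that overlap the byte range [@from, @to)
 * and are not dirty.
 */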
unsigned int nilfs_page_count_clean_buffers(struct page *page,
					    unsigned int from, unsigned int to)
{
	unsigned int block_start, block_end;
	struct buffer_head *bh, *head;
	unsigned int nc = 0;

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + bh->b_size;
		if (block_end > from && block_start < to && !buffer_dirty(bh))
			nc++;
	}
	return nc;
}

/*
 * NILFS2 needs __nilfs_clear_page_dirty() in the following two cases:
 *
 * 1) For B-tree node pages and data pages of the DAT file, NILFS2 clears
 *    the dirty flag of pages when it copies back pages from the shadow
 *    cache to the original cache.
 *
 * 2) Some B-tree operations like insertion or deletion may dispose buffers
 *    in dirty state, and this needs to cancel the dirty state of their
 *    pages.
 */
int __nilfs_clear_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;

	if (mapping) {
		xa_lock_irq(&mapping->i_pages);
		if (test_bit(PG_dirty, &page->flags)) {
			__xa_clear_mark(&mapping->i_pages, page_index(page),
					     PAGECACHE_TAG_DIRTY);
			xa_unlock_irq(&mapping->i_pages);
			return clear_page_dirty_for_io(page);
		}
		xa_unlock_irq(&mapping->i_pages);
		return 0;
	}
	return TestClearPageDirty(page);
}

/**
 * nilfs_find_uncommitted_extent - find extent of uncommitted data
 * @inode: inode
 * @start_blk: start block offset (in)
 * @blkoff: start offset of the found extent (out)
 *
 * This function searches for an extent of buffers marked "delayed" that
 * starts at a block offset equal to or larger than @start_blk.  If such
 * an extent is found, this function stores its start offset in @blkoff
 * and returns its length in blocks.  Otherwise, zero is returned.
 */
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
					    sector_t start_blk,
					    sector_t *blkoff)
{
	unsigned int i, nr_folios;
	pgoff_t index;
	unsigned long length = 0;
	struct folio_batch fbatch;
	struct folio *folio;

	if (inode->i_mapping->nrpages == 0)
		return 0;

	index = start_blk >> (PAGE_SHIFT - inode->i_blkbits);

	folio_batch_init(&fbatch);

repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &index, ULONG_MAX,
			&fbatch);
	if (nr_folios == 0)
		return length;

	i = 0;
	do {
		folio = fbatch.folios[i];

		folio_lock(folio);
		if (folio_buffers(folio)) {
			struct buffer_head *bh, *head;
			sector_t b;

			/*
			 * Widen the index before shifting so the block
			 * number cannot overflow on 32-bit systems, where
			 * pgoff_t is only 32 bits wide.
			 */
			b = (sector_t)folio->index << (PAGE_SHIFT - inode->i_blkbits);
			bh = head = folio_buffers(folio);
			do {
				if (b < start_blk)
					continue;
				if (buffer_delay(bh)) {
					if (length == 0)
						*blkoff = b;
					length++;
				} else if (length > 0) {
					goto out_locked;
				}
			} while (++b, bh = bh->b_this_page, bh != head);
		} else {
			if (length > 0)
				goto out_locked;
		}
		folio_unlock(folio);

	} while (++i < nr_folios);

	folio_batch_release(&fbatch);
	cond_resched();
	goto repeat;

out_locked:
	folio_unlock(folio);
	folio_batch_release(&fbatch);
	return length;
}
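
/*
 * Illustrative usage sketch (example only, not part of the original file):
 * walking every delayed extent of an inode by repeatedly advancing the
 * search offset, similar in spirit to how the fiemap path consumes this
 * helper.  The function below is hypothetical.
 */
#if 0	/* example sketch only */
static void example_scan_delayed_extents(struct inode *inode)
{
	sector_t blkoff, start = 0;
	unsigned long len;

	while ((len = nilfs_find_uncommitted_extent(inode, start, &blkoff))) {
		pr_info("delayed extent: blkoff=%llu len=%lu\n",
			(unsigned long long)blkoff, len);
		start = blkoff + len;	/* continue past this extent */
	}
}
#endif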
550