xref: /kernel/linux/linux-6.6/fs/gfs2/aops.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
			     size_t from, size_t len)
{
	struct buffer_head *head = folio_buffers(folio);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	size_t to = from + len;
	size_t start, end;

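	/*
	 * The (bh != head || !start) condition walks the folio's circular
	 * buffer list exactly once, journaling every buffer that overlaps
	 * the byte range [from, from + len).
	 */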
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add blocks to the file (ignored: we never
 *          allocate here)
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
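	/*
	 * The block was looked up without allocating (final argument zero),
	 * so an unmapped result means a hole where writeback expected data.
	 */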
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_page
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page(), except that it also
 * writes folios that lie outside of i_size.
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				 struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc, end_buffer_async_write);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

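	/*
	 * A checked folio was dirtied under a transaction: give it buffers
	 * if it has none, and journal all of them before it is written.
	 */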
	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			folio_create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
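	/*
	 * A checked folio needs a transaction, which we cannot start here,
	 * and a non-NULL journal_info means we are already inside one;
	 * either way, redirty the folio and let writepages handle it.
	 */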
	if (folio_test_checked(folio) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_write_folio(folio, wbc);

out_ignore:
	folio_redirty_for_writepage(wbc, folio);
out:
	folio_unlock(folio);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated with the index of the last folio processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct folio_batch *fbatch,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	int nr_pages = 0;
	int nr_folios = folio_batch_count(fbatch);

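	/*
	 * Size the transaction to cover every block in the batch, since
	 * __gfs2_jdata_write_folio() may journal all of a folio's buffers.
	 */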
	for (i = 0; i < nr_folios; i++)
		nr_pages += folio_nr_pages(fbatch->folios[i]);
	nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
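	/*
	 * For integrity syncs, tag the currently dirty pages as TOWRITE up
	 * front so that pages dirtied while we write cannot livelock us.
	 */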
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
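	/*
	 * For WB_SYNC_ALL, flush the journal so that journaled data makes
	 * it to the log, then write back whatever the flush redirtied.
	 */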
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

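	/*
	 * Stuffed data lives in the dinode block immediately after the
	 * header: copy it out and zero the remainder of the page.
	 */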
	kaddr = kmap_local_page(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_local(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

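	/*
	 * Jdata folios that already carry buffers (or whose blocks are
	 * smaller than a page) must use the buffer-head paths; anything
	 * else can go through iomap.
	 */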
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
                       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;

	do {
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page)) {
			if (PTR_ERR(page) == -EINTR)
				continue;
			return PTR_ERR(page);
		}
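		/*
		 * Trim the copy at the page boundary.  Note that the check
		 * is against @size, not @amt, so this relies on callers not
		 * reading more than a page's worth beyond @offset.
		 */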
		amt = size - copied;
		if (offset + size > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		memcpy_from_page(buf + copied, page, offset, amt);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything that
 *    is slightly inconvenient (such as locking conflicts between the
 *    page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
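		/* nothing to do: ->read_folio() fills in stuffed folios */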
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
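	/* Force the rindex to be re-read on the next rgrp lookup. */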
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
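	/*
	 * If this folio is dirtied inside a transaction, mark it checked so
	 * that writeback knows its buffers still need to be journaled.
	 */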
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

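	/* Stuffed files keep their data in the dinode; report a hole. */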
	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

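/*
 * Forget a jdata buffer: take it off the journal's pending list if it is
 * not pinned (otherwise detach it from the journal/AIL), then clear its
 * state so the underlying block can be released.
 */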
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

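	/*
	 * Only a full invalidation may clear the checked state and release
	 * the folio's metadata; a partial invalidation just discards the
	 * buffers within range.
	 */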
	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

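	/*
	 * All buffers are releasable: detach and free their bufdata (unless
	 * it is still queued as a revoke) before freeing the buffers.
	 */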
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}