xref: /kernel/linux/linux-5.10/fs/ocfs2/mmap.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * mmap.c
 *
 * Code to deal with the mess that is clustered mmap.
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/signal.h>
#include <linux/rbtree.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "aops.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "mmap.h"
#include "super.h"
#include "ocfs2_trace.h"


static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	sigset_t oldset;
	vm_fault_t ret;

	ocfs2_block_signals(&oldset);
	ret = filemap_fault(vmf);
	ocfs2_unblock_signals(&oldset);

	trace_ocfs2_fault(OCFS2_I(vma->vm_file->f_mapping->host)->ip_blkno,
			  vma, vmf->page, vmf->pgoff);
	return ret;
}
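
/*
 * A note on the signal blocking above: ocfs2_block_signals() and
 * ocfs2_unblock_signals() are small helpers in fs/ocfs2/ocfs2.h that mask
 * every signal for the duration of the fault and then restore the caller's
 * mask. Roughly (a sketch, assuming the helpers are still built on
 * sigfillset()/sigprocmask(); see ocfs2.h for the real bodies):
 *
 *	sigset_t blocked, oldset;
 *
 *	sigfillset(&blocked);
 *	sigprocmask(SIG_BLOCK, &blocked, &oldset);	// block everything
 *	...						// run filemap_fault()
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);	// restore previous mask
 */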

static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
			struct buffer_head *di_bh, struct page *page)
{
	int err;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	loff_t pos = page_offset(page);
	unsigned int len = PAGE_SIZE;
	pgoff_t last_index;
	struct page *locked_page = NULL;
	void *fsdata;
	loff_t size = i_size_read(inode);

	last_index = (size - 1) >> PAGE_SHIFT;

	/*
	 * There are cases that lead to the page no longer belonging to the
	 * mapping:
	 * 1) the pagecache truncates locally due to memory pressure;
	 * 2) the pagecache truncates when another node takes an EX lock
	 *    against the inode lock. See ocfs2_data_convert_worker().
	 *
	 * The i_size check doesn't catch the case where nodes truncated and
	 * then re-extended the file. We'll re-check the page mapping after
	 * taking the page lock inside of ocfs2_write_begin_nolock().
	 *
	 * Let the VM retry in these cases.
	 */
	if ((page->mapping != inode->i_mapping) ||
	    (!PageUptodate(page)) ||
	    (page_offset(page) >= size))
		goto out;

	/*
	 * Call ocfs2_write_begin() and ocfs2_write_end() to take
	 * advantage of the allocation code there. We pass a write
	 * length of the whole page (chopped to i_size) to make sure
	 * the whole thing is allocated.
	 *
	 * Since we know the page is up to date, we don't have to
	 * worry about ocfs2_write_begin() skipping some buffer reads
	 * because the "write" would invalidate their data.
	 */
	if (page->index == last_index)
		len = ((size - 1) & ~PAGE_MASK) + 1;
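	/*
	 * Worked example for the length clamp, assuming a 4 KiB page size:
	 * with i_size = 10000, last_index = 9999 >> 12 = 2, and the page at
	 * index 2 gets len = ((10000 - 1) & 4095) + 1 = 1808, i.e. only the
	 * bytes 8192..9999 that actually lie inside i_size are written back.
	 */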

	err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
				       &locked_page, &fsdata, di_bh, page);
	if (err) {
		if (err != -ENOSPC)
			mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}
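	/*
	 * The vmf_error() call above is the generic helper from
	 * include/linux/mm.h: it turns -ENOMEM into VM_FAULT_OOM and any
	 * other error (including the quiet -ENOSPC case) into VM_FAULT_SIGBUS.
	 */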

	if (!locked_page) {
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	err = ocfs2_write_end_nolock(mapping, pos, len, len, fsdata);
	BUG_ON(err != len);
	ret = VM_FAULT_LOCKED;
out:
	return ret;
}
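
/*
 * Summary of the return contract of __ocfs2_page_mkwrite(): VM_FAULT_LOCKED
 * reports success with the page still locked and fully allocated,
 * VM_FAULT_NOPAGE makes the VM drop this attempt and retry the fault (the
 * "Let the VM retry" cases above), and the vmf_error() codes report hard
 * failures.
 */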

static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct buffer_head *di_bh = NULL;
	sigset_t oldset;
	int err;
	vm_fault_t ret;

	sb_start_pagefault(inode->i_sb);
	ocfs2_block_signals(&oldset);

	/*
	 * The cluster locks taken will block a truncate from another
	 * node. Taking the data lock will also ensure that we don't
	 * attempt page truncation as part of a downconvert.
	 */
	err = ocfs2_inode_lock(inode, &di_bh, 1);
	if (err < 0) {
		mlog_errno(err);
		ret = vmf_error(err);
		goto out;
	}

	/*
	 * The alloc sem should be enough to serialize with
	 * ocfs2_truncate_file() changing i_size as well as any thread
	 * modifying the inode btree.
	 */
	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);

	up_write(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);

out:
	ocfs2_unblock_signals(&oldset);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
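
/*
 * Locking summary for the mkwrite path above, outermost first:
 *
 *	sb_start_pagefault()		 freeze protection for the superblock
 *	ocfs2_block_signals()		 mask signals for the duration
 *	ocfs2_inode_lock(inode, .., 1)	 EX cluster lock; blocks remote truncate
 *	down_write(ip_alloc_sem)	 serializes i_size and inode btree changes
 *
 * and everything is released in the reverse order on the way out.
 */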

static const struct vm_operations_struct ocfs2_file_vm_ops = {
	.fault		= ocfs2_fault,
	.page_mkwrite	= ocfs2_page_mkwrite,
};

int ocfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret = 0, lock_level = 0;

	ret = ocfs2_inode_lock_atime(file_inode(file),
				    file->f_path.mnt, &lock_level, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}
	ocfs2_inode_unlock(file_inode(file), lock_level);
out:
	vma->vm_ops = &ocfs2_file_vm_ops;
	return 0;
}
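
/*
 * Illustrative user-space sketch (not kernel code, and the mount path is only
 * an assumption) of how the handlers above are reached for a file on an ocfs2
 * mount:
 *
 *	int fd = open("/mnt/ocfs2/data", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	char c = p[0];		// read fault         -> ocfs2_fault()
 *	p[0] = c + 1;		// first shared write -> ocfs2_page_mkwrite()
 *	munmap(p, 4096);
 *
 * The mmap() call itself only reaches ocfs2_mmap(), which briefly takes and
 * drops the inode cluster lock via ocfs2_inode_lock_atime() and installs
 * ocfs2_file_vm_ops.
 */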