xref: /kernel/linux/linux-5.10/fs/9p/cache.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * V9FS cache definitions.
 *
 *  Copyright (C) 2009 by Abhishek Kulkarni <adkulkar@umail.iu.edu>
 */

#include <linux/jiffies.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <net/9p/9p.h>

#include "v9fs.h"
#include "cache.h"

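/*
 * Length of the cache tag buffer: presumably sized for a 32-bit jiffies
 * value in decimal (up to ten digits) plus the terminating NUL;
 * scnprintf() below simply truncates anything longer.
 */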
#define CACHETAG_LEN  11

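/*
 * The netfs definition for 9p.  It is assumed to be registered with
 * FS-Cache elsewhere in the 9p code (fscache_register_netfs()); its
 * primary_index serves as the parent of the per-session cookies below.
 */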
struct fscache_netfs v9fs_cache_netfs = {
	.name 		= "9p",
	.version 	= 0,
};

/**
 * v9fs_random_cachetag - Generate a random tag to be associated
 *			  with a new cache session.
 * @v9ses: session to generate the cache tag for
 *
 * The value of jiffies is used as a fairly random cache tag.
 */

static int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
{
	v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
	if (!v9ses->cachetag)
		return -ENOMEM;

	return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
}

const struct fscache_cookie_def v9fs_cache_session_index_def = {
	.name		= "9P.session",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
{
	/* If no cache session tag was specified, we generate a random one. */
	if (!v9ses->cachetag) {
		if (v9fs_random_cachetag(v9ses) < 0) {
			v9ses->fscache = NULL;
			kfree(v9ses->cachetag);
			v9ses->cachetag = NULL;
			return;
		}
	}

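	/*
	 * The session cookie is indexed under the netfs primary index and
	 * keyed by the cache tag string; no auxiliary data is attached at
	 * the session level.
	 */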
	v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
						&v9fs_cache_session_index_def,
						v9ses->cachetag,
						strlen(v9ses->cachetag),
						NULL, 0,
						v9ses, 0, true);
	p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
		 v9ses, v9ses->fscache);
}

void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
{
	p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
		 v9ses, v9ses->fscache);
	fscache_relinquish_cookie(v9ses->fscache, NULL, false);
	v9ses->fscache = NULL;
}

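/*
 * Consistency check called by FS-Cache: the auxiliary data stored with a
 * cached object (the inode's qid version) is compared against the current
 * version, and any mismatch marks the cached object obsolete.
 */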
static enum fscache_checkaux
v9fs_cache_inode_check_aux(void *cookie_netfs_data,
			   const void *buffer,
			   uint16_t buflen,
			   loff_t object_size)
{
	const struct v9fs_inode *v9inode = cookie_netfs_data;

	if (buflen != sizeof(v9inode->qid.version))
		return FSCACHE_CHECKAUX_OBSOLETE;

	if (memcmp(buffer, &v9inode->qid.version,
		   sizeof(v9inode->qid.version)))
		return FSCACHE_CHECKAUX_OBSOLETE;

	return FSCACHE_CHECKAUX_OKAY;
}

const struct fscache_cookie_def v9fs_cache_inode_index_def = {
	.name		= "9p.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= v9fs_cache_inode_check_aux,
};

void v9fs_cache_inode_get_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;

	if (!S_ISREG(inode->i_mode))
		return;

	v9inode = V9FS_I(inode);
	if (v9inode->fscache)
		return;

	v9ses = v9fs_inode2v9ses(inode);
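	/*
	 * The data-file cookie is keyed by qid.path, carries qid.version
	 * as auxiliary data (validated by v9fs_cache_inode_check_aux()),
	 * and advertises the inode's current size as the object size.
	 */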
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);

	p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
		 inode, v9inode->fscache);
}

void v9fs_cache_inode_put_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
		 inode, v9inode->fscache);

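	/*
	 * Pass the current qid version back as auxiliary data so the cache
	 * keeps up-to-date coherency information; retire == false, so the
	 * cached data itself is preserved for later reuse.
	 */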
	fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
				  false);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_flush_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;
	p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
		 inode, v9inode->fscache);

	fscache_relinquish_cookie(v9inode->fscache, NULL, true);
	v9inode->fscache = NULL;
}

void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);

	if (!v9inode->fscache)
		return;

	mutex_lock(&v9inode->fscache_lock);

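	/*
	 * Opening the file with write access retires any cached data for
	 * the inode; a read-only open (re)acquires a cookie instead.
	 */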
	if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
		v9fs_cache_inode_flush_cookie(inode);
	else
		v9fs_cache_inode_get_cookie(inode);

	mutex_unlock(&v9inode->fscache_lock);
}

void v9fs_cache_inode_reset_cookie(struct inode *inode)
{
	struct v9fs_inode *v9inode = V9FS_I(inode);
	struct v9fs_session_info *v9ses;
	struct fscache_cookie *old;

	if (!v9inode->fscache)
		return;

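	/*
	 * Retire the existing (presumed stale) cookie and acquire a fresh
	 * one keyed and sized from the inode's current qid and i_size.
	 */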
	old = v9inode->fscache;

	mutex_lock(&v9inode->fscache_lock);
	fscache_relinquish_cookie(v9inode->fscache, NULL, true);

	v9ses = v9fs_inode2v9ses(inode);
	v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
						  &v9fs_cache_inode_index_def,
						  &v9inode->qid.path,
						  sizeof(v9inode->qid.path),
						  &v9inode->qid.version,
						  sizeof(v9inode->qid.version),
						  v9inode,
						  i_size_read(&v9inode->vfs_inode),
						  true);
	p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
		 inode, old, v9inode->fscache);

	mutex_unlock(&v9inode->fscache_lock);
}

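/*
 * Page-level helpers.  The 9p releasepage/invalidatepage address_space
 * operations (in vfs_addr.c) are expected to call these, via the wrappers
 * in cache.h, to let FS-Cache veto a page release or clean up a page that
 * is being invalidated.
 */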
int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	return fscache_maybe_release_page(v9inode->fscache, page, gfp);
}

void __v9fs_fscache_invalidate_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct v9fs_inode *v9inode = V9FS_I(inode);

	BUG_ON(!v9inode->fscache);

	if (PageFsCache(page)) {
		fscache_wait_on_page_write(v9inode->fscache, page);
		BUG_ON(!PageLocked(page));
		fscache_uncache_page(v9inode->fscache, page);
	}
}

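/*
 * Completion callback for FS-Cache page reads: mark the page up to date
 * when the read succeeded and unlock it either way.
 */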
static void v9fs_vfs_readpage_complete(struct page *page, void *data,
				       int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

/**
 * __v9fs_readpage_from_fscache - read a page from cache
 * @inode: inode that owns the page
 * @page: page to fill from the cache
 *
 * Returns 0 if the page is in the cache and a BIO is submitted,
 * 1 if the page is not in the cache, and -error otherwise.
 */

int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(v9inode->fscache,
					 page,
					 v9fs_vfs_readpage_complete,
					 NULL,
					 GFP_KERNEL);
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret);
		return 1;
	case 0:
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}
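
/*
 * Rough usage sketch (an assumption about the callers, not taken from this
 * file): the 9p readpage path in vfs_addr.c is expected to go through the
 * wrappers in cache.h, trying the cache first and only issuing a 9p read
 * to the server on a miss, roughly:
 *
 *	if (v9fs_readpage_from_fscache(inode, page) == 0)
 *		return 0;	read was submitted from the cache
 *	... read the page from the server, then on success ...
 *	v9fs_readpage_to_fscache(inode, page);
 */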

/**
 * __v9fs_readpages_from_fscache - read multiple pages from cache
 * @inode: inode that owns the pages
 * @mapping: address space the pages belong to
 * @pages: list of pages to attempt to read from the cache
 * @nr_pages: number of pages on the list
 *
 * Returns 0 if the pages are in the cache and a BIO is submitted,
 * 1 if the pages are not in the cache, and -error otherwise.
 */

int __v9fs_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages);
	if (!v9inode->fscache)
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(v9inode->fscache,
					  mapping, pages, nr_pages,
					  v9fs_vfs_readpage_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
	switch (ret) {
	case -ENOBUFS:
	case -ENODATA:
		p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret);
		return 1;
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(*nr_pages != 0);
		p9_debug(P9_DEBUG_FSC, "BIO submitted\n");
		return ret;
	default:
		p9_debug(P9_DEBUG_FSC, "ret %d\n", ret);
		return ret;
	}
}

/**
 * __v9fs_readpage_to_fscache - write a page to the cache
 * @inode: inode that owns the page
 * @page: page to store in the cache
 */

void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page)
{
	int ret;
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	ret = fscache_write_page(v9inode->fscache, page,
				 i_size_read(&v9inode->vfs_inode), GFP_KERNEL);
	p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret);
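	/*
	 * On failure, make sure the page is no longer marked as cached so
	 * that a stale or partial copy cannot be read back later.
	 */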
	if (ret != 0)
		v9fs_uncache_page(inode, page);
}

/*
 * Wait for a page to complete writing to the cache.
 */
void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
{
	const struct v9fs_inode *v9inode = V9FS_I(inode);

	p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page);
	if (PageFsCache(page))
		fscache_wait_on_page_write(v9inode->fscache, page);
}
339