xref: /kernel/linux/linux-5.10/fs/9p/vfs_file.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 *  linux/fs/9p/vfs_file.c
4 *
 * This file contains vfs file ops for 9P2000.
6 *
7 *  Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
8 *  Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
9 */
10
11#include <linux/module.h>
12#include <linux/errno.h>
13#include <linux/fs.h>
14#include <linux/sched.h>
15#include <linux/file.h>
16#include <linux/stat.h>
17#include <linux/string.h>
18#include <linux/inet.h>
19#include <linux/list.h>
20#include <linux/pagemap.h>
21#include <linux/utsname.h>
22#include <linux/uaccess.h>
23#include <linux/idr.h>
24#include <linux/uio.h>
25#include <linux/slab.h>
26#include <net/9p/9p.h>
27#include <net/9p/client.h>
28
29#include "v9fs.h"
30#include "v9fs_vfs.h"
31#include "fid.h"
32#include "cache.h"
33
34static const struct vm_operations_struct v9fs_file_vm_ops;
35static const struct vm_operations_struct v9fs_mmap_file_vm_ops;
36
37/**
38 * v9fs_file_open - open a file (or directory)
39 * @inode: inode to be opened
40 * @file: file being opened
41 *
42 */
43
int v9fs_file_open(struct inode *inode, struct file *file)
{
	int err;
	struct v9fs_inode *v9inode;
	struct v9fs_session_info *v9ses;
	struct p9_fid *fid;
	int omode;

	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
	v9inode = V9FS_I(inode);
	v9ses = v9fs_inode2v9ses(inode);
	/* Translate VFS open flags into the 9P open mode appropriate for
	 * the protocol dialect in use (9P2000.L vs 9P2000[.u]). */
	if (v9fs_proto_dotl(v9ses))
		omode = v9fs_open_to_dotl_flags(file->f_flags);
	else
		omode = v9fs_uflags2omode(file->f_flags,
					v9fs_proto_dotu(v9ses));
	fid = file->private_data;
	if (!fid) {
		/* No fid attached yet (plain open path): clone the
		 * dentry's fid and open it on the server. On failure the
		 * clone is clunked here before returning. */
		fid = v9fs_fid_clone(file_dentry(file));
		if (IS_ERR(fid))
			return PTR_ERR(fid);

		err = p9_client_open(fid, omode);
		if (err < 0) {
			p9_client_clunk(fid);
			return err;
		}
		/* Legacy 9P2000 (neither .u nor .L) has no server-side
		 * append mode, so emulate O_APPEND by seeking to EOF. */
		if ((file->f_flags & O_APPEND) &&
			(!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
			generic_file_llseek(file, 0, SEEK_END);
	}

	file->private_data = fid;
	/* v_mutex serializes writeback_fid creation against other
	 * openers/mmappers of this inode. */
	mutex_lock(&v9inode->v_mutex);
	if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&
	    !v9inode->writeback_fid &&
	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it during open time instead of
		 * page dirty time via write_begin/page_mkwrite
		 * because we want write after unlink usecase
		 * to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(file));
		if (IS_ERR(fid)) {
			err = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			goto out_error;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);
	if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
		v9fs_cache_inode_set_cookie(inode, file);
	return 0;
out_error:
	/* Clunk the fid stored in private_data (the one opened above,
	 * or the one handed to us by the caller). */
	p9_client_clunk(file->private_data);
	file->private_data = NULL;
	return err;
}
105
106/**
107 * v9fs_file_lock - lock a file (or directory)
108 * @filp: file to be locked
109 * @cmd: lock command
110 * @fl: file lock structure
111 *
112 * Bugs: this looks like a local only lock, we should extend into 9P
113 *       by using open exclusive
114 */
115
116static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
117{
118	int res = 0;
119	struct inode *inode = file_inode(filp);
120
121	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
122
123	/* No mandatory locks */
124	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
125		return -ENOLCK;
126
127	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
128		filemap_write_and_wait(inode->i_mapping);
129		invalidate_mapping_pages(&inode->i_data, 0, -1);
130	}
131
132	return res;
133}
134
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
{
	struct p9_flock flock;
	struct p9_fid *fid;
	uint8_t status = P9_LOCK_ERROR;
	int res = 0;
	unsigned char fl_type;
	struct v9fs_session_info *v9ses;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	/* Only POSIX locks reach this helper; flock requests are
	 * converted to FL_POSIX by the caller before getting here. */
	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
		BUG();

	/* Take the lock locally first; if the server later refuses it,
	 * the local lock is reverted in out_unlock below. */
	res = locks_lock_file_wait(filp, fl);
	if (res < 0)
		goto out;

	/* convert posix lock to p9 tlock args */
	memset(&flock, 0, sizeof(flock));
	/* map the lock type */
	switch (fl->fl_type) {
	case F_RDLCK:
		flock.type = P9_LOCK_TYPE_RDLCK;
		break;
	case F_WRLCK:
		flock.type = P9_LOCK_TYPE_WRLCK;
		break;
	case F_UNLCK:
		flock.type = P9_LOCK_TYPE_UNLCK;
		break;
	}
	flock.start = fl->fl_start;
	/* Length 0 encodes "to end of file" on the wire. */
	if (fl->fl_end == OFFSET_MAX)
		flock.length = 0;
	else
		flock.length = fl->fl_end - fl->fl_start + 1;
	flock.proc_id = fl->fl_pid;
	flock.client_id = fid->clnt->name;
	if (IS_SETLKW(cmd))
		flock.flags = P9_LOCK_FLAGS_BLOCK;

	v9ses = v9fs_inode2v9ses(file_inode(filp));

	/*
	 * if its a blocked request and we get P9_LOCK_BLOCKED as the status
	 * for lock request, keep on trying
	 */
	for (;;) {
		res = p9_client_lock_dotl(fid, &flock, &status);
		if (res < 0)
			goto out_unlock;

		if (status != P9_LOCK_BLOCKED)
			break;
		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
			break;
		/* Sleep before retrying; a pending signal (non-zero
		 * return) aborts the wait loop. */
		if (schedule_timeout_interruptible(v9ses->session_lock_timeout)
				!= 0)
			break;
		/*
		 * p9_client_lock_dotl overwrites flock.client_id with the
		 * server message, free and reuse the client name
		 */
		if (flock.client_id != fid->clnt->name) {
			kfree(flock.client_id);
			flock.client_id = fid->clnt->name;
		}
	}

	/* map 9p status to VFS status */
	switch (status) {
	case P9_LOCK_SUCCESS:
		res = 0;
		break;
	case P9_LOCK_BLOCKED:
		res = -EAGAIN;
		break;
	default:
		WARN_ONCE(1, "unknown lock status code: %d\n", status);
		fallthrough;
	case P9_LOCK_ERROR:
	case P9_LOCK_GRACE:
		res = -ENOLCK;
		break;
	}

out_unlock:
	/*
	 * incase server returned error for lock request, revert
	 * it locally
	 */
	if (res < 0 && fl->fl_type != F_UNLCK) {
		fl_type = fl->fl_type;
		fl->fl_type = F_UNLCK;
		/* Even if this fails we want to return the remote error */
		locks_lock_file_wait(filp, fl);
		fl->fl_type = fl_type;
	}
	/* client_id may still hold a server-allocated string; free it. */
	if (flock.client_id != fid->clnt->name)
		kfree(flock.client_id);
out:
	return res;
}
240
static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
{
	struct p9_getlock glock;
	struct p9_fid *fid;
	int res = 0;

	fid = filp->private_data;
	BUG_ON(fid == NULL);

	/* First look for a conflicting lock held locally. */
	posix_test_lock(filp, fl);
	/*
	 * if we have a conflicting lock locally, no need to validate
	 * with server
	 */
	if (fl->fl_type != F_UNLCK)
		return res;

	/* convert posix lock to p9 tgetlock args */
	memset(&glock, 0, sizeof(glock));
	glock.type  = P9_LOCK_TYPE_UNLCK;
	glock.start = fl->fl_start;
	/* Length 0 encodes "to end of file" on the wire. */
	if (fl->fl_end == OFFSET_MAX)
		glock.length = 0;
	else
		glock.length = fl->fl_end - fl->fl_start + 1;
	glock.proc_id = fl->fl_pid;
	glock.client_id = fid->clnt->name;

	res = p9_client_getlock_dotl(fid, &glock);
	if (res < 0)
		goto out;
	/* map 9p lock type to os lock type */
	switch (glock.type) {
	case P9_LOCK_TYPE_RDLCK:
		fl->fl_type = F_RDLCK;
		break;
	case P9_LOCK_TYPE_WRLCK:
		fl->fl_type = F_WRLCK;
		break;
	case P9_LOCK_TYPE_UNLCK:
		fl->fl_type = F_UNLCK;
		break;
	}
	/* Server reported a conflict: copy the blocker's range back into
	 * fl. NOTE(review): the pid is negated here — appears to flag a
	 * remote (non-local) lock owner; confirm against fl_pid users. */
	if (glock.type != P9_LOCK_TYPE_UNLCK) {
		fl->fl_start = glock.start;
		if (glock.length == 0)
			fl->fl_end = OFFSET_MAX;
		else
			fl->fl_end = glock.start + glock.length - 1;
		fl->fl_pid = -glock.proc_id;
	}
out:
	/* The reply may have replaced client_id with an allocated string. */
	if (glock.client_id != fid->clnt->name)
		kfree(glock.client_id);
	return res;
}
297
298/**
299 * v9fs_file_lock_dotl - lock a file (or directory)
300 * @filp: file to be locked
301 * @cmd: lock command
302 * @fl: file lock structure
303 *
304 */
305
306static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl)
307{
308	struct inode *inode = file_inode(filp);
309	int ret = -ENOLCK;
310
311	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
312		 filp, cmd, fl, filp);
313
314	/* No mandatory locks */
315	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
316		goto out_err;
317
318	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
319		filemap_write_and_wait(inode->i_mapping);
320		invalidate_mapping_pages(&inode->i_data, 0, -1);
321	}
322
323	if (IS_SETLK(cmd) || IS_SETLKW(cmd))
324		ret = v9fs_file_do_lock(filp, cmd, fl);
325	else if (IS_GETLK(cmd))
326		ret = v9fs_file_getlock(filp, fl);
327	else
328		ret = -EINVAL;
329out_err:
330	return ret;
331}
332
333/**
334 * v9fs_file_flock_dotl - lock a file
335 * @filp: file to be locked
336 * @cmd: lock command
337 * @fl: file lock structure
338 *
339 */
340
341static int v9fs_file_flock_dotl(struct file *filp, int cmd,
342	struct file_lock *fl)
343{
344	struct inode *inode = file_inode(filp);
345	int ret = -ENOLCK;
346
347	p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n",
348		 filp, cmd, fl, filp);
349
350	/* No mandatory locks */
351	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
352		goto out_err;
353
354	if (!(fl->fl_flags & FL_FLOCK))
355		goto out_err;
356
357	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
358		filemap_write_and_wait(inode->i_mapping);
359		invalidate_mapping_pages(&inode->i_data, 0, -1);
360	}
361	/* Convert flock to posix lock */
362	fl->fl_flags |= FL_POSIX;
363	fl->fl_flags ^= FL_FLOCK;
364
365	if (IS_SETLK(cmd) | IS_SETLKW(cmd))
366		ret = v9fs_file_do_lock(filp, cmd, fl);
367	else
368		ret = -EINVAL;
369out_err:
370	return ret;
371}
372
/**
 * v9fs_file_read_iter - read from a file
 * @iocb: kernel I/O control block (file and position)
 * @to: destination iov_iter to read data into
 *
 */
381
382static ssize_t
383v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
384{
385	struct p9_fid *fid = iocb->ki_filp->private_data;
386	int ret, err = 0;
387
388	p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n",
389		 iov_iter_count(to), iocb->ki_pos);
390
391	if (iocb->ki_filp->f_flags & O_NONBLOCK)
392		ret = p9_client_read_once(fid, iocb->ki_pos, to, &err);
393	else
394		ret = p9_client_read(fid, iocb->ki_pos, to, &err);
395	if (!ret)
396		return err;
397
398	iocb->ki_pos += ret;
399	return ret;
400}
401
/**
 * v9fs_file_write_iter - write to a file
 * @iocb: kernel I/O control block (file and position)
 * @from: source iov_iter holding the data to write
 *
 */
static ssize_t
v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	ssize_t retval;
	loff_t origin;
	int err = 0;

	/* Standard limit/permission checks; may shrink the iov count. */
	retval = generic_write_checks(iocb, from);
	if (retval <= 0)
		return retval;

	origin = iocb->ki_pos;
	retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err);
	if (retval > 0) {
		struct inode *inode = file_inode(file);
		loff_t i_size;
		unsigned long pg_start, pg_end;
		/* Invalidate cached pages covering the range we just
		 * wrote so later reads refetch fresh data. */
		pg_start = origin >> PAGE_SHIFT;
		pg_end = (origin + retval - 1) >> PAGE_SHIFT;
		if (inode->i_mapping && inode->i_mapping->nrpages)
			invalidate_inode_pages2_range(inode->i_mapping,
						      pg_start, pg_end);
		iocb->ki_pos += retval;
		i_size = i_size_read(inode);
		/* Grow the cached inode size if the write extended EOF. */
		if (iocb->ki_pos > i_size) {
			inode_add_bytes(inode, iocb->ki_pos - i_size);
			/*
			 * Need to serialize against i_size_write() in
			 * v9fs_stat2inode()
			 */
			v9fs_i_size_write(inode, iocb->ki_pos);
		}
		return retval;
	}
	return err;
}
447
448static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end,
449			   int datasync)
450{
451	struct p9_fid *fid;
452	struct inode *inode = filp->f_mapping->host;
453	struct p9_wstat wstat;
454	int retval;
455
456	retval = file_write_and_wait_range(filp, start, end);
457	if (retval)
458		return retval;
459
460	inode_lock(inode);
461	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
462
463	fid = filp->private_data;
464	v9fs_blank_wstat(&wstat);
465
466	retval = p9_client_wstat(fid, &wstat);
467	inode_unlock(inode);
468
469	return retval;
470}
471
472int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end,
473			 int datasync)
474{
475	struct p9_fid *fid;
476	struct inode *inode = filp->f_mapping->host;
477	int retval;
478
479	retval = file_write_and_wait_range(filp, start, end);
480	if (retval)
481		return retval;
482
483	inode_lock(inode);
484	p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync);
485
486	fid = filp->private_data;
487
488	retval = p9_client_fsync(fid, datasync);
489	inode_unlock(inode);
490
491	return retval;
492}
493
494static int
495v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma)
496{
497	int retval;
498
499
500	retval = generic_file_mmap(filp, vma);
501	if (!retval)
502		vma->vm_ops = &v9fs_file_vm_ops;
503
504	return retval;
505}
506
static int
v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int retval;
	struct inode *inode;
	struct v9fs_inode *v9inode;
	struct p9_fid *fid;

	inode = file_inode(filp);
	v9inode = V9FS_I(inode);
	/* v_mutex serializes writeback_fid creation with the open path. */
	mutex_lock(&v9inode->v_mutex);
	/* A shared writable mapping will dirty pages, so ensure a
	 * writeback fid exists before the mapping is established. */
	if (!v9inode->writeback_fid &&
	    (vma->vm_flags & VM_SHARED) &&
	    (vma->vm_flags & VM_WRITE)) {
		/*
		 * clone a fid and add it to writeback_fid
		 * we do it during mmap instead of
		 * page dirty time via write_begin/page_mkwrite
		 * because we want write after unlink usecase
		 * to work.
		 */
		fid = v9fs_writeback_fid(file_dentry(filp));
		if (IS_ERR(fid)) {
			retval = PTR_ERR(fid);
			mutex_unlock(&v9inode->v_mutex);
			return retval;
		}
		v9inode->writeback_fid = (void *) fid;
	}
	mutex_unlock(&v9inode->v_mutex);

	retval = generic_file_mmap(filp, vma);
	if (!retval)
		vma->vm_ops = &v9fs_mmap_file_vm_ops;

	return retval;
}
544
static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct v9fs_inode *v9inode;
	struct page *page = vmf->page;
	struct file *filp = vmf->vma->vm_file;
	struct inode *inode = file_inode(filp);


	p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
		 page, (unsigned long)filp->private_data);

	/* Update file times before taking page lock */
	file_update_time(filp);

	v9inode = V9FS_I(inode);
	/* make sure the cache has finished storing the page */
	v9fs_fscache_wait_on_page_write(inode, page);
	/* The open/mmap paths must have set up a writeback fid before
	 * any page can be dirtied through a mapping. */
	BUG_ON(!v9inode->writeback_fid);
	lock_page(page);
	/* The page may have been truncated or invalidated while we were
	 * waiting; if it no longer belongs to this file, retry the fault. */
	if (page->mapping != inode->i_mapping)
		goto out_unlock;
	wait_for_stable_page(page);

	return VM_FAULT_LOCKED;
out_unlock:
	unlock_page(page);
	return VM_FAULT_NOPAGE;
}
574
/**
 * v9fs_mmap_file_read_iter - read from a file (mmap-capable variant)
 * @iocb: kernel I/O control block (file and position)
 * @to: destination iov_iter to read data into
 *
 */
static ssize_t
v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	/* Currently identical to the plain read path. */
	/* TODO: Check if there are dirty pages */
	return v9fs_file_read_iter(iocb, to);
}
589
/**
 * v9fs_mmap_file_write_iter - write to a file (mmap-capable variant)
 * @iocb: kernel I/O control block (file and position)
 * @from: source iov_iter holding the data to write
 *
 */
static ssize_t
v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	/* Currently identical to the plain write path. */
	/*
	 * TODO: invalidate mmaps on filp's inode between
	 * offset and offset+count
	 */
	return v9fs_file_write_iter(iocb, from);
}
607
static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode;

	/* Synchronously write back exactly the byte range this VMA
	 * covered; range_end is inclusive. */
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_ALL,
		.range_start = (loff_t)vma->vm_pgoff * PAGE_SIZE,
		 /* absolute end, byte at end included */
		.range_end = (loff_t)vma->vm_pgoff * PAGE_SIZE +
			(vma->vm_end - vma->vm_start - 1),
	};

	/* Private mappings never write back through the file. */
	if (!(vma->vm_flags & VM_SHARED))
		return;

	p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma);

	inode = file_inode(vma->vm_file);

	/* If the mapping cannot write back, skip data pages but still
	 * let sync_inode push out inode metadata. */
	if (!mapping_can_writeback(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	sync_inode(inode, &wbc);
}
634
635
/* vm_ops for v9fs_file_mmap: generic fault paths plus our
 * page_mkwrite hook (which requires a writeback fid). */
static const struct vm_operations_struct v9fs_file_vm_ops = {
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
641
/* vm_ops for v9fs_mmap_file_mmap: as above, plus a close hook that
 * flushes the mapped range back to the server on unmap. */
static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
	.close = v9fs_mmap_vm_close,
	.fault = filemap_fault,
	.map_pages = filemap_map_pages,
	.page_mkwrite = v9fs_vm_page_mkwrite,
};
648
649
/* Cached I/O (legacy protocol): reads/writes go through the generic
 * page-cache paths; writable mmap via v9fs_file_mmap. */
const struct file_operations v9fs_cached_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};
662
/* Cached I/O, 9P2000.L: same generic paths but dotl lock/flock and
 * the native fsync request. */
const struct file_operations v9fs_cached_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = generic_file_read_iter,
	.write_iter = generic_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
676
/* Direct I/O (legacy protocol): reads/writes go straight to the 9P
 * client; mmap is read-only. */
const struct file_operations v9fs_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};
689
/* Direct I/O, 9P2000.L: dotl lock/flock and native fsync; mmap is
 * read-only. */
const struct file_operations v9fs_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_file_read_iter,
	.write_iter = v9fs_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = generic_file_readonly_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
703
/* Direct I/O with writable-mmap support (legacy protocol): the mmap
 * wrappers ensure a writeback fid before mapping shared+writable. */
const struct file_operations v9fs_mmap_file_operations = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync,
};
716
/* Direct I/O with writable-mmap support, 9P2000.L variant. */
const struct file_operations v9fs_mmap_file_operations_dotl = {
	.llseek = generic_file_llseek,
	.read_iter = v9fs_mmap_file_read_iter,
	.write_iter = v9fs_mmap_file_write_iter,
	.open = v9fs_file_open,
	.release = v9fs_dir_release,
	.lock = v9fs_file_lock_dotl,
	.flock = v9fs_file_flock_dotl,
	.mmap = v9fs_mmap_file_mmap,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.fsync = v9fs_file_fsync_dotl,
};
730