Lines matching refs: from

314 	 * Both of fdatasync() and fsync() are able to be recovered from
623 static int truncate_partial_data_page(struct inode *inode, u64 from,
626 loff_t offset = from & (PAGE_SIZE - 1);
627 pgoff_t index = from >> PAGE_SHIFT;
657 int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
666 trace_f2fs_truncate_blocks_enter(inode, from);
668 free_from = (pgoff_t)F2FS_BLK_ALIGN(from);
683 f2fs_truncate_inline_inode(inode, ipage, from);
716 err = truncate_partial_data_page(inode, from, truncate_page);
722 int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
724 u64 free_from = from;
733 free_from = round_up(from,
750 if (from != free_from) {
751 err = f2fs_truncate_partial_cluster(inode, from, lock);
1417 /* write out all dirty pages from offset */
1630 /* write out all dirty pages from offset */
2846 /* write out all dirty pages from offset */
4526 static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
4539 count = generic_write_checks(iocb, from);
4621 struct iov_iter *from)
4630 ret = generic_perform_write(iocb, from);
4669 static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
4678 const ssize_t count = iov_iter_count(from);
4721 dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
4744 if (iov_iter_count(from)) {
4753 ret2 = f2fs_buffered_write_iter(iocb, from);
4754 if (iov_iter_count(from))
4781 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
4785 const size_t orig_count = iov_iter_count(from);
4811 ret = f2fs_write_checks(iocb, from);
4816 dio = f2fs_should_use_dio(inode, iocb, from);
4819 target_size = iocb->ki_pos + iov_iter_count(from);
4820 preallocated = f2fs_preallocate_blocks(iocb, from, dio);
4830 f2fs_dio_write_iter(iocb, from, &may_need_sync) :
4831 f2fs_buffered_write_iter(iocb, from);
4858 /* If buffered IO was forced, flush and drop the data from
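The truncate-related matches above (lines 623-751) revolve around splitting a byte offset `from` into a page index, an in-page offset, and a block- or cluster-aligned starting point for freeing whole blocks. The following is a minimal stand-alone sketch of that arithmetic, not the kernel code: the 4 KiB page size, the 4-block cluster size, and keeping every quantity in bytes are all simplifying assumptions (the real F2FS_BLK_ALIGN() works in block units, for instance).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE    4096ULL   /* assumed 4 KiB pages */
#define PAGE_SHIFT   12
#define CLUSTER_BLKS 4ULL      /* hypothetical compression cluster size */

/* round x up to the next multiple of align (align must be a power of two) */
static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t from = 10000;                    /* truncate target, in bytes */
	uint64_t offset = from & (PAGE_SIZE - 1); /* in-page offset, cf. line 626 */
	uint64_t index  = from >> PAGE_SHIFT;     /* page index, cf. line 627 */
	uint64_t free_from = round_up_pow2(from, PAGE_SIZE);
	uint64_t cluster_from = round_up_pow2(from, CLUSTER_BLKS * PAGE_SIZE);

	printf("from=%llu -> page index %llu, in-page offset %llu\n",
	       (unsigned long long)from,
	       (unsigned long long)index,
	       (unsigned long long)offset);
	printf("whole blocks freed from byte %llu, whole clusters from byte %llu\n",
	       (unsigned long long)free_from,
	       (unsigned long long)cluster_from);
	printf("partial page needs separate handling: %s\n",
	       from != free_from ? "yes" : "no");
	return 0;
}

When the truncate target is not aligned (from != free_from, as on line 750), the tail page or compressed cluster cannot simply be dropped, which is why the listing shows separate truncate_partial_data_page() and f2fs_truncate_partial_cluster() calls after the aligned range is freed.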
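The write-path matches (lines 4526-4858) show f2fs_file_write_iter() validating the request with f2fs_write_checks(), asking f2fs_should_use_dio() which path to take, preallocating blocks, and then calling either f2fs_dio_write_iter() or f2fs_buffered_write_iter(), with leftover bytes from the direct path falling back to the buffered path. Below is a stand-alone user-space analogy of that dispatch, not the kernel implementation: the struct, the alignment-based "should use DIO" rule, and the 4 KiB granularity are all assumptions made for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* one in-flight write request: file position and bytes still to write */
struct write_req {
	size_t pos;
	size_t count;
};

/* assumed policy: take the direct path only when the start is block aligned */
static bool should_use_dio(const struct write_req *req)
{
	return req->pos % 4096 == 0;
}

/* pretend the direct path can only push whole 4 KiB blocks */
static size_t dio_write(struct write_req *req)
{
	size_t done = req->count - (req->count % 4096);

	req->pos += done;
	req->count -= done;
	return done;
}

/* the buffered path absorbs whatever is left */
static size_t buffered_write(struct write_req *req)
{
	size_t done = req->count;

	req->pos += done;
	req->count = 0;
	return done;
}

static size_t file_write(struct write_req *req)
{
	size_t written = 0;

	if (should_use_dio(req)) {
		written += dio_write(req);
		/* mirrors "if (iov_iter_count(from))" on line 4744: bytes the
		 * direct path could not finish fall back to the buffered path */
		if (req->count)
			written += buffered_write(req);
	} else {
		written += buffered_write(req);
	}
	return written;
}

int main(void)
{
	struct write_req req = { .pos = 0, .count = 10000 };

	printf("wrote %zu bytes, final position %zu\n",
	       file_write(&req), req.pos);
	return 0;
}

The fallback branch is the part worth noting: as the comment on line 4858 suggests, once some of the data has gone through the page cache the filesystem has extra work to do (flushing and dropping those pages) to keep direct-IO semantics, which is why the listing shows the buffered fallback handled explicitly rather than silently.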