Lines Matching refs:from

41 * Remove the dirty flags from a span of pages.
505 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
609 * @wait_oplock_handler: must be false if called from oplock_break_handler
643 /* remove it from the lists */
1047 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1121 /* return code from the ->release op is always ignored */
1855 * a lock from the file's list.
1880 * request - add all locks from the tmp
1967 * Windows 7 server can delay breaking lease from read to None
2162 filemap_fdatawait from here so tell
2511 /* Clean up remaining pages from the original wdata */
2562 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
2577 offset += (loff_t)from;
2579 write_data += from;
2581 if ((to > PAGE_SIZE) || (from > to)) {
2600 write_data, to - from, &offset);
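
A short sketch of the offset arithmetic behind the cifs_partialpagewrite()
matches above (lines 2562-2600): the page index supplies the base file
offset, 'from' shifts both the offset and the mapped data pointer, and
exactly 'to - from' bytes are sent. This is a simplified reading, not the
real function; write_to_server() is a hypothetical stand-in for the
cifs_write() call.

        static int partialpagewrite_sketch(struct page *page,
                                           unsigned from, unsigned to)
        {
                loff_t offset = (loff_t)page->index << PAGE_SHIFT;
                char *write_data = kmap(page);
                int rc;

                offset += (loff_t)from;  /* file position of first dirty byte */
                write_data += from;      /* data starts 'from' bytes into page */

                if ((to > PAGE_SIZE) || (from > to)) {
                        kunmap(page);
                        return -EIO;     /* span must lie within the page */
                }

                /* write only the dirty span: bytes [from, to) of this page */
                rc = write_to_server(write_data, to - from, &offset);

                kunmap(page);
                return rc;
        }
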
2718 * writeback from the loop above.
2918 * NULL), or even swizzled back from swapper_space to tmpfs file
3090 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
3403 cifs_write_from_iter(loff_t fpos, size_t len, struct iov_iter *from,
3458 cur_len = cifs_limit_bvec_subset(from, max_len, max_segs, &nsegs);
3460 cur_len, max_len, nsegs, from->nr_segs, max_segs);
3481 wdata->iter = *from;
3507 iov_iter_advance(from, cur_len);
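
The cifs_write_from_iter() matches (lines 3403-3507) outline a chunking
loop: bound the next chunk, hand the write request a snapshot of the
iterator at its current position, then advance the original. A minimal
sketch under those assumptions; limit_chunk(), alloc_chunk() and
submit_chunk() are hypothetical stand-ins, while iov_iter_count(),
iov_iter_truncate() and iov_iter_advance() are the real iterator
primitives.

        while (len > 0 && iov_iter_count(from)) {
                size_t cur_len = limit_chunk(from, max_len); /* hypothetical */
                struct chunk *wdata = alloc_chunk();         /* hypothetical */

                wdata->iter = *from;                      /* snapshot position */
                iov_iter_truncate(&wdata->iter, cur_len); /* clamp to chunk */

                submit_chunk(wdata);   /* async write of [fpos, fpos+cur_len) */

                iov_iter_advance(from, cur_len);          /* step past chunk */
                fpos += cur_len;
                len -= cur_len;
        }
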
3599 struct kiocb *iocb, struct iov_iter *from, bool direct)
3609 rc = generic_write_checks(iocb, from);
3633 if (user_backed_iter(from)) {
3640 rc = netfs_extract_user_iter(from, iov_iter_count(from),
3649 ctx->bv_need_unpin = iov_iter_extract_will_pin(from);
3650 } else if ((iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) &&
3658 ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL);
3669 ctx->iter = *from;
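
Lines 3633-3669 show __cifs_writev() normalising the source iterator
before the I/O goes asynchronous: a user-backed iterator is extracted
(with its pages pinned) into a bvec iterator, a kernel bvec/kvec iterator
is duplicated so the segment array outlives the caller, and anything else
is copied by value. A condensed sketch; ctx stands for the per-I/O
context the matches assign into, and the error paths are simplified.

        if (user_backed_iter(from)) {
                /* pin/extract the user pages into ctx->iter */
                rc = netfs_extract_user_iter(from, iov_iter_count(from),
                                             &ctx->iter, 0);
                if (rc < 0)
                        return rc;
                ctx->bv_need_unpin = iov_iter_extract_will_pin(from);
        } else if (iov_iter_is_bvec(from) || iov_iter_is_kvec(from)) {
                /* duplicate the segments so they survive the caller */
                ctx->bv = (void *)dup_iter(&ctx->iter, from, GFP_KERNEL);
                if (!ctx->bv)
                        return -ENOMEM;
        } else {
                ctx->iter = *from;   /* e.g. ITER_XARRAY: copy by value */
        }
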
3682 * value from the later writes. If the other write succeeds, then
3684 * we'll get a new rc value from that.
3721 ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from)
3726 return __cifs_writev(iocb, from, true);
3729 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
3731 return __cifs_writev(iocb, from, false);
3735 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
3751 rc = generic_write_checks(iocb, from);
3755 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
3758 rc = __generic_file_write_iter(iocb, from);
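
cifs_writev() (lines 3735-3758) gates the cached write path on byte-range
locks: after generic_write_checks() succeeds, the write goes through the
page cache only if no lock held elsewhere covers the target range. A
condensed sketch; write_conflicts_with_locks() is a hypothetical stand-in
for the real cifs_find_lock_conflict() call and its remaining arguments.

        rc = generic_write_checks(iocb, from);
        if (rc <= 0)
                return rc;

        /* refuse if [ki_pos, ki_pos + count) is locked by another handle */
        if (write_conflicts_with_locks(cfile, iocb->ki_pos,
                                       iov_iter_count(from)))
                return -EACCES;

        return __generic_file_write_iter(iocb, from);
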
3771 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
3789 written = generic_file_write_iter(iocb, from);
3792 written = cifs_writev(iocb, from);
3797 * to the server exactly from pos to pos+len-1 rather than flush all
3799 * these pages but not on the region from pos to pos+len-1.
3801 written = cifs_user_writev(iocb, from);
3807 * reading stale data from the cache. All subsequent read
3808 * operations will read new data from the server.
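
The cifs_strict_writev() matches (lines 3771-3808) describe the
strict-cache policy: write through the page cache only while the
oplock/lease permits write caching; otherwise write uncached and zap the
mapping so later reads refetch from the server. A rough sketch using the
real CIFS_CACHE_WRITE()/CIFS_CACHE_READ() state macros and
cifs_zap_mapping(); locking and error handling are elided.

        if (CIFS_CACHE_WRITE(cinode)) {
                /* write caching allowed: generic cached path */
                written = generic_file_write_iter(iocb, from);
        } else {
                /* no write caching: send the range straight to the server */
                written = cifs_user_writev(iocb, from);
                if (written > 0 && CIFS_CACHE_READ(cinode)) {
                        /*
                         * The cache is stale now; drop it so all
                         * subsequent reads fetch new data from the server.
                         */
                        cifs_zap_mapping(inode);
                        cinode->oplock = 0;  /* lease level down to None */
                }
        }
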
4277 * In strict cache mode we need to read from the server all the time
4281 * on pages affected by this read but not on the region from pos to
4525 /* Note that readahead_count() lags behind our dequeuing of pages from
4763 /* We do not want to update the file size from server for inodes
4807 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
4820 * If we write a full page it will be up to date, no need to read from
5048 * users (on this or other systems) from reading it
5059 * from reading or writing the file