Lines matching refs:bytes (each entry: source line number, then the matching source line)

164 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
172 if (unlikely(bytes > i->count))
173 bytes = i->count;
175 if (unlikely(!bytes))
179 wanted = bytes;
183 copy = min(bytes, iov->iov_len - skip);
194 bytes -= copy;
196 while (unlikely(!left && bytes)) {
199 copy = min(bytes, iov->iov_len);
204 bytes -= copy;
206 if (likely(!bytes)) {
213 copy = min(bytes, iov->iov_len - skip);
223 bytes -= copy;
224 while (unlikely(!left && bytes)) {
227 copy = min(bytes, iov->iov_len);
232 bytes -= copy;
241 i->count -= wanted - bytes;
245 return wanted - bytes;
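
The copy_page_to_iter_iovec() fragments above (the code appears to be the Linux kernel's lib/iov_iter.c) all implement one pattern: clamp the request to i->count, peel min(bytes, iov_len - skip) off each iovec segment, and report wanted - bytes, i.e. how much was actually copied before a fault stopped the walk. A minimal user-space sketch of that accounting follows; copy_to_iovecs() is an illustrative name, and since plain memcpy() cannot fail partway the way the kernel's copyout() can, the result here always equals the clamped request.

#include <stddef.h>
#include <string.h>
#include <sys/uio.h>

/*
 * Illustrative model of the accounting above: clamp 'bytes' to the
 * iterator's remaining 'count', take up to iov_len - skip from each
 * segment, return wanted - bytes. 'skip' is the offset already consumed
 * in iov[0]; 'count' must not exceed the space described by iov[].
 */
static size_t copy_to_iovecs(const char *src, size_t bytes,
                             const struct iovec *iov, size_t skip,
                             size_t count)
{
        size_t wanted, copy;

        if (bytes > count)              /* never copy more than the iter wants */
                bytes = count;
        if (!bytes)
                return 0;

        wanted = bytes;
        for (;;) {
                copy = iov->iov_len - skip;
                if (copy > bytes)
                        copy = bytes;
                memcpy((char *)iov->iov_base + skip, src, copy);
                src += copy;
                bytes -= copy;
                if (!bytes)
                        break;
                iov++;                  /* segment exhausted, move on */
                skip = 0;
        }
        return wanted - bytes;          /* bytes actually placed */
}

copy_page_from_iter_iovec(), listed next, is the mirror image with the copy direction reversed.
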
248 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
256 if (unlikely(bytes > i->count))
257 bytes = i->count;
259 if (unlikely(!bytes))
263 wanted = bytes;
267 copy = min(bytes, iov->iov_len - skip);
278 bytes -= copy;
280 while (unlikely(!left && bytes)) {
283 copy = min(bytes, iov->iov_len);
288 bytes -= copy;
290 if (likely(!bytes)) {
297 copy = min(bytes, iov->iov_len - skip);
307 bytes -= copy;
308 while (unlikely(!left && bytes)) {
311 copy = min(bytes, iov->iov_len);
316 bytes -= copy;
325 i->count -= wanted - bytes;
329 return wanted - bytes;
375 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
385 if (unlikely(bytes > i->count))
386 bytes = i->count;
388 if (unlikely(!bytes))
399 buf->len += bytes;
400 i->iov_offset += bytes;
414 buf->len = bytes;
417 i->iov_offset = offset + bytes;
420 i->count -= bytes;
421 return bytes;
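
copy_page_to_iter_pipe() never copies the data at all: as the fragments show, it either extends the pipe buffer it is already filling (buf->len += bytes) or installs the page itself as a new buffer (buf->len = bytes), then charges the length to i->count. A hedged sketch of how such an ITER_PIPE iterator typically comes to exist on a splice-read style path is below; splice_one_page() is an illustrative name and all error handling is omitted.

#include <linux/pipe_fs_i.h>
#include <linux/uio.h>

/*
 * Sketch only: hand one page to a pipe through an ITER_PIPE iterator.
 * copy_page_to_iter() on such an iterator takes a reference on the page
 * and points a pipe buffer at it instead of copying the bytes.
 */
static ssize_t splice_one_page(struct page *page, size_t offset, size_t len,
                               struct pipe_inode_info *pipe)
{
        struct iov_iter to;

        iov_iter_pipe(&to, READ, pipe, len);
        return copy_page_to_iter(page, offset, len, &to);
}
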
426 * bytes. For each iovec, fault in each page that constitutes the iovec.
431 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
439 iterate_iovec(i, bytes, v, iov, skip, ({
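
iov_iter_fault_in_readable() pre-faults the user pages behind the next 'bytes' of the iterator while the caller can still sleep, so that a later copy performed with page faults disabled normally succeeds. A condensed, hedged sketch of the classic buffered-write pairing with iov_iter_copy_from_user_atomic() (the atomic-copy helper of this kernel vintage) follows; copy_chunk_from_user() is an illustrative name, and page locking and ->write_begin/->write_end details are omitted.

#include <linux/pagemap.h>
#include <linux/uio.h>

/* Sketch of the usual generic_perform_write()-style pairing. */
static ssize_t copy_chunk_from_user(struct page *page, unsigned long offset,
                                    size_t bytes, struct iov_iter *i)
{
        size_t copied;

        /* Fault the source pages in now, while sleeping is still allowed... */
        if (unlikely(iov_iter_fault_in_readable(i, bytes)))
                return -EFAULT;

        /* ...so this copy, done with pagefaults disabled, rarely comes up short. */
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        iov_iter_advance(i, copied);    /* the atomic copy does not advance */

        return copied;  /* may still be short; callers retry with a smaller chunk */
}
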
547 static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
558 bytes = n = push_pipe(i, bytes, &i_head, &off);
571 i->count -= bytes;
572 return bytes;
582 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
596 bytes = n = push_pipe(i, bytes, &i_head, &r);
612 i->count -= bytes;
615 return bytes;
618 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
622 return copy_pipe_to_iter(addr, bytes, i);
625 iterate_and_advance(i, bytes, v,
632 return bytes;
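
_copy_to_iter() is the engine behind the copy_to_iter() wrapper: a pipe destination is routed to copy_pipe_to_iter(), everything else goes through iterate_and_advance(), and the full 'bytes' is returned unless the destination faults or runs out of room. A hedged usage sketch with a kvec-backed destination follows; fill_dest() is an illustrative name.

#include <linux/uio.h>

/* Sketch: describe a kernel destination buffer with a kvec, then copy into it. */
static int fill_dest(void *dst, const void *src, size_t len)
{
        struct kvec v = { .iov_base = dst, .iov_len = len };
        struct iov_iter iter;

        iov_iter_kvec(&iter, READ, &v, 1, len); /* READ: iter is the destination */
        if (copy_to_iter(src, len, &iter) != len)
                return -EFAULT;                 /* short copy */
        return 0;
}
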
659 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
670 bytes = n = push_pipe(i, bytes, &i_head, &off);
696 * @bytes: total transfer length
701 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
716 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
722 return copy_mc_pipe_to_iter(addr, bytes, i);
725 iterate_and_advance(i, bytes, v,
733 bytes = curr_addr - s_addr - rem;
734 return bytes;
742 bytes = curr_addr - s_addr - rem;
743 return bytes;
748 return bytes;
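
The kerneldoc fragments above describe the machine-check-aware copy: _copy_mc_to_iter() stops when it hits poison and returns only the bytes copied up to that point, which is how read(2) on pmem/dax ends up returning either a short count or -EIO. A hedged sketch of a caller consuming that contract (assuming an architecture with CONFIG_ARCH_HAS_COPY_MC; read_from_pmem() is an illustrative name):

#include <linux/uio.h>

/*
 * Sketch: copy from possibly-poisoned persistent memory into the caller's
 * iterator. A short return means a machine check truncated the copy.
 */
static ssize_t read_from_pmem(const void *pmem_addr, size_t len,
                              struct iov_iter *iter)
{
        size_t copied = _copy_mc_to_iter(pmem_addr, len, iter);

        return copied ? copied : -EIO;  /* nothing salvageable: report EIO */
}
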
753 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
762 iterate_and_advance(i, bytes, v,
769 return bytes;
773 bool _copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
780 if (unlikely(i->count < bytes))
785 iterate_all_kinds(i, bytes, v, ({
795 iov_iter_advance(i, bytes);
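
The *_full variants (this one and its nocache twin below) are all-or-nothing: if i->count is too small or any segment faults they return false without advancing the iterator; only on complete success do they iov_iter_advance() past the data. That makes them a natural fit for fixed-size headers, as in the hedged sketch below (struct msg_hdr and read_header() are illustrative, not kernel types).

#include <linux/types.h>
#include <linux/uio.h>

struct msg_hdr {                        /* illustrative fixed-size header */
        __le32 type;
        __le32 len;
};

/* Sketch: pull a header off a message iterator, all or nothing. */
static int read_header(struct msg_hdr *hdr, struct iov_iter *from)
{
        if (!copy_from_iter_full(hdr, sizeof(*hdr), from))
                return -EFAULT;         /* nothing was consumed from the iterator */
        return 0;
}
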
800 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
807 iterate_and_advance(i, bytes, v,
815 return bytes;
823 * @bytes: total transfer length
834 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
841 iterate_and_advance(i, bytes, v,
850 return bytes;
855 bool _copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
862 if (unlikely(i->count < bytes))
864 iterate_all_kinds(i, bytes, v, ({
874 iov_iter_advance(i, bytes);
903 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
906 if (unlikely(!page_copy_sane(page, offset, bytes)))
910 size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
914 if (unlikely(i->count < bytes))
915 bytes = i->count;
916 i->count -= bytes;
917 return bytes;
919 return copy_page_to_iter_iovec(page, offset, bytes, i);
921 return copy_page_to_iter_pipe(page, offset, bytes, i);
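
copy_page_to_iter() is the type-dispatching front end: after the page_copy_sane() check it kmaps the page and reuses copy_to_iter() for kvec/bvec/discard iterators, or hands off to the iovec and pipe helpers listed earlier. A hedged sketch of a read-side loop that streams a run of pages into whatever the iterator describes (copy_pages_to_iter() is an illustrative name; the pages are assumed up to date and already referenced by the caller):

#include <linux/mm.h>
#include <linux/uio.h>

/* Sketch: stream consecutive pages into the reader's iterator. */
static ssize_t copy_pages_to_iter(struct page **pages, unsigned int npages,
                                  size_t offset, struct iov_iter *to)
{
        ssize_t done = 0;
        size_t chunk, n;

        while (npages && iov_iter_count(to)) {
                chunk = PAGE_SIZE - offset;
                n = copy_page_to_iter(*pages, offset, chunk, to);
                done += n;
                if (n != chunk)
                        break;          /* destination faulted or filled up */
                offset = 0;
                pages++;
                npages--;
        }
        return done;
}
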
925 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
928 if (unlikely(!page_copy_sane(page, offset, bytes)))
936 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
940 return copy_page_from_iter_iovec(page, offset, bytes, i);
944 static size_t pipe_zero(size_t bytes, struct iov_iter *i)
954 bytes = n = push_pipe(i, bytes, &i_head, &off);
967 i->count -= bytes;
968 return bytes;
971 size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
974 return pipe_zero(bytes, i);
975 iterate_and_advance(i, bytes, v,
981 return bytes;
986 struct iov_iter *i, unsigned long offset, size_t bytes)
989 if (unlikely(!page_copy_sane(page, offset, bytes))) {
998 iterate_all_kinds(i, bytes, v,
1005 return bytes;
1208 * @count: The size of the I/O buffer in bytes.
1438 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
1449 iterate_and_advance(i, bytes, v, ({
1473 return bytes;
1477 bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
1488 if (unlikely(i->count < bytes))
1490 iterate_all_kinds(i, bytes, v, ({
1514 iov_iter_advance(i, bytes);
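
The checksumming variants fold the Internet checksum into the copy so networking code does not have to walk the payload twice. csum_and_copy_from_iter_full() keeps the all-or-nothing contract of the other *_full helpers and hands the accumulated checksum back through *csum. A hedged sketch in the spirit of an ip_generic_getfrag()-style callback (copy_payload() is an illustrative name):

#include <linux/types.h>
#include <linux/uio.h>

/*
 * Sketch: copy 'len' bytes of payload out of a message iterator while
 * accumulating the checksum over exactly those bytes (seeded with 0).
 */
static int copy_payload(void *to, size_t len, struct iov_iter *from,
                        __wsum *csum_out)
{
        __wsum csum = 0;

        if (!csum_and_copy_from_iter_full(to, len, &csum, from))
                return -EFAULT;         /* all-or-nothing; iterator unchanged */

        *csum_out = csum;
        return 0;
}
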
1519 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
1528 return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
1536 iterate_and_advance(i, bytes, v, ({
1561 return bytes;
1565 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
1573 copied = copy_to_iter(addr, bytes, i);
1799 * Return: Negative error code on error, bytes imported on success
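
The last entry appears to come from the kerneldoc of import_iovec(), the usual front door: it copies the user's iovec array into the kernel, validates it, primes an iov_iter, and returns the total byte count that the iterator (i->count) will then describe. A hedged sketch of the canonical calling sequence in a readv-style path (do_vectored_read() is an illustrative name):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uio.h>

/* Sketch of the canonical import_iovec() calling sequence (readv-style). */
static ssize_t do_vectored_read(const struct iovec __user *uvec,
                                unsigned long nr_segs)
{
        struct iovec iovstack[UIO_FASTIOV];
        struct iovec *iov = iovstack;
        struct iov_iter iter;
        ssize_t ret;

        ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
                           &iov, &iter);
        if (ret < 0)
                return ret;             /* negative errno; nothing to free */

        /* ... hand &iter to copy_to_iter()/copy_page_to_iter() users ... */

        kfree(iov);     /* import_iovec() may have allocated a larger array */
        return ret;     /* total number of bytes the iterator described */
}
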