Lines matching rreq (netfs read-request I/O path)
26 iov_iter_xarray(&iter, ITER_DEST, &subreq->rreq->mapping->i_pages,
44 static void netfs_read_from_cache(struct netfs_io_request *rreq,
48 struct netfs_cache_resources *cres = &rreq->cache_resources;
52 iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages,
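
The iov_iter_xarray() calls at lines 26 and 52 both describe the inode's page cache as the destination of a read, covering whatever part of the subrequest is still unfilled. A minimal sketch of that setup, assuming the usual netfs request/subrequest fields; sketch_setup_dest_iter() is a hypothetical helper and the actual cache or server I/O is omitted:

#include <linux/netfs.h>
#include <linux/uio.h>

/* Point an iov_iter at the folios attached to the inode's mapping so the
 * read lands straight in the page cache.  The range starts after whatever
 * has already been transferred for this subrequest. */
static void sketch_setup_dest_iter(struct netfs_io_subrequest *subreq,
                                   struct iov_iter *iter)
{
        struct netfs_io_request *rreq = subreq->rreq;

        iov_iter_xarray(iter, ITER_DEST, &rreq->mapping->i_pages,
                        subreq->start + subreq->transferred,
                        subreq->len - subreq->transferred);
}
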
63 static void netfs_fill_with_zeroes(struct netfs_io_request *rreq,
87 static void netfs_read_from_server(struct netfs_io_request *rreq,
91 rreq->netfs_ops->issue_read(subreq);
97 static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
99 trace_netfs_rreq(rreq, netfs_rreq_trace_done);
100 netfs_clear_subrequests(rreq, was_async);
101 netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
110 static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
120 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
121 XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
139 netfs_rreq_completed(rreq, was_async);
146 struct netfs_io_request *rreq = subreq->rreq;
150 trace_netfs_failure(rreq, subreq, transferred_or_error,
159 if (atomic_dec_and_test(&rreq->nr_copy_ops))
160 netfs_rreq_unmark_after_write(rreq, was_async);
169 static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
171 struct netfs_cache_resources *cres = &rreq->cache_resources;
176 trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
181 atomic_inc(&rreq->nr_copy_ops);
183 list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
191 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
193 while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
204 rreq->i_size, true);
206 trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
211 iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
214 atomic_inc(&rreq->nr_copy_ops);
223 if (atomic_dec_and_test(&rreq->nr_copy_ops))
224 netfs_rreq_unmark_after_write(rreq, false);
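
Lines 181, 214 and 223-224 show the completion counting used while copying downloaded data into the cache: the request takes one bias on nr_copy_ops up front, each issued write takes another, and whoever drops the count to zero runs netfs_rreq_unmark_after_write(). A condensed sketch of that pattern; sketch_issue_copies() is a hypothetical name and the actual write submission is elided:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/netfs.h>

static void sketch_issue_copies(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;

        /* Bias the counter so it cannot hit zero while writes are still
         * being queued. */
        atomic_inc(&rreq->nr_copy_ops);

        list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
                atomic_inc(&rreq->nr_copy_ops);
                /* ... build an ITER_SOURCE iterator over the page cache
                 * (as at line 211) and start the asynchronous write to
                 * the cache; its completion handler performs the
                 * matching atomic_dec_and_test() ... */
        }

        /* Drop the bias; if every write has already finished (or none
         * was issued), finishing up falls to this thread. */
        if (atomic_dec_and_test(&rreq->nr_copy_ops))
                netfs_rreq_unmark_after_write(rreq, false); /* lines 110-139 */
}
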
229 struct netfs_io_request *rreq =
232 netfs_rreq_do_write_to_cache(rreq);
235 static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
237 rreq->work.func = netfs_rreq_write_to_cache_work;
238 if (!queue_work(system_unbound_wq, &rreq->work))
245 static void netfs_rreq_short_read(struct netfs_io_request *rreq,
255 atomic_inc(&rreq->nr_outstanding);
257 netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
259 netfs_read_from_server(rreq, subreq);
263 * Resubmit any short or failed operations. Returns true if we got the rreq
266 static bool netfs_rreq_perform_resubmissions(struct netfs_io_request *rreq)
272 trace_netfs_rreq(rreq, netfs_rreq_trace_resubmit);
277 atomic_inc(&rreq->nr_outstanding);
279 __clear_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
280 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
289 atomic_inc(&rreq->nr_outstanding);
290 netfs_read_from_server(rreq, subreq);
292 netfs_rreq_short_read(rreq, subreq);
297 if (atomic_dec_and_test(&rreq->nr_outstanding))
300 wake_up_var(&rreq->nr_outstanding);
307 static void netfs_rreq_is_still_valid(struct netfs_io_request *rreq)
311 if (!rreq->netfs_ops->is_still_valid ||
312 rreq->netfs_ops->is_still_valid(rreq))
315 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
318 __set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
329 static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
331 trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
334 netfs_rreq_is_still_valid(rreq);
336 if (!test_bit(NETFS_RREQ_FAILED, &rreq->flags) &&
337 test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags)) {
338 if (netfs_rreq_perform_resubmissions(rreq))
343 netfs_rreq_unlock_folios(rreq);
345 clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
346 wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
348 if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
349 return netfs_rreq_write_to_cache(rreq);
351 netfs_rreq_completed(rreq, was_async);
356 struct netfs_io_request *rreq =
358 netfs_rreq_assess(rreq, false);
365 static void netfs_rreq_terminated(struct netfs_io_request *rreq,
368 if (test_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags) &&
370 if (!queue_work(system_unbound_wq, &rreq->work))
373 netfs_rreq_assess(rreq, was_async);
399 struct netfs_io_request *rreq = subreq->rreq;
419 trace_netfs_failure(rreq, subreq, transferred_or_error,
426 rreq->debug_id, subreq->debug_index,
438 set_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
444 u = atomic_dec_return(&rreq->nr_outstanding);
446 netfs_rreq_terminated(rreq, was_async);
448 wake_up_var(&rreq->nr_outstanding);
470 set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
476 set_bit(NETFS_RREQ_INCOMPLETE_IO, &rreq->flags);
479 set_bit(NETFS_RREQ_FAILED, &rreq->flags);
480 rreq->error = subreq->error;
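
Lines 444-448 are the hand-off at subrequest completion: the count of in-flight subrequests is dropped, the last one to finish drives netfs_rreq_terminated(), and the second-to-last pokes any synchronous waiter parked in wait_var_event(). The branch conditions below are a reconstruction from those lines, not a verbatim copy, and sketch_account_subreq_done() is a hypothetical wrapper:

#include <linux/atomic.h>
#include <linux/netfs.h>
#include <linux/wait_bit.h>

static void sketch_account_subreq_done(struct netfs_io_request *rreq,
                                       bool was_async)
{
        int u;

        /* Drop this subrequest's contribution to the in-flight count. */
        u = atomic_dec_return(&rreq->nr_outstanding);
        if (u == 0)
                /* Last one out: assess (or requeue) the whole request. */
                netfs_rreq_terminated(rreq, was_async);
        else if (u == 1)
                /* Only the submitter's own bias remains: wake the
                 * wait_var_event() in netfs_begin_read() (line 639). */
                wake_up_var(&rreq->nr_outstanding);
}
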
489 struct netfs_io_request *rreq = subreq->rreq;
490 struct netfs_cache_resources *cres = &rreq->cache_resources;
494 if (subreq->start >= rreq->i_size)
503 netfs_rreq_prepare_read(struct netfs_io_request *rreq,
508 _enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
510 source = netfs_cache_prepare_read(subreq, rreq->i_size);
521 if (subreq->len > rreq->i_size - subreq->start)
522 subreq->len = rreq->i_size - subreq->start;
524 if (rreq->netfs_ops->clamp_length &&
525 !rreq->netfs_ops->clamp_length(subreq)) {
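
Lines 521-525 clamp each slice twice: first against the file size held in rreq->i_size, then against any limit the filesystem imposes through its ->clamp_length() hook. For illustration, a hypothetical filesystem-side implementation of that hook, capping slices at a made-up maximum read size (example_rsize is not from the listing):

#include <linux/netfs.h>

/* Hypothetical ->clamp_length() implementation: shrink the slice to the
 * server's negotiated maximum read size.  A false return tells the core
 * that the slice could not be prepared. */
static bool example_clamp_length(struct netfs_io_subrequest *subreq)
{
        const size_t example_rsize = 65536;

        if (subreq->len > example_rsize)
                subreq->len = example_rsize;
        return true;
}
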
543 static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
549 subreq = netfs_alloc_subrequest(rreq);
554 subreq->start = rreq->start + rreq->submitted;
555 subreq->len = rreq->len - rreq->submitted;
557 _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
558 list_add_tail(&subreq->rreq_link, &rreq->subrequests);
568 source = netfs_rreq_prepare_read(rreq, subreq);
572 atomic_inc(&rreq->nr_outstanding);
574 rreq->submitted += subreq->len;
579 netfs_fill_with_zeroes(rreq, subreq);
582 netfs_read_from_server(rreq, subreq);
585 netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
594 rreq->error = subreq->error;
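
Lines 549-594 carve the request into consecutive slices: each new subrequest begins where the previous submission ended, may be trimmed by netfs_rreq_prepare_read(), is queued on rreq->subrequests, and is then accounted in rreq->submitted. A condensed sketch of that loop body; sketch_submit_slice() is a hypothetical stand-in, and the per-source dispatch and error paths are left out:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/netfs.h>

static bool sketch_submit_slice(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;

        subreq = netfs_alloc_subrequest(rreq);  /* line 549 */
        if (!subreq)
                return false;

        /* The slice provisionally covers everything not yet submitted;
         * netfs_rreq_prepare_read() and ->clamp_length() may shrink it
         * before it is issued. */
        subreq->start = rreq->start + rreq->submitted;
        subreq->len = rreq->len - rreq->submitted;
        list_add_tail(&subreq->rreq_link, &rreq->subrequests);

        atomic_inc(&rreq->nr_outstanding);
        rreq->submitted += subreq->len;

        /* ... hand the slice to the cache, the server or the zero-filler
         * depending on the source chosen by prepare_read() ... */
        return true;
}
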
604 int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
610 rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
612 if (rreq->len == 0) {
613 pr_err("Zero-sized read [R=%x]\n", rreq->debug_id);
614 netfs_put_request(rreq, false, netfs_rreq_trace_put_zero_len);
618 INIT_WORK(&rreq->work, netfs_rreq_work);
621 netfs_get_request(rreq, netfs_rreq_trace_get_hold);
626 atomic_set(&rreq->nr_outstanding, 1);
628 if (!netfs_rreq_submit_slice(rreq, &debug_index))
631 } while (rreq->submitted < rreq->len);
639 wait_var_event(&rreq->nr_outstanding,
640 atomic_read(&rreq->nr_outstanding) == 1);
641 netfs_rreq_assess(rreq, false);
642 if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags))
647 ret = rreq->error;
648 if (ret == 0 && rreq->submitted < rreq->len) {
649 trace_netfs_failure(rreq, NULL, ret, netfs_fail_short_read);
652 netfs_put_request(rreq, false, netfs_rreq_trace_put_hold);
655 if (atomic_dec_and_test(&rreq->nr_outstanding))
656 netfs_rreq_assess(rreq, false);
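
Finally, lines 604-656 tie the pieces together in netfs_begin_read(): nr_outstanding starts at 1 (the submitting thread's own reference), slices are submitted until rreq->submitted covers rreq->len, and then either the caller waits synchronously for the count to fall back to 1 and assesses the results itself, or it drops its reference and lets the last completion do so. A condensed sketch of that shape, reusing the hypothetical sketch_submit_slice() from above and omitting the locking, tracing, short-read check and error paths:

#include <linux/atomic.h>
#include <linux/netfs.h>
#include <linux/wait_bit.h>

static int sketch_begin_read(struct netfs_io_request *rreq, bool sync)
{
        /* The initial 1 is the submitting thread's own reference on the
         * count of in-flight subrequests (line 626). */
        atomic_set(&rreq->nr_outstanding, 1);

        do {
                if (!sketch_submit_slice(rreq))
                        break;
        } while (rreq->submitted < rreq->len);

        if (sync) {
                /* Wait until only our own reference remains, then assess
                 * the results in this thread (lines 639-641). */
                wait_var_event(&rreq->nr_outstanding,
                               atomic_read(&rreq->nr_outstanding) == 1);
                netfs_rreq_assess(rreq, false);
                return rreq->error;
        }

        /* Asynchronous caller: drop our reference; if every subrequest
         * has already completed, the assessment falls to us
         * (lines 655-656). */
        if (atomic_dec_and_test(&rreq->nr_outstanding))
                netfs_rreq_assess(rreq, false);
        return 0;
}
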