Lines matching defs:rreq — occurrences of the struct netfs_io_request pointer rreq in the netfs buffered-read code; the number at the start of each line is its line number in the source file.
16 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
20 pgoff_t start_page = rreq->start / PAGE_SIZE;
21 pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
25 XA_STATE(xas, &rreq->mapping->i_pages, start_page);
27 if (test_bit(NETFS_RREQ_FAILED, &rreq->flags)) {
28 __clear_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags);
29 list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
40 subreq = list_first_entry(&rreq->subrequests,
44 trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
75 if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
92 if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
93 if (folio_index(folio) == rreq->no_unlock_folio &&
94 test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
103 if (rreq->netfs_ops->done)
104 rreq->netfs_ops->done(rreq);
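The block above is the folio-unlock pass, netfs_rreq_unlock_folios(): the request's byte range (rreq->start, rreq->len) is turned into a page-index range, an XA_STATE walk over rreq->mapping->i_pages visits every folio the request covers, and each folio is unlocked unless unlocking is suppressed (NETFS_RREQ_DONT_UNLOCK_FOLIOS) or the folio is the one write_begin wants kept locked (NETFS_RREQ_NO_UNLOCK_FOLIO / rreq->no_unlock_folio). A minimal sketch of that walk, not the in-tree code, with the failure and copy-to-cache bookkeeping from lines 27-40 left out:

    #include <linux/netfs.h>     /* struct netfs_io_request, NETFS_RREQ_* flag bits */
    #include <linux/pagemap.h>   /* struct folio, folio_unlock(), folio_index() */
    #include <linux/xarray.h>    /* XA_STATE(), xas_for_each(), xas_retry() */

    /* Sketch only: unlock the folios backing rreq's byte range. */
    static void sketch_unlock_folios(struct netfs_io_request *rreq)
    {
            pgoff_t start_page = rreq->start / PAGE_SIZE;
            pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
            struct folio *folio;
            XA_STATE(xas, &rreq->mapping->i_pages, start_page);

            rcu_read_lock();
            xas_for_each(&xas, folio, last_page) {
                    if (xas_retry(&xas, folio))
                            continue;
                    if (test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags))
                            continue;       /* caller handles unlocking itself */
                    if (test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags) &&
                        folio_index(folio) == rreq->no_unlock_folio)
                            continue;       /* write_begin keeps this folio locked */
                    folio_unlock(folio);
            }
            rcu_read_unlock();
    }

Once everything is unlocked, the ->done() call at lines 103-104 gives the filesystem a completion hook.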
107 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
110 struct netfs_cache_resources *cres = &rreq->cache_resources;
116 static void netfs_rreq_expand(struct netfs_io_request *rreq,
122 netfs_cache_expand_readahead(rreq, &rreq->start, &rreq->len, rreq->i_size);
127 if (rreq->netfs_ops->expand_readahead)
128 rreq->netfs_ops->expand_readahead(rreq);
139 if (rreq->start != readahead_pos(ractl) ||
140 rreq->len != readahead_length(ractl)) {
141 readahead_expand(ractl, rreq->start, rreq->len);
142 rreq->start = readahead_pos(ractl);
143 rreq->len = readahead_length(ractl);
145 trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
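Lines 107-145 cover request expansion: netfs_cache_expand_readahead() lets the cache widen the byte range first, the filesystem's optional ->expand_readahead() op (lines 127-128) gets the same chance, and lines 139-143 then grow the VM's readahead window with readahead_expand() and re-read readahead_pos()/readahead_length(), since the window may not grow by exactly the amount asked for. As a hedged illustration, here is a hypothetical ->expand_readahead() implementation that rounds the request out to an invented 256 KiB granule; only rreq->start, rreq->len and rreq->i_size come from the listing, the granule and clamping policy are made up:

    #include <linux/kernel.h>    /* round_down(), round_up(), min_t() */
    #include <linux/netfs.h>

    /* Hypothetical op: widen the request to whole 256 KiB blocks, but do not
     * run past the end of the file (rounded to the same granule). */
    static void example_expand_readahead(struct netfs_io_request *rreq)
    {
            const loff_t granule = 256 * 1024;
            loff_t start = round_down(rreq->start, granule);
            loff_t end = min_t(loff_t,
                               round_up(rreq->start + rreq->len, granule),
                               round_up(rreq->i_size, granule));

            rreq->start = start;
            rreq->len = end - start;
    }

Line 128 implies the op receives only the request pointer, so a client would hang a function like this off the expand_readahead member of the ops table reached through rreq->netfs_ops.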
167 struct netfs_io_request *rreq;
176 rreq = netfs_alloc_request(ractl->mapping, ractl->file,
180 if (IS_ERR(rreq))
184 ret = ctx->ops->begin_cache_operation(rreq);
190 trace_netfs_read(rreq, readahead_pos(ractl), readahead_length(ractl),
193 netfs_rreq_expand(rreq, ractl);
201 netfs_begin_read(rreq, false);
205 netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
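Lines 167-205 look like the readahead entry point: a request covering the readahead window is allocated, the cache gets a chance to attach via ctx->ops->begin_cache_operation() (line 184), the request and window are expanded, and netfs_begin_read(rreq, false) fires the I/O without waiting; if setup fails the request is dropped with netfs_put_request() (line 205). Those helpers are internal to the library, so a filesystem built on it normally just points its read-side address_space_operations at the exported entry points. A sketch of that wiring, assuming the exported names netfs_readahead() and netfs_read_folio() current at the time of this listing; "myfs" is invented:

    #include <linux/fs.h>
    #include <linux/netfs.h>

    /* Hypothetical client: only the read hooks relevant to this listing. */
    static const struct address_space_operations myfs_aops = {
            .read_folio = netfs_read_folio,   /* synchronous single-folio read */
            .readahead  = netfs_readahead,    /* asynchronous readahead */
            /* write_begin usually remains a filesystem function that calls the
             * netfs write_begin helper shown at lines 346-427 below. */
    };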
227 struct netfs_io_request *rreq;
233 rreq = netfs_alloc_request(mapping, file,
236 if (IS_ERR(rreq)) {
237 ret = PTR_ERR(rreq);
242 ret = ctx->ops->begin_cache_operation(rreq);
248 trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
249 return netfs_begin_read(rreq, true);
252 netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
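Lines 227-252 are the single-folio read path. It differs from readahead mainly in the last argument to netfs_begin_read(): assuming that boolean means "wait for completion", which is how the call sites read, the two uses compare as:

    netfs_begin_read(rreq, false);       /* line 201: readahead, fire and return */
    ret = netfs_begin_read(rreq, true);  /* lines 249, 413: wait so the data is
                                          * up to date before returning */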
346 struct netfs_io_request *rreq;
383 rreq = netfs_alloc_request(mapping, file,
386 if (IS_ERR(rreq)) {
387 ret = PTR_ERR(rreq);
390 rreq->no_unlock_folio = folio_index(folio);
391 __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
394 ret = ctx->ops->begin_cache_operation(rreq);
400 trace_netfs_read(rreq, pos, len, netfs_read_trace_write_begin);
406 netfs_rreq_expand(rreq, &ractl);
413 ret = netfs_begin_read(rreq, true);
427 netfs_put_request(rreq, false, netfs_rreq_trace_put_failed);
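Lines 346-427 appear to be the write_begin helper: the folio being prepared for a partial write has to be read in first, but it must be handed back to the caller still locked. That is what the NO_UNLOCK_FOLIO pair at lines 390-391 arranges and what the check at lines 93-94 honours during the unlock pass. A minimal sketch of the hand-off, assuming rreq and folio are already set up:

    /* Record the folio the caller must get back locked, then read synchronously
     * so its contents are valid on return; every other folio in the range is
     * unlocked by the completion path. */
    rreq->no_unlock_folio = folio_index(folio);
    __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);

    ret = netfs_begin_read(rreq, true);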