Lines matching defs:fbatch — struct folio_batch handling in mm/filemap.c (the number on each hit is its line number in that file)
269 * @fbatch: batch of folios to delete
272 * @fbatch from the mapping. The function expects @fbatch to be sorted
274 * It tolerates holes in @fbatch (mapping entries at those indices are not
280 struct folio_batch *fbatch)
282 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
289 if (i >= folio_batch_count(fbatch))
302 if (folio != fbatch->folios[i]) {
304 fbatch->folios[i]->index, folio);
321 struct folio_batch *fbatch)
325 if (!folio_batch_count(fbatch))
330 for (i = 0; i < folio_batch_count(fbatch); i++) {
331 struct folio *folio = fbatch->folios[i];
336 page_cache_delete_batch(mapping, fbatch);
342 for (i = 0; i < folio_batch_count(fbatch); i++)
343 filemap_free_folio(mapping, fbatch->folios[i]);
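The hits at 269-304 come from page_cache_delete_batch(), the static helper that removes an index-sorted batch of folios from the mapping's XArray in a single walk: it starts an XA_STATE at the first folio's index (282), stops once the batch is consumed (289), tolerates holes between batch entries (274), and skips tree entries that are not in the batch while asserting the sort order still holds (302-304). Lines 321-343 are its exported wrapper delete_from_page_cache_batch(), which bails on an empty batch (325), walks the batch for per-folio unaccounting (330-331), runs the batch delete (336), and finally frees each folio (342-343). Below is a minimal caller sketch in the style of truncate_inode_pages_range(); the helper name is hypothetical, both lookup and delete helpers are declared in mm/internal.h (so this pattern is only available inside mm/), and a real truncation would also unmap and clean each folio before deleting it:

        /* Hypothetical helper: evict every whole folio cached in [start, end]. */
        static void drop_cached_range_sketch(struct address_space *mapping,
                                             pgoff_t start, pgoff_t end)
        {
                struct folio_batch fbatch;
                pgoff_t indices[PAGEVEC_SIZE];
                unsigned int i;

                folio_batch_init(&fbatch);
                while (find_lock_entries(mapping, &start, end, &fbatch, indices)) {
                        /* drop shadow/DAX value entries; the batch stays sorted */
                        folio_batch_remove_exceptionals(&fbatch);
                        delete_from_page_cache_batch(mapping, &fbatch);
                        for (i = 0; i < folio_batch_count(&fbatch); i++)
                                folio_unlock(fbatch.folios[i]);
                        folio_batch_release(&fbatch);
                        cond_resched();
                }
        }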
508 struct folio_batch fbatch;
511 folio_batch_init(&fbatch);
517 PAGECACHE_TAG_WRITEBACK, &fbatch);
523 struct folio *folio = fbatch.folios[i];
528 folio_batch_release(&fbatch);
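Lines 508-528 sit in __filemap_fdatawait_range(): a stack batch (508, 511) is filled with folios tagged PAGECACHE_TAG_WRITEBACK (517), each folio is waited on (523), and the batch is released before the next round (528). The same gather/wait/release rhythm works for any tagged walk; a reduced sketch with a hypothetical name and the per-folio error bookkeeping left out:

        static void wait_range_writeback_sketch(struct address_space *mapping,
                                                pgoff_t index, pgoff_t end)
        {
                struct folio_batch fbatch;

                folio_batch_init(&fbatch);
                while (index <= end) {
                        unsigned int i, nr;

                        nr = filemap_get_folios_tag(mapping, &index, end,
                                        PAGECACHE_TAG_WRITEBACK, &fbatch);
                        if (!nr)
                                break;
                        for (i = 0; i < nr; i++)
                                folio_wait_writeback(fbatch.folios[i]);
                        folio_batch_release(&fbatch);
                        cond_resched();
                }
        }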
2009 * @fbatch: Where the resulting entries are placed.
2013 * the mapping. The entries are placed in @fbatch. find_get_entries()
2025 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2032 indices[fbatch->nr] = xas.xa_index;
2033 if (!folio_batch_add(fbatch, folio))
2038 if (folio_batch_count(fbatch)) {
2040 int idx = folio_batch_count(fbatch) - 1;
2042 folio = fbatch->folios[idx];
2047 return folio_batch_count(fbatch);
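Lines 2009-2047 are find_get_entries(), which fills @fbatch with up to one batch of entries from the range: present folios and value entries (shadow, swap, DAX) alike, mirroring each entry's index into @indices (2032) because a value entry has no folio->index to consult. The tail hits (2038-2042) reposition *start past the last returned folio so successive calls sweep the whole range. A hedged consumer sketch (this is an mm-internal API from mm/internal.h; the loop body is hypothetical):

        struct folio_batch fbatch;
        pgoff_t indices[PAGEVEC_SIZE];
        unsigned int i;

        folio_batch_init(&fbatch);
        while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        if (xa_is_value(folio))
                                continue;  /* value entry at indices[i]; no refcount held */
                        /* real folio, referenced by the lookup */
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
        }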
2055 * @fbatch: Where the resulting entries are placed.
2056 * @indices: The cache indices of the entries in @fbatch.
2071 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2091 indices[fbatch->nr] = xas.xa_index;
2092 if (!folio_batch_add(fbatch, folio))
2102 if (folio_batch_count(fbatch)) {
2104 int idx = folio_batch_count(fbatch) - 1;
2106 folio = fbatch->folios[idx];
2111 return folio_batch_count(fbatch);
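Lines 2055-2111 are find_lock_entries(), the locking twin of the function above: same batch-plus-indices contract (2091), but each returned folio is locked as well as referenced, folios already locked elsewhere or under writeback are skipped, and folios only partially inside the range are not returned, which is what lets truncation operate on whole folios. That puts an unlock-and-release obligation on the caller, roughly:

        while (find_lock_entries(mapping, &start, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        if (xa_is_value(folio))
                                continue;
                        /* folio is locked and lies wholly inside the range */
                        folio_unlock(folio);
                }
                folio_batch_remove_exceptionals(&fbatch);
                folio_batch_release(&fbatch);
        }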
2119 * @fbatch: The batch to fill.
2123 * in @fbatch with an elevated reference count.
2136 pgoff_t end, struct folio_batch *fbatch)
2146 if (!folio_batch_add(fbatch, folio)) {
2169 return folio_batch_count(fbatch);
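Lines 2119-2169 are filemap_get_folios(), the simplest exported lookup: it batches whatever folios are present in [*start, end], each with an elevated reference count (2123), returns no value entries, and advances *start for the next call. The canonical consumer loop:

        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        /* inspect or touch @folio here */
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }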
2178 * @fbatch: The batch to fill
2189 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2214 if (!folio_batch_add(fbatch, folio)) {
2231 nr = folio_batch_count(fbatch);
2234 folio = fbatch->folios[nr - 1];
2242 return folio_batch_count(fbatch);
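Lines 2178-2242 are filemap_get_folios_contig(), which differs from filemap_get_folios() in one way: it stops at the first gap rather than skipping it, so the returned batch always describes a single unbroken run; the tail hits (2231-2234) recompute *start from the last folio in that run. A small probe sketch (hypothetical use):

        pgoff_t next = start;
        unsigned int nr;

        folio_batch_init(&fbatch);
        nr = filemap_get_folios_contig(mapping, &next, end, &fbatch);
        if (nr)
                pr_info("contiguous run of %u folios; next index %lu\n",
                        nr, next);
        folio_batch_release(&fbatch);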
2252 * @fbatch: The batch to fill
2260 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2274 if (!folio_batch_add(fbatch, folio)) {
2296 return folio_batch_count(fbatch);
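Lines 2252-2296 are filemap_get_folios_tag(), the tag-filtered variant that the writeback waiter at 517 relies on; any xa_mark_t works. The shape of a dirty-folio walk (hypothetical body; a real writeback path has stricter locking and redirty handling):

        while ((nr = filemap_get_folios_tag(mapping, &index, end,
                                PAGECACHE_TAG_DIRTY, &fbatch))) {
                for (i = 0; i < nr; i++) {
                        struct folio *folio = fbatch.folios[i];

                        folio_lock(folio);
                        /* ...kick off writeback for @folio... */
                        folio_unlock(folio);
                }
                folio_batch_release(&fbatch);
                cond_resched();
        }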
2330 pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2349 if (!folio_batch_add(fbatch, folio))
2480 struct folio_batch *fbatch)
2515 folio_batch_add(fbatch, folio);
2536 struct folio_batch *fbatch, bool need_uptodate)
2552 filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2553 if (!folio_batch_count(fbatch)) {
2558 filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2560 if (!folio_batch_count(fbatch)) {
2564 iocb->ki_pos >> PAGE_SHIFT, fbatch);
2570 folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2578 folio_batch_count(fbatch) > 1)
2590 if (likely(--fbatch->nr))
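The hits from 2330 onward belong to the buffered-read pipeline, all static to filemap.c. filemap_get_read_batch() (2330-2349) gathers consecutive folios for a read; filemap_create_folio() (2480-2515) allocates, inserts, and reads a fresh folio when the cache has nothing, appending it to the batch (2515); filemap_get_pages() (2536-2590) orchestrates them: try the batch (2552), on a miss kick synchronous readahead and retry (2558), fall back to creating a folio (2560-2564), then validate only the last folio in the batch (2570), and on failure drop that folio and still return the earlier ones if any remain (2590). Paraphrased control flow, not verbatim kernel code; error and NOWAIT paths trimmed:

        filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
        if (!folio_batch_count(fbatch)) {
                page_cache_sync_readahead(mapping, ra, filp, index,
                                last_index - index);
                filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
        }
        if (!folio_batch_count(fbatch))
                return filemap_create_folio(filp, mapping,
                                iocb->ki_pos >> PAGE_SHIFT, fbatch);

        /* Only the final folio can still need readahead or I/O, because
         * the batch lookup stops at the first folio needing attention;
         * if fixing it fails, --fbatch->nr drops it from the batch. */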
2624 struct folio_batch fbatch;
2636 folio_batch_init(&fbatch);
2652 error = filemap_get_pages(iocb, iter->count, &fbatch, false);
2689 fbatch.folios[0]))
2690 folio_mark_accessed(fbatch.folios[0]);
2692 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2693 struct folio *folio = fbatch.folios[i];
2724 for (i = 0; i < folio_batch_count(&fbatch); i++)
2725 folio_put(fbatch.folios[i]);
2726 folio_batch_init(&fbatch);
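Lines 2624-2726 are filemap_read(), the engine behind generic_file_read_iter(). A batch on the stack (2624) is initialised once (2636) and refilled each round by filemap_get_pages() (2652); the first folio of a round is marked accessed only when the read has moved on to a new folio (2689-2690), so re-reading the same large folio does not re-promote it; an inner loop then copies each folio's bytes to the iov_iter (2692-2693); finally every reference is dropped folio by folio and the batch reinitialised for the next round (2724-2726). Stripped to its skeleton (hypothetical sketch; position and partial-copy bookkeeping omitted):

        struct folio_batch fbatch;
        unsigned int i;
        int error;

        folio_batch_init(&fbatch);
        do {
                error = filemap_get_pages(iocb, iov_iter_count(iter),
                                &fbatch, false);
                if (error < 0)
                        break;
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];
                        /* copy_folio_to_iter(folio, offset, bytes, iter)
                         * and advance iocb->ki_pos */
                }
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_put(fbatch.folios[i]);
                folio_batch_init(&fbatch);      /* reuse next round */
        } while (iov_iter_count(iter));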
2901 struct folio_batch fbatch;
2919 folio_batch_init(&fbatch);
2928 error = filemap_get_pages(&iocb, len, &fbatch, true);
2951 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2952 struct folio *folio = fbatch.folios[i];
2979 folio_batch_release(&fbatch);
2983 folio_batch_release(&fbatch);
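Lines 2901-2983 are filemap_splice_read(), the splice-to-pipe counterpart of filemap_read(): the same stack batch (2901, 2919) refilled by filemap_get_pages() (2928), here with need_uptodate=true because folios handed to a pipe must be fully uptodate, a per-folio loop feeding the pipe (2951-2952), and folio_batch_release() on both the loop path and the exit path (2979, 2983). Filesystems reach this code by wiring it into their file_operations; a representative sketch (the struct name is hypothetical, the hooks are real):

        static const struct file_operations sketch_fops = {
                .read_iter      = generic_file_read_iter,
                .splice_read    = filemap_splice_read,
                /* .llseek, .mmap, ... */
        };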