Lines matching defs: folio (fs/orangefs/inode.c, Linux kernel; each match is prefixed with its line number in that file)

151 static int orangefs_writepages_callback(struct folio *folio,
155 struct orangefs_write_range *wr = folio->private;
159 folio_unlock(folio);
172 ow->pages[ow->npages++] = &folio->page;
184 ow->pages[ow->npages++] = &folio->page;
194 ret = orangefs_writepage_locked(&folio->page, wbc);
195 mapping_set_error(folio->mapping, ret);
196 folio_unlock(folio);
197 folio_end_writeback(folio);
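
The matches at 151-197 outline the callback that write_cache_pages() invokes once per dirty folio: it pulls the struct orangefs_write_range from folio->private and batches byte-contiguous write ranges before falling back to a single locked write. A condensed sketch of that flow; the orangefs_writepages batch struct (off/len/pages/npages/maxpages) and the orangefs_writepages_work() flush helper are inferred from the surrounding file, and the credential-mismatch flush in the real function is omitted for brevity:

static int orangefs_writepages_callback(struct folio *folio,
		struct writeback_control *wbc, void *data)
{
	struct orangefs_writepages *ow = data;	/* batch being assembled */
	struct orangefs_write_range *wr = folio->private;
	int ret;

	if (!wr) {
		/* no tracked range, so nothing to write for this folio */
		folio_unlock(folio);
		return 0;
	}

	ret = -1;
	if (ow->npages == 0) {
		/* start a new batch at this folio's write range */
		ow->off = wr->pos;
		ow->len = wr->len;
		ow->pages[ow->npages++] = &folio->page;
		ret = 0;
	} else if (ow->off + ow->len == wr->pos) {
		/* byte-contiguous with the batch: extend it */
		ow->len += wr->len;
		ow->pages[ow->npages++] = &folio->page;
		ret = 0;
	}

	if (ret == -1) {
		/* not mergeable: flush the batch, then write this folio alone */
		if (ow->npages) {
			orangefs_writepages_work(ow, wbc);
			ow->npages = 0;
		}
		ret = orangefs_writepage_locked(&folio->page, wbc);
		mapping_set_error(folio->mapping, ret);
		folio_unlock(folio);
		folio_end_writeback(folio);
	} else if (ow->npages == ow->maxpages) {
		/* batch is full: flush it now */
		orangefs_writepages_work(ow, wbc);
		ow->npages = 0;
	}
	return ret;
}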
239 static int orangefs_launder_folio(struct folio *);
247 struct folio *folio;
278 while ((folio = readahead_folio(rac))) {
280 folio_mark_uptodate(folio);
281 folio_unlock(folio);
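
Lines 247-281 sit in ->readahead(). After one large direct read is issued over the whole window, the tail loop at 278-281 takes each folio back from readahead_folio(), marks it up to date only if the read succeeded, and unlocks it either way. A minimal sketch, assuming ret holds the result of the preceding wait_for_direct_io() call:

	/* clean up: data (or an error) has already landed in the page cache */
	while ((folio = readahead_folio(rac))) {
		if (!ret)
			folio_mark_uptodate(folio);
		folio_unlock(folio);	/* ->readahead() must unlock every folio */
	}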
285 static int orangefs_read_folio(struct file *file, struct folio *folio)
287 struct inode *inode = folio->mapping->host;
291 loff_t off; /* offset of this folio in the file */
293 if (folio_test_dirty(folio))
294 orangefs_launder_folio(folio);
296 off = folio_pos(folio);
297 bvec_set_folio(&bv, folio, folio_size(folio), 0);
298 iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));
301 folio_size(folio), inode->i_size, NULL, NULL, file);
302 /* this will only zero remaining unread portions of the folio data */
305 flush_dcache_folio(folio);
307 folio_set_error(folio);
309 folio_mark_uptodate(folio);
312 /* unlock the folio after the ->read_folio() routine completes */
313 folio_unlock(folio);
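
Taken together, the matches at 285-313 cover nearly all of ->read_folio(). A sketch of how they fit; wait_for_direct_io() and its ORANGEFS_IO_READ opcode live elsewhere in this file, and error-path details may differ across kernel versions:

static int orangefs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct iov_iter iter;
	struct bio_vec bv;
	ssize_t ret;
	loff_t off; /* offset of this folio in the file */

	/* a dirty folio must be written back before it can be re-read */
	if (folio_test_dirty(folio))
		orangefs_launder_folio(folio);

	off = folio_pos(folio);
	bvec_set_folio(&bv, folio, folio_size(folio), 0);
	iov_iter_bvec(&iter, ITER_DEST, &bv, 1, folio_size(folio));

	ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,
			folio_size(folio), inode->i_size, NULL, NULL, file);
	/* this will only zero remaining unread portions of the folio data */
	iov_iter_zero(~0U, &iter);
	/* takes care of potential aliasing */
	flush_dcache_folio(folio);
	if (ret < 0) {
		folio_set_error(folio);
	} else {
		folio_mark_uptodate(folio);
		ret = 0;
	}
	/* unlock the folio after the ->read_folio() routine completes */
	folio_unlock(folio);
	return ret;
}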
322 struct folio *folio;
334 folio = page_folio(page);
336 if (folio_test_dirty(folio) && !folio_test_private(folio)) {
342 ret = orangefs_launder_folio(folio);
346 if (folio_test_private(folio)) {
348 wr = folio_get_private(folio);
355 ret = orangefs_launder_folio(folio);
369 folio_attach_private(folio, wr);
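
The matches at 322-369 are the heart of ->write_begin(): each folio carries at most one struct orangefs_write_range in folio->private, recording which byte span is dirty and under which credentials. A sketch of the attach/extend logic these lines trace, assuming wr has the pos/len/uid/gid fields used throughout this listing (pos and len here are the write_begin arguments):

	folio = page_folio(page);

	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/* should be impossible; launder since we don't know what's dirty */
		ret = orangefs_launder_folio(folio);
		if (ret)
			return ret;
	}
	if (folio_test_private(folio)) {
		wr = folio_get_private(folio);
		if (wr->pos + wr->len == pos &&
		    uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			wr->len += len;	/* same writer, contiguous: extend */
			goto okay;
		}
		/* different writer or a gap: flush the old range first */
		ret = orangefs_launder_folio(folio);
		if (ret)
			return ret;
	}

	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr)
		return -ENOMEM;
	wr->pos = pos;
	wr->len = len;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	folio_attach_private(folio, wr);
okay:
	return 0;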
409 static void orangefs_invalidate_folio(struct folio *folio,
412 struct orangefs_write_range *wr = folio_get_private(folio);
415 kfree(folio_detach_private(folio));
418 } else if (folio_pos(folio) + offset <= wr->pos &&
419 wr->pos + wr->len <= folio_pos(folio) + offset + length) {
420 kfree(folio_detach_private(folio));
422 folio_cancel_dirty(folio);
425 } else if (wr->pos < folio_pos(folio) + offset &&
426 wr->pos + wr->len <= folio_pos(folio) + offset + length &&
427 folio_pos(folio) + offset < wr->pos + wr->len) {
429 x = wr->pos + wr->len - (folio_pos(folio) + offset);
435 } else if (folio_pos(folio) + offset <= wr->pos &&
436 folio_pos(folio) + offset + length < wr->pos + wr->len &&
437 wr->pos < folio_pos(folio) + offset + length) {
439 x = folio_pos(folio) + offset + length - wr->pos;
446 } else if (wr->pos < folio_pos(folio) + offset &&
447 folio_pos(folio) + offset + length < wr->pos + wr->len) {
459 if (!((folio_pos(folio) + offset + length <= wr->pos) ^
460 (wr->pos + wr->len <= folio_pos(folio) + offset))) {
463 folio_pos(folio) + offset, length);
475 orangefs_launder_folio(folio);
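
The comparison chain at 409-475 is ->invalidate_folio() intersecting the invalidated byte range with the tracked write range. Writing inv_start = folio_pos(folio) + offset and inv_end = inv_start + length, the cases reduce to the following hedged summary (the whole-folio test at the top is an assumption, since the matches do not show it):

	/* whole folio invalidated: drop the write range outright */
	if (offset == 0 && length == PAGE_SIZE) {
		kfree(folio_detach_private(folio));
		return;
	}
	/* write range entirely inside the invalidated span: drop it and
	 * cancel the dirty bit, since nothing is left to write back */
	if (inv_start <= wr->pos && wr->pos + wr->len <= inv_end) {
		kfree(folio_detach_private(folio));
		folio_cancel_dirty(folio);
		return;
	}
	if (wr->pos < inv_start && wr->pos + wr->len <= inv_end &&
	    inv_start < wr->pos + wr->len) {
		/* invalidation chops off the tail of the write range */
		wr->len -= wr->pos + wr->len - inv_start;
	} else if (inv_start <= wr->pos && inv_end < wr->pos + wr->len &&
		   wr->pos < inv_end) {
		/* invalidation chops off the head of the write range */
		wr->len -= inv_end - wr->pos;
		wr->pos = inv_end;
	} else if (wr->pos < inv_start && inv_end < wr->pos + wr->len) {
		/* invalidation punches a hole in the middle: unhandled */
		WARN_ON(1);
		return;
	} else {
		/* disjoint; the XOR test at 459-460 WARNs if they overlap */
		return;
	}
	/* a trimmed but still-live range is written out immediately */
	orangefs_launder_folio(folio);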
478 static bool orangefs_release_folio(struct folio *folio, gfp_t foo)
480 return !folio_test_private(folio);
483 static void orangefs_free_folio(struct folio *folio)
485 kfree(folio_detach_private(folio));
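
release_folio and free_folio close out the private-data lifecycle: a folio still carrying a write range refuses release, and free_folio reclaims the range when the folio is finally torn down. These hooks, with the others in this listing, would be wired into the file's address_space_operations table; a sketch of that wiring (the exact field set varies across kernel versions):

static const struct address_space_operations orangefs_address_operations = {
	.readahead = orangefs_readahead,
	.read_folio = orangefs_read_folio,
	.writepages = orangefs_writepages,
	.dirty_folio = filemap_dirty_folio,
	.write_begin = orangefs_write_begin,
	.write_end = orangefs_write_end,
	.invalidate_folio = orangefs_invalidate_folio,
	.release_folio = orangefs_release_folio,
	.free_folio = orangefs_free_folio,
	.launder_folio = orangefs_launder_folio,
	.direct_IO = orangefs_direct_IO,
};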
488 static int orangefs_launder_folio(struct folio *folio)
495 folio_wait_writeback(folio);
496 if (folio_clear_dirty_for_io(folio)) {
497 r = orangefs_writepage_locked(&folio->page, &wbc);
498 folio_end_writeback(folio);
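
Lines 488-498 are essentially all of ->launder_folio(): wait out any writeback already in flight, and if the folio was dirty, write it synchronously while it stays locked. A sketch, assuming the WB_SYNC_ALL writeback_control the surrounding lines imply:

static int orangefs_launder_folio(struct folio *folio)
{
	int r = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
	};

	folio_wait_writeback(folio);
	if (folio_clear_dirty_for_io(folio)) {
		r = orangefs_writepage_locked(&folio->page, &wbc);
		folio_end_writeback(folio);
	}
	/* the folio stays locked; the caller unlocks it */
	return r;
}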
635 struct folio *folio = page_folio(vmf->page);
649 folio_lock(folio);
650 if (folio_test_dirty(folio) && !folio_test_private(folio)) {
652 * Should be impossible. If it happens, launder the folio
656 if (orangefs_launder_folio(folio)) {
661 if (folio_test_private(folio)) {
662 wr = folio_get_private(folio);
669 if (orangefs_launder_folio(folio)) {
684 folio_attach_private(folio, wr);
688 if (folio->mapping != inode->i_mapping) {
689 folio_unlock(folio);
695 * We mark the folio dirty already here so that when freeze is in
697 * see the dirty folio and writeprotect it again.
699 folio_mark_dirty(folio);
700 folio_wait_stable(folio);
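
The final cluster (635-700) is ->page_mkwrite(). It repeats the write_begin pattern against the faulting page: launder on the impossible dirty-without-private state, reuse or replace the attached write range depending on whose credentials match, re-check that the folio was not truncated away, then dirty it and wait for it to be stable before returning with the folio locked. A condensed sketch; the VM_FAULT_* error paths are simplified relative to mainline:

	folio_lock(folio);
	if (folio_test_dirty(folio) && !folio_test_private(folio)) {
		/* should be impossible; launder since we don't know what's dirty */
		if (orangefs_launder_folio(folio)) {
			ret = VM_FAULT_LOCKED | VM_FAULT_RETRY;
			goto out;
		}
	}
	if (folio_test_private(folio)) {
		wr = folio_get_private(folio);
		if (uid_eq(wr->uid, current_fsuid()) &&
		    gid_eq(wr->gid, current_fsgid())) {
			/* same writer: the fault may dirty the whole page */
			wr->pos = page_offset(vmf->page);
			wr->len = PAGE_SIZE;
			goto okay;
		}
		/* different writer: flush the stale range first */
		if (orangefs_launder_folio(folio)) {
			ret = VM_FAULT_LOCKED | VM_FAULT_RETRY;
			goto out;
		}
	}
	wr = kmalloc(sizeof *wr, GFP_KERNEL);
	if (!wr) {
		ret = VM_FAULT_LOCKED | VM_FAULT_RETRY;
		goto out;
	}
	wr->pos = page_offset(vmf->page);
	wr->len = PAGE_SIZE;
	wr->uid = current_fsuid();
	wr->gid = current_fsgid();
	folio_attach_private(folio, wr);
okay:
	file_update_time(vmf->vma->vm_file);
	if (folio->mapping != inode->i_mapping) {
		folio_unlock(folio);	/* raced with truncation */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}
	/*
	 * We mark the folio dirty already here so that when freeze is in
	 * progress, we are guaranteed that writeback during freezing will
	 * see the dirty folio and writeprotect it again.
	 */
	folio_mark_dirty(folio);
	folio_wait_stable(folio);
	ret = VM_FAULT_LOCKED;
out:
	return ret;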