Lines Matching defs:page

22 	struct page *page;
29 /* points to the next page, which contains the following bvecs */ \
30 struct page *nextpage; \
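
The nextpage field above is how the bvec storage chains page-sized sets together: when one set fills up, iteration flips to the page it points at (see z_erofs_bvset_flip further down). A freestanding sketch of that paged-list pattern, with illustrative names and capacity rather than the kernel's:

	#include <stddef.h>

	#define NR_BVECS_PER_SET 14	/* illustrative per-page capacity */

	struct bvec { void *base; unsigned int offset, len; };

	/* one page-sized set of bvecs plus a link to the following set */
	struct bvec_set {
		struct bvec bvecs[NR_BVECS_PER_SET];
		struct bvec_set *next;	/* stored in the next page */
	};

	/* return the slot for logical index i, hopping sets as needed */
	static struct bvec *bvec_at(struct bvec_set *set, size_t i)
	{
		while (i >= NR_BVECS_PER_SET) {
			set = set->next;
			i -= NR_BVECS_PER_SET;
		}
		return &set->bvecs[i];
	}
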
59 /* I: page offset of start position of decompression */
62 /* I: page offset of inline compressed data */
124 * bit 30: I/O error occurred on this page
125 * bit 0 - 29: remaining parts to complete this page
129 static inline void z_erofs_onlinepage_init(struct page *page)
136 set_page_private(page, u.v);
138 SetPagePrivate(page);
141 static inline void z_erofs_onlinepage_split(struct page *page)
143 atomic_inc((atomic_t *)&page->private);
146 static void z_erofs_onlinepage_endio(struct page *page, int err)
150 DBG_BUGON(!PagePrivate(page));
153 orig = atomic_read((atomic_t *)&page->private);
155 } while (atomic_cmpxchg((atomic_t *)&page->private, orig, v) != orig);
158 set_page_private(page, 0);
159 ClearPagePrivate(page);
161 SetPageUptodate(page);
162 unlock_page(page);
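
Taken together, the three onlinepage helpers above implement a split refcount packed into page->private, following the bit layout documented at source lines 124-125: bit 30 records an I/O error and bits 0-29 count the sub-page parts still outstanding. A minimal userspace sketch of the same encoding (the constant name is made up; the kernel keeps this state in page->private and unlocks the page when the count hits zero):

	#include <stdatomic.h>
	#include <stdbool.h>

	#define PAGE_EIO (1u << 30)	/* bit 30: I/O error on this page */

	/* another sub-page part now targets this page */
	static void onlinepage_split(atomic_uint *priv)
	{
		atomic_fetch_add(priv, 1);
	}

	/* one part finished; returns true once all parts are done */
	static bool onlinepage_endio(atomic_uint *priv, bool err)
	{
		unsigned int orig = atomic_load(priv), v;

		do {	/* orig is refreshed by a failed CAS */
			v = (orig - 1) | (err ? PAGE_EIO : 0);
		} while (!atomic_compare_exchange_weak(priv, &orig, v));
		/* complete (and uptodate unless PAGE_EIO got set) */
		return !(v & ~PAGE_EIO);
	}
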
186 struct page *bvpage;
191 static struct page *z_erofs_bvec_iter_end(struct z_erofs_bvec_iter *iter)
198 static struct page *z_erofs_bvset_flip(struct z_erofs_bvec_iter *iter)
202 struct page *nextpage = iter->bvset->nextpage;
203 struct page *oldpage;
233 struct page **candidate_bvpage,
234 struct page **pagepool)
237 struct page *nextpage = *candidate_bvpage;
258 struct page **old_bvpage)
507 * processing page is the tail page of a pcluster, this pcluster can
508 * safely use the whole page (since the previous pcluster is within the
511 * | tail (partial) page | head (partial) page |
515 * [ (*) the page above can be used as inplace I/O. ]
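
The diagram above captures when a file page may double as the I/O buffer for the compressed bits (in-place I/O): the tail (partial) page of the pcluster currently being processed can be reused wholly, since the neighbouring data in that page belongs to the same chain. The payoff is decompressing without a bounce buffer. A simplified sketch of the idea, assuming a decompressor that tolerates this bounded overlap (LZ4, for example, supports in-place decompression given a small margin):

	#include <stddef.h>

	typedef size_t (*decompress_fn)(const unsigned char *in, size_t inlen,
					unsigned char *out, size_t outlen);

	/* Read compressed bytes into the tail of the destination buffer so
	 * no extra pages are needed; with inlen <= outlen the output cursor
	 * trails the input cursor for overlap-tolerant decompressors. */
	static size_t decompress_in_place(unsigned char *buf, size_t outlen,
					  size_t inlen, decompress_fn fn)
	{
		const unsigned char *in = buf + outlen - inlen;

		return fn(in, inlen, buf, outlen);
	}
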
525 struct page *pagepool;
526 struct page *candidate_bvpage;
576 struct page *page;
578 struct page *newpage = NULL;
580 /* the compressed page was loaded before */
581 if (READ_ONCE(pcl->compressed_bvecs[i].page))
584 page = find_get_page(mc, pcl->obj.index + i);
586 if (page) {
587 t = (void *)((unsigned long)page | 1);
595 * try to use cached I/O if page allocation
606 if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
609 if (page)
610 put_page(page);
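
The cast at source line 587 sets bit 0 of the page pointer before stashing it, and pickup_page_for_submission later reads that bit back as the "justfound" flag (source lines 1458-1459). This works because struct page pointers are at least word-aligned, so the low bit is always free. The idiom in isolation, as a sketch:

	#include <stdbool.h>
	#include <stdint.h>

	/* stash a one-bit flag in the low bit of an aligned pointer */
	static inline void *tag_justfound(void *page)
	{
		return (void *)((uintptr_t)page | 1);
	}

	static inline bool justfound(const void *tagged)
	{
		return (uintptr_t)tagged & 1;
	}

	static inline void *untag(const void *tagged)
	{
		return (void *)((uintptr_t)tagged & ~(uintptr_t)1);
	}
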
637 struct page *page = pcl->compressed_bvecs[i].page;
639 if (!page)
642 /* block other users from reclaiming or migrating the page */
643 if (!trylock_page(page))
646 if (!erofs_page_is_managed(sbi, page))
650 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
651 detach_page_private(page);
652 unlock_page(page);
673 if (pcl->compressed_bvecs[i].page == &folio->page) {
674 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
730 if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
731 NULL, bvec->page)) {
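
The cmpxchg above claims a compressed_bvecs[] slot without taking a lock: the page is installed only if the slot still reads NULL, and a non-NULL return value means another path won the slot first. The same claim pattern in portable C11, as a sketch:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	/* install page into an empty slot; false if someone beat us to it */
	static bool claim_slot(_Atomic(void *) *slot, void *page)
	{
		void *expected = NULL;

		return atomic_compare_exchange_strong(slot, &expected, page);
	}
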
752 fe->candidate_bvpage = bvec->page;
893 get_page(map->buf.page);
894 WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
944 static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
964 memcpy_to_page(page, cur, src + erofs_blkoff(sb, pos), cnt);
971 struct page *page)
975 const loff_t offset = page_offset(page);
980 z_erofs_onlinepage_init(page);
1000 zero_user_segment(page, cur, end);
1009 err = z_erofs_read_fragment(inode->i_sb, page, cur, cur + len,
1024 * Ensure the current partial page belongs to this submit chain rather
1026 * those chains are handled asynchronously thus the page cannot be used
1035 .page = page,
1042 z_erofs_onlinepage_split(page);
1063 z_erofs_onlinepage_endio(page, err);
1082 static bool z_erofs_page_is_invalidated(struct page *page)
1084 return !page->mapping && !z_erofs_is_shortlived_page(page);
1088 struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
1093 struct page **decompressed_pages;
1095 struct page **compressed_pages;
1098 struct page **pagepool;
1119 be->decompressed_pages[pgnr] = bvec->page;
1145 dst = kmap_local_page(bvi->bvec.page);
1166 z_erofs_onlinepage_endio(bvi->bvec.page, err);
1176 struct page *old_bvpage;
1188 DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
1207 struct page *page = bvec->page;
1210 if (!page) {
1214 be->compressed_pages[i] = page;
1217 if (!PageUptodate(page))
1222 DBG_BUGON(z_erofs_page_is_invalidated(page));
1223 if (!z_erofs_is_shortlived_page(page)) {
1224 if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
1225 if (!PageUptodate(page))
1249 struct page *page;
1255 /* allocate (de)compressed page arrays if cannot be kept on stack */
1263 sizeof(struct page *) * be->nr_pages);
1271 kvcalloc(be->nr_pages, sizeof(struct page *),
1275 kvcalloc(pclusterpages, sizeof(struct page *),
1307 page = pcl->compressed_bvecs[0].page;
1308 WRITE_ONCE(pcl->compressed_bvecs[0].page, NULL);
1309 put_page(page);
1313 page = be->compressed_pages[i];
1315 if (erofs_page_is_managed(sbi, page))
1317 (void)z_erofs_put_shortlivedpage(be->pagepool, page);
1318 WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
1327 page = be->decompressed_pages[i];
1328 if (!page)
1331 DBG_BUGON(z_erofs_page_is_invalidated(page));
1334 if (z_erofs_put_shortlivedpage(be->pagepool, page))
1336 z_erofs_onlinepage_endio(page, err);
1355 struct page **pagepool)
1383 struct page *pagepool = NULL;
1438 static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
1440 struct page **pagepool,
1448 struct page *oldpage, *page;
1452 page = READ_ONCE(pcl->compressed_bvecs[nr].page);
1453 oldpage = page;
1455 if (!page)
1458 justfound = (unsigned long)page & 1UL;
1459 page = (struct page *)((unsigned long)page & ~1UL);
1465 if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
1466 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
1467 set_page_private(page, 0);
1471 mapping = READ_ONCE(page->mapping);
1481 /* directly return for shortlived page as well */
1482 if (z_erofs_is_shortlived_page(page))
1485 lock_page(page);
1487 /* only true if page reclaim goes wrong, should never happen */
1488 DBG_BUGON(justfound && PagePrivate(page));
1490 /* the page is still in managed cache */
1491 if (page->mapping == mc) {
1492 WRITE_ONCE(pcl->compressed_bvecs[nr].page, page);
1494 if (!PagePrivate(page)) {
1496 * impossible to be !PagePrivate(page) for
1498 * the page is already in compressed_bvecs[].
1503 set_page_private(page, (unsigned long)pcl);
1504 SetPagePrivate(page);
1508 if (PageUptodate(page)) {
1509 unlock_page(page);
1510 page = NULL;
1516 * the managed page has been truncated, it's unsafe to
1517 * reuse this one, let's allocate a new cache-managed page.
1519 DBG_BUGON(page->mapping);
1523 unlock_page(page);
1524 put_page(page);
1526 page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
1527 if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page,
1528 oldpage, page)) {
1529 erofs_pagepool_add(pagepool, page);
1534 if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
1535 /* turn into a temporary page if it fails (1 ref) */
1536 set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
1539 attach_page_private(page, pcl);
1541 put_page(page);
1544 return page;
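
The allocation fallback near the end of pickup_page_for_submission (source lines 1526-1529) follows a publish-or-recycle shape: install the freshly allocated page only if the slot still holds the value observed earlier, otherwise return the page to the local pool and retry. Sketched with hypothetical names:

	#include <stdatomic.h>
	#include <stddef.h>

	/* Publish fresh into slot iff it still holds seen; on a lost race,
	 * recycle the page and signal the caller to retry the lookup. */
	static void *publish_or_recycle(_Atomic(void *) *slot, void *seen,
					void *fresh, void (*recycle)(void *))
	{
		void *expected = seen;

		if (atomic_compare_exchange_strong(slot, &expected, fresh))
			return fresh;
		recycle(fresh);		/* lost the race */
		return NULL;		/* caller retries */
	}
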
1607 struct page *page = bvec->bv_page;
1609 DBG_BUGON(PageUptodate(page));
1610 DBG_BUGON(z_erofs_page_is_invalidated(page));
1612 if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
1614 SetPageUptodate(page);
1615 unlock_page(page);
1680 struct page *page;
1682 page = pickup_page_for_submission(pcl, i++,
1684 if (!page)
1698 if (unlikely(PageWorkingset(page)) && !memstall) {
1717 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
1809 struct page *page;
1811 page = erofs_grab_cache_page_nowait(inode->i_mapping, index);
1812 if (page) {
1813 if (PageUptodate(page))
1814 unlock_page(page);
1816 (void)z_erofs_do_read_page(f, page);
1817 put_page(page);
1837 err = z_erofs_do_read_page(&f, &folio->page);
1878 err = z_erofs_do_read_page(&f, &folio->page);