Lines Matching refs:page

11  * can store up to three compressed pages per page, which improves the
13 * storing an integral number of objects per page) and simplicity.
16 * number of objects per page) when reclaim is used.
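The "three compressed pages per page" above map onto three buddy positions, plus a headless mode for objects too large to share a page. A sketch of the buddy enumeration as it appears in z3fold.c (exact members vary slightly across kernel versions):

    enum buddy {
            HEADLESS = 0,   /* page holds one large object and no header */
            FIRST,          /* object placed right after the z3fold header */
            MIDDLE,         /* object placed at a movable middle offset */
            LAST,           /* object placed at the end of the page */
            BUDDIES_MAX = LAST
    };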
32 #include <linux/page-flags.h>
53 * in the beginning of an allocated page are occupied by z3fold header, so
55 * which shows the max number of free chunks in a z3fold page, also there will
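The chunk arithmetic behind that comment comes from a handful of macros; a sketch following z3fold.c, where NCHUNKS_ORDER = 6 splits a page into 64 chunks and the header occupies the first chunk (or two, with CONFIG_DEBUG_SPINLOCK):

    #define NCHUNKS_ORDER   6
    #define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
    #define CHUNK_SIZE      (1 << CHUNK_SHIFT)
    #define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
    #define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
    #define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
    /* max number of free chunks in a z3fold page: 63, or 62 with spinlock debugging */
    #define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)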
99 * struct z3fold_header - z3fold page metadata occupying first chunks of each
100 * z3fold page, except for HEADLESS pages
101 * @buddy: links the z3fold page into the relevant list in the
103 * @page_lock: per-page lock
104 * @refcount: reference count for the z3fold page
105 * @work: work_struct for page layout optimization
108 * @cpu: CPU which this page "belongs" to
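Assembled from the kernel-doc fields above, the per-page metadata looks roughly like this (a sketch; the exact field set differs between kernel versions, e.g. newer ones add a buddy-slots pointer):

    struct z3fold_header {
            struct list_head buddy;         /* link into unbuddied/stale lists */
            spinlock_t page_lock;           /* per-page lock */
            struct kref refcount;           /* reference count for the page */
            struct work_struct work;        /* page layout optimization */
            struct z3fold_pool *pool;       /* owning pool */
            short cpu;                      /* CPU this page "belongs" to */
            unsigned short first_chunks;    /* size of the first buddy, in chunks */
            unsigned short middle_chunks;   /* size of the middle buddy, in chunks */
            unsigned short last_chunks;     /* size of the last buddy, in chunks */
            unsigned short start_middle;    /* starting chunk of the middle buddy */
    };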
136 * @stale_lock: protects pool stale page list
138 * buddies; the list each z3fold page is added to depends on
147 * @compact_wq: workqueue for page layout background optimization
148 * @release_wq: workqueue for safe page release
149 * @work: work_struct for safe page release
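The pool fields named above suggest a structure along these lines (a sketch showing only the fields visible in this listing, plus the pool lock and LRU that the code below uses):

    struct z3fold_pool {
            spinlock_t lock;                        /* protects the lists below */
            struct list_head __percpu *unbuddied;   /* per-CPU lists, indexed by free chunks */
            struct list_head lru;                   /* page LRU walked by reclaim */
            struct list_head stale;                 /* pages pending safe release */
            spinlock_t stale_lock;                  /* protects the stale page list */
            struct workqueue_struct *compact_wq;    /* background layout optimization */
            struct workqueue_struct *release_wq;    /* safe page release */
            struct work_struct work;                /* safe page release work item */
    };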
174 * Internal z3fold page flags
238 /* Lock a z3fold page */
244 /* Try to lock a z3fold page */
250 /* Unlock a z3fold page */
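All three helpers are thin wrappers around the page_lock spinlock in the header, as in z3fold.c:

    /* Lock a z3fold page */
    static inline void z3fold_page_lock(struct z3fold_header *zhdr)
    {
            spin_lock(&zhdr->page_lock);
    }

    /* Try to lock a z3fold page */
    static inline int z3fold_page_trylock(struct z3fold_header *zhdr)
    {
            return spin_trylock(&zhdr->page_lock);
    }

    /* Unlock a z3fold page */
    static inline void z3fold_page_unlock(struct z3fold_header *zhdr)
    {
            spin_unlock(&zhdr->page_lock);
    }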
286 /* Returns the z3fold page where a given handle is stored */
292 /* return the locked z3fold page if it's not headless */
300 struct page *page = virt_to_page(zhdr);
302 if (!test_bit(PAGE_HEADLESS, &page->private))
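Since non-headless handles point inside the z3fold page, masking with PAGE_MASK recovers the header at the page start; headless pages are detected via the PAGE_HEADLESS bit and never locked. A sketch (the _sketch suffix marks a reconstruction; the real get_z3fold_header on newer kernels also pins the header through the buddy slots):

    static inline struct z3fold_header *handle_to_z3fold_header(unsigned long h)
    {
            return (struct z3fold_header *)(h & PAGE_MASK);
    }

    static struct z3fold_header *get_z3fold_header_sketch(unsigned long handle)
    {
            struct z3fold_header *zhdr = handle_to_z3fold_header(handle);
            struct page *page = virt_to_page(zhdr);

            if (!test_bit(PAGE_HEADLESS, &page->private))
                    z3fold_page_lock(zhdr);
            return zhdr;
    }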
396 /* Initializes the z3fold header of a newly allocated z3fold page */
397 static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
400 struct z3fold_header *zhdr = page_address(page);
403 INIT_LIST_HEAD(&page->lru);
404 clear_bit(PAGE_HEADLESS, &page->private);
405 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
406 clear_bit(NEEDS_COMPACTING, &page->private);
407 clear_bit(PAGE_STALE, &page->private);
408 clear_bit(PAGE_CLAIMED, &page->private);
433 /* Resets the struct page fields and frees the page */
434 static void free_z3fold_page(struct page *page, bool headless)
437 lock_page(page);
438 __ClearPageMovable(page);
439 unlock_page(page);
441 ClearPagePrivate(page);
442 __free_page(page);
452 * Encodes the handle of a particular buddy within a z3fold page
463 * For a headless page, its handle is its pointer with the extra
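A sketch of the encoding, close to the kernel's __encode_handle() (slot bookkeeping simplified; BUDDY_MASK/BUDDY_SHIFT pack the buddy index and, for LAST, the object size into the low bits):

    static unsigned long encode_handle_sketch(struct z3fold_header *zhdr,
                                              struct z3fold_buddy_slots *slots,
                                              enum buddy bud)
    {
            unsigned long h = (unsigned long)zhdr;
            int idx;

            /* a headless page's handle is its pointer, tagged */
            if (bud == HEADLESS)
                    return h | (1 << PAGE_HEADLESS);

            /* otherwise stash the in-page address (buddy index in its low
             * bits) in a slot and hand out the slot's address as the handle */
            idx = (bud + zhdr->first_num) & BUDDY_MASK;
            h += idx;
            if (bud == LAST)
                    h |= (zhdr->last_chunks << BUDDY_SHIFT);

            slots->slot[idx] = h;
            return (unsigned long)&slots->slot[idx];
    }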
524 struct page *page = virt_to_page(zhdr);
528 set_bit(PAGE_STALE, &page->private);
529 clear_bit(NEEDS_COMPACTING, &page->private);
531 if (!list_empty(&page->lru))
532 list_del_init(&page->lru);
582 struct page *page = virt_to_page(zhdr);
585 if (WARN_ON(!test_bit(PAGE_STALE, &page->private)))
589 free_z3fold_page(page, false);
597 * Returns the number of free chunks in a z3fold page.
689 * the page lock is already taken
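A sketch of that free-chunk accounting, following the chunk fields in the header (callers hold the page lock, as the adjacent comment notes); a middle buddy splits the free space, so the larger gap on either side is what counts:

    static int num_free_chunks(struct z3fold_header *zhdr)
    {
            int nfree;

            /*
             * If there is a middle object, pick up the bigger free space
             * either before or after it; otherwise just subtract the
             * chunks occupied by the first and the last objects.
             */
            if (zhdr->middle_chunks != 0) {
                    int nfree_before = zhdr->first_chunks ?
                            0 : zhdr->start_middle - ZHDR_CHUNKS;
                    int nfree_after = zhdr->last_chunks ?
                            0 : TOTAL_CHUNKS -
                                    (zhdr->start_middle + zhdr->middle_chunks);
                    nfree = max(nfree_before, nfree_after);
            } else
                    nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
            return nfree;
    }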
774 struct page *page = virt_to_page(zhdr);
776 if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private))
779 if (unlikely(PageIsolated(page)))
822 struct page *page;
824 page = virt_to_page(zhdr);
829 if (WARN_ON(!test_and_clear_bit(NEEDS_COMPACTING, &page->private))) {
842 if (test_bit(PAGE_STALE, &page->private) ||
843 test_and_set_bit(PAGE_CLAIMED, &page->private)) {
853 clear_bit(PAGE_CLAIMED, &page->private);
861 clear_bit(PAGE_CLAIMED, &page->private);
873 /* returns _locked_ z3fold page header or NULL */
878 struct page *page;
883 /* First, try to find an unbuddied z3fold page. */
911 page = virt_to_page(zhdr);
912 if (test_bit(NEEDS_COMPACTING, &page->private) ||
913 test_bit(PAGE_CLAIMED, &page->private)) {
923 * this page could not be removed from its unbuddied
925 * page lock so kref_put could not be called before
956 page = virt_to_page(zhdr);
957 if (test_bit(NEEDS_COMPACTING, &page->private) ||
958 test_bit(PAGE_CLAIMED, &page->private)) {
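__z3fold_alloc() searches the per-CPU unbuddied lists, starting at the size class with just enough free chunks, and returns a locked header or NULL. A condensed sketch (pool locking, the stale/claimed checks shown above, and the cross-CPU fallback elided; _sketch marks the reconstruction):

    static struct z3fold_header *__z3fold_alloc_sketch(struct z3fold_pool *pool,
                                                       size_t size, bool can_sleep)
    {
            struct z3fold_header *zhdr = NULL;
            struct list_head *unbuddied = get_cpu_ptr(pool->unbuddied);
            int i, chunks = size_to_chunks(size);

            /* First, try to find an unbuddied z3fold page. */
            for (i = chunks; i < NCHUNKS; i++) {
                    zhdr = list_first_entry_or_null(&unbuddied[i],
                                            struct z3fold_header, buddy);
                    if (!zhdr)
                            continue;
                    if (!z3fold_page_trylock(zhdr)) {
                            zhdr = NULL;    /* busy: try the next size class */
                            continue;
                    }
                    list_del_init(&zhdr->buddy);
                    break;
            }
            put_cpu_ptr(pool->unbuddied);
            return zhdr;    /* locked, or NULL if nothing suitable */
    }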
1079 * performed first. If no suitable free region is found, then a new page is
1087 * a new page.
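Condensed, the allocation flow the comment documents is: fit into an existing page if possible, otherwise take a fresh one. A sketch (buddy selection between first/middle/last, locking and refcounting elided):

    static int z3fold_alloc_sketch(struct z3fold_pool *pool, size_t size,
                                   gfp_t gfp, unsigned long *handle)
    {
            struct z3fold_header *zhdr = NULL;
            struct page *page;
            enum buddy bud = FIRST;         /* real code picks the best slot */

            if (size > PAGE_SIZE)
                    return -ENOSPC;

            if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
                    bud = HEADLESS;         /* too big to share a page */
            else
                    /* try to reuse free space in a partially used page */
                    zhdr = __z3fold_alloc(pool, size,
                                          gfpflags_allow_blocking(gfp));

            if (!zhdr) {
                    /* no suitable free region found: allocate a new page */
                    page = alloc_page(gfp);
                    if (!page)
                            return -ENOMEM;
                    zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
                    if (!zhdr) {
                            __free_page(page);
                            return -ENOMEM;
                    }
            }

            *handle = encode_handle(zhdr, bud);
            return 0;
    }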
1094 struct page *page = NULL;
1121 page = virt_to_page(zhdr);
1127 page = NULL;
1133 * Before allocating a page, let's see if we can take one from
1141 page = virt_to_page(zhdr);
1146 if (!page)
1147 page = alloc_page(gfp);
1149 if (!page)
1152 zhdr = init_z3fold_page(page, bud == HEADLESS, pool, gfp);
1154 __free_page(page);
1160 set_bit(PAGE_HEADLESS, &page->private);
1164 lock_page(page);
1165 __SetPageMovable(page, pool->inode->i_mapping);
1166 unlock_page(page);
1168 if (trylock_page(page)) {
1169 __SetPageMovable(page, pool->inode->i_mapping);
1170 unlock_page(page);
1188 /* Add/move z3fold page to beginning of LRU */
1189 if (!list_empty(&page->lru))
1190 list_del(&page->lru);
1192 list_add(&page->lru, &pool->lru);
1207 * In the case that the z3fold page in which the allocation resides is under
1209 * only sets the first|last_chunks to 0. The page is actually freed
1215 struct page *page;
1220 page = virt_to_page(zhdr);
1221 page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
1223 if (test_bit(PAGE_HEADLESS, &page->private)) {
1224 /* if a headless page is under reclaim, just leave.
1226 * has not been set before, we release this page
1231 list_del(&page->lru);
1234 free_z3fold_page(page, true);
1267 /* the page has not been claimed by us */
1271 if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1273 clear_bit(PAGE_CLAIMED, &page->private);
1282 clear_bit(PAGE_CLAIMED, &page->private);
1287 clear_bit(PAGE_CLAIMED, &page->private);
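Put together, the free path shown above: headless pages are released immediately unless reclaim has claimed them; for regular pages the buddy's chunk count is zeroed and compaction is queued. A condensed sketch (refcounting and several claim checks elided):

    static void z3fold_free_sketch(struct z3fold_pool *pool, unsigned long handle)
    {
            struct z3fold_header *zhdr = get_z3fold_header(handle);
            struct page *page = virt_to_page(zhdr);
            bool page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);

            if (test_bit(PAGE_HEADLESS, &page->private)) {
                    if (!page_claimed) {    /* not under reclaim: free now */
                            spin_lock(&pool->lock);
                            list_del(&page->lru);
                            spin_unlock(&pool->lock);
                            free_z3fold_page(page, true);
                    }
                    return;
            }

            switch (handle_to_buddy(handle)) {
            case FIRST:  zhdr->first_chunks = 0;  break;
            case MIDDLE: zhdr->middle_chunks = 0; break;
            case LAST:   zhdr->last_chunks = 0;   break;
            default:     WARN_ON(1);              break;
            }

            if (!page_claimed &&
                !test_and_set_bit(NEEDS_COMPACTING, &page->private))
                    queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
    }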
1293 * z3fold_reclaim_page() - evicts allocations from a pool page and frees it
1294 * @pool: pool from which a page will attempt to be evicted
1300 * information on how the allocations are organized within each z3fold page.
1306 * The user detects a page should be reclaimed and calls z3fold_reclaim_page().
1307 * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and
1312 * non-zero. z3fold_reclaim_page() will add the z3fold page back to the
1313 * appropriate list and try the next z3fold page on the LRU up to
1318 * contains logic to delay freeing the page if the page is under reclaim,
1319 * as indicated by the setting of the PG_reclaim flag on the underlying page.
1321 * If all buddies in the z3fold page are successfully evicted, then the
1322 * z3fold page can be freed.
1324 * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are
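The documented loop, condensed: pop the least recently used page, ask the user to evict each buddy, free the page on success, otherwise requeue it and retry. A sketch (evict_all_buddies() is a hypothetical stand-in for the per-buddy pool->ops->evict() calls; error paths simplified):

    static int z3fold_reclaim_sketch(struct z3fold_pool *pool, unsigned int retries)
    {
            struct page *page;
            unsigned int i;

            for (i = 0; i < retries; i++) {
                    /* take the least recently used page off the pool LRU */
                    spin_lock(&pool->lock);
                    if (list_empty(&pool->lru)) {
                            spin_unlock(&pool->lock);
                            return -EINVAL;
                    }
                    page = list_last_entry(&pool->lru, struct page, lru);
                    list_del_init(&page->lru);
                    spin_unlock(&pool->lock);

                    /* hypothetical helper: the user evicts every buddy */
                    if (evict_all_buddies(pool, page) == 0) {
                            /* all buddies gone: the z3fold page can be freed */
                            free_z3fold_page(page,
                                    test_bit(PAGE_HEADLESS, &page->private));
                            return 0;
                    }

                    /* eviction failed: put the page back, try the next one */
                    spin_lock(&pool->lock);
                    list_add(&page->lru, &pool->lru);
                    spin_unlock(&pool->lock);
            }
            return -EAGAIN;
    }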
1332 struct page *page = NULL;
1351 page = list_entry(pos, struct page, lru);
1353 zhdr = page_address(page);
1354 if (test_bit(PAGE_HEADLESS, &page->private)) {
1357 * until we have the page lock to avoid racing
1365 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1384 * need to do it under page lock, otherwise checking
1388 test_and_set_bit(PAGE_CLAIMED, &page->private)) {
1395 continue; /* can't evict such a page */
1405 list_del_init(&page->lru);
1408 if (!test_bit(PAGE_HEADLESS, &page->private)) {
1430 * reference to this page
1454 if (test_bit(PAGE_HEADLESS, &page->private)) {
1456 free_z3fold_page(page, true);
1461 list_add(&page->lru, &pool->lru);
1463 clear_bit(PAGE_CLAIMED, &page->private);
1474 * if we are here, the page is still not completely
1479 list_add(&page->lru, &pool->lru);
1482 clear_bit(PAGE_CLAIMED, &page->private);
1498 * correct starting chunk within the page.
1505 struct page *page;
1511 page = virt_to_page(zhdr);
1513 if (test_bit(PAGE_HEADLESS, &page->private))
1523 set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
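Mapping resolves a handle's buddy to a starting chunk: headless pages (the test above) map the page itself, otherwise the offset is computed per buddy, and a mapped middle buddy is flagged so compaction won't move it underneath the user. A sketch of the switch inside z3fold_map():

    void *addr = zhdr;

    switch (handle_to_buddy(handle)) {
    case FIRST:
            addr += ZHDR_SIZE_ALIGNED;      /* right after the header */
            break;
    case MIDDLE:
            addr += zhdr->start_middle << CHUNK_SHIFT;
            set_bit(MIDDLE_CHUNK_MAPPED, &page->private);
            break;
    case LAST:
            addr += PAGE_SIZE - (handle_to_chunks(handle) << CHUNK_SHIFT);
            break;
    default:
            WARN_ON(1);                     /* unknown buddy id */
            addr = NULL;
            break;
    }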
1550 struct page *page;
1554 page = virt_to_page(zhdr);
1556 if (test_bit(PAGE_HEADLESS, &page->private))
1561 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1577 static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode)
1582 VM_BUG_ON_PAGE(!PageMovable(page), page);
1583 VM_BUG_ON_PAGE(PageIsolated(page), page);
1585 if (test_bit(PAGE_HEADLESS, &page->private))
1588 zhdr = page_address(page);
1590 if (test_bit(NEEDS_COMPACTING, &page->private) ||
1591 test_bit(PAGE_STALE, &page->private))
1597 if (test_and_set_bit(PAGE_CLAIMED, &page->private))
1603 if (!list_empty(&page->lru))
1604 list_del_init(&page->lru);
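Isolation for migration refuses pages that are headless, stale, or queued for compaction, claims the survivor, and detaches it from the pool lists. A condensed sketch (header locking and the mapped-count check elided; _sketch marks the reconstruction):

    static bool z3fold_page_isolate_sketch(struct page *page, isolate_mode_t mode)
    {
            if (test_bit(PAGE_HEADLESS, &page->private))
                    return false;
            if (test_bit(NEEDS_COMPACTING, &page->private) ||
                test_bit(PAGE_STALE, &page->private))
                    return false;
            if (test_and_set_bit(PAGE_CLAIMED, &page->private))
                    return false;

            /* keep the page off the LRU while it is being migrated */
            if (!list_empty(&page->lru))
                    list_del_init(&page->lru);
            return true;
    }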
1616 static int z3fold_page_migrate(struct address_space *mapping, struct page *newpage,
1617 struct page *page, enum migrate_mode mode)
1623 VM_BUG_ON_PAGE(!PageMovable(page), page);
1624 VM_BUG_ON_PAGE(!PageIsolated(page), page);
1625 VM_BUG_ON_PAGE(!test_bit(PAGE_CLAIMED, &page->private), page);
1628 zhdr = page_address(page);
1635 clear_bit(PAGE_CLAIMED, &page->private);
1644 newpage->private = page->private;
1645 page->private = 0;
1654 new_mapping = page_mapping(page);
1655 __ClearPageMovable(page);
1656 ClearPagePrivate(page);
1676 page_mapcount_reset(page);
1677 clear_bit(PAGE_CLAIMED, &page->private);
1678 put_page(page);
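Migration, condensed from the lines above: the whole page (header included) is copied to the new page, the private flag word moves with it, the old page stops being movable, and the new header is queued for compaction. A sketch of the core steps (locking and failure paths elided):

    /* inside z3fold_page_migrate(), once the old header is locked */
    new_zhdr = page_address(newpage);
    memcpy(new_zhdr, zhdr, PAGE_SIZE);      /* move header and payload */
    newpage->private = page->private;
    page->private = 0;

    new_mapping = page_mapping(page);
    __ClearPageMovable(page);
    ClearPagePrivate(page);

    get_page(newpage);
    __SetPageMovable(newpage, new_mapping);
    queue_work_on(new_zhdr->cpu, pool->compact_wq, &new_zhdr->work);

    page_mapcount_reset(page);
    clear_bit(PAGE_CLAIMED, &page->private);
    put_page(page);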
1682 static void z3fold_page_putback(struct page *page)
1687 zhdr = page_address(page);
1693 INIT_LIST_HEAD(&page->lru);
1699 list_add(&page->lru, &pool->lru);
1701 clear_bit(PAGE_CLAIMED, &page->private);
1811 /* Make sure the z3fold header is not larger than the page size */
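In init_z3fold() that check is a compile-time assertion:

    /* Make sure the z3fold header is not larger than the page size */
    BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE);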