Lines matching refs:page (mm/zsmalloc.c)

16  * struct page(s) to form a zspage.
18 * Usage of struct page fields:
19 * page->private: points to zspage
20 * page->index: links together all component pages of a zspage
21 * For the huge page, this is always 0, so we use this field
23 * page->page_type: first object offset in a subpage of zspage
25 * Usage of struct page flags:
26 * PG_private: identifies the first component page
27 * PG_owner_priv_1: identifies the huge component page
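
The field reuse described above is easiest to see end to end. Below is a hypothetical debug walker, not taken from the source, built only from the rules quoted above; it assumes a non-huge zspage (for the huge case, ->index stores the handle instead of a link) and omits locking.

/*
 * Hypothetical helper (illustration only): walk a zspage's component pages
 * using the documented field layout.
 */
static void __maybe_unused zspage_dump_chain(struct page *first_page)
{
	struct page *page = first_page;

	/* only the first component page carries PG_private */
	WARN_ON(!PagePrivate(first_page));

	while (page) {
		pr_info("page %px: zspage %lx, first obj offset %u\n",
			page, page_private(page), page->page_type);
		/* ->index links to the next component page (0 on the last) */
		page = (struct page *)page->index;
	}
}
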
71 * span more than 1 page which avoids complex case of mapping 2 pages simply
133 * On systems with 4K page size, this gives 255 size classes! There is a
135 * - Large number of size classes is potentially wasteful as free pages are
151 * of ->inuse objects to all objects that page can store). For example,
155 * difference between the least busy page in the group (minimum permitted
156 * number of ->inuse objects) and the most busy page (maximum permitted
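
The "255 size classes" figure follows from straightforward arithmetic. The constants below are assumptions for a common 4K-page configuration, not values quoted in this listing:

/*
 *   ZS_SIZE_CLASS_DELTA = PAGE_SIZE >> CLASS_BITS = 4096 >> 8 = 16 bytes
 *   ZS_MIN_ALLOC_SIZE   = 32,  ZS_MAX_ALLOC_SIZE  = PAGE_SIZE = 4096
 *
 *   nr_size_classes = (4096 - 32) / 16 + 1 = 254 + 1 = 255
 */
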
254 struct page *first_page;
429 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
434 static __maybe_unused int is_first_page(struct page *page)
436 return PagePrivate(page);
451 static inline struct page *get_first_page(struct zspage *zspage)
453 struct page *first_page = zspage->first_page;
459 static inline unsigned int get_first_obj_offset(struct page *page)
461 return page->page_type;
464 static inline void set_first_obj_offset(struct page *page, unsigned int offset)
466 page->page_type = offset;
662 * status of the given page.
678 * Take integer division into consideration: a page with one inuse
716 * objects, the fullness status of the page can change, for instance, from
718 * checks if such a status change has occurred for the given page and
719 * accordingly moves the page from the list of the old fullness group to that
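
The integer-division caveat is clearer with numbers. The exact formula and thresholds used by the code are not quoted here, so treat this as illustrative only:

/*
 * With objs_per_zspage = 128 and a single object in use, a naive ratio
 *
 *   inuse * 10 / objs_per_zspage  =  1 * 10 / 128  =  0
 *
 * would classify a zspage that still holds live data as completely empty,
 * so the fullness calculation must round up or special-case small counts.
 */
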
739 static struct zspage *get_zspage(struct page *page)
741 struct zspage *zspage = (struct zspage *)page_private(page);
747 static struct page *get_next_page(struct page *page)
749 struct zspage *zspage = get_zspage(page);
754 return (struct page *)page->index;
758 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
760 * @page: page object resides in zspage
763 static void obj_to_location(unsigned long obj, struct page **page,
767 *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
771 static void obj_to_page(unsigned long obj, struct page **page)
774 *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
778 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
779 * @page: page object resides in zspage
782 static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
786 obj = page_to_pfn(page) << OBJ_INDEX_BITS;
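
obj_to_location()/location_to_obj() pack a page frame number and an object index into a single unsigned long. A standalone sketch of that packing is below; OBJ_INDEX_BITS = 12 is an arbitrary illustration value, and the real code additionally reserves low tag bits, which are omitted here.

#include <assert.h>

#define OBJ_INDEX_BITS	12
#define OBJ_INDEX_MASK	((1UL << OBJ_INDEX_BITS) - 1)

/* pack <pfn, obj_idx> into one word */
static unsigned long encode(unsigned long pfn, unsigned int obj_idx)
{
	return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

/* recover <pfn, obj_idx> from the packed word */
static void decode(unsigned long obj, unsigned long *pfn, unsigned int *obj_idx)
{
	*pfn = obj >> OBJ_INDEX_BITS;
	*obj_idx = obj & OBJ_INDEX_MASK;
}

int main(void)
{
	unsigned long pfn;
	unsigned int idx;

	decode(encode(0x1234, 7), &pfn, &idx);
	assert(pfn == 0x1234 && idx == 7);
	return 0;
}
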
798 static inline bool obj_allocated(struct page *page, void *obj,
802 struct zspage *zspage = get_zspage(page);
805 VM_BUG_ON_PAGE(!is_first_page(page), page);
806 handle = page->index;
818 static void reset_page(struct page *page)
820 __ClearPageMovable(page);
821 ClearPagePrivate(page);
822 set_page_private(page, 0);
823 page_mapcount_reset(page);
824 page->index = 0;
829 struct page *cursor, *fail;
851 struct page *page, *next;
862 next = page = get_first_page(zspage);
864 VM_BUG_ON_PAGE(!PageLocked(page), page);
865 next = get_next_page(page);
866 reset_page(page);
867 unlock_page(page);
868 dec_zone_page_state(page, NR_ZSPAGES);
869 put_page(page);
870 page = next;
871 } while (page != NULL);
887 * lock_page. The page locks that trylock_zspage acquired will be released
904 struct page *page = get_first_page(zspage);
906 while (page) {
907 struct page *next_page;
911 set_first_obj_offset(page, off);
913 vaddr = kmap_atomic(page);
923 * page, which must point to the first object on the next
924 * page (if present)
926 next_page = get_next_page(page);
937 page = next_page;
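
A worked example of the cross-page link described above, using an arbitrary illustrative class size:

/*
 * class->size = 176, PAGE_SIZE = 4096 (illustration only):
 *   objects 0..22 fit entirely in page 0        (23 * 176 = 4048 <= 4096)
 *   object 23 starts at 4048 and spills 128 bytes into page 1 (4224 - 4096)
 *   => its freelist link points to object 24, the first object that begins
 *      on page 1, and set_first_obj_offset(page1, ...) records 128.
 */
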
945 struct page *pages[])
948 struct page *page;
949 struct page *prev_page = NULL;
954 * 1. all pages are linked together using page->index
955 * 2. each sub-page points to zspage using page->private
957 * we set PG_private to identify the first page (i.e. no other sub-page
961 page = pages[i];
962 set_page_private(page, (unsigned long)zspage);
963 page->index = 0;
965 zspage->first_page = page;
966 SetPagePrivate(page);
971 prev_page->index = (unsigned long)page;
973 prev_page = page;
985 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
995 struct page *page;
997 page = alloc_page(gfp);
998 if (!page) {
1007 inc_zone_page_state(page, NR_ZSPAGES);
1008 pages[i] = page;
1054 struct page *pages[2], int off, int size)
1060 /* disable page faults to match kmap_atomic() return conditions */
1082 struct page *pages[2], int off, int size)
1109 /* enable page faults to match kunmap_atomic() return conditions */
1190 * This function returns with preemption and page faults disabled.
1196 struct page *page;
1202 struct page *pages[2];
1215 obj_to_location(obj, &page, &obj_idx);
1216 zspage = get_zspage(page);
1234 /* this object is contained entirely within a page */
1235 area->vm_addr = kmap_atomic(page);
1241 pages[0] = page;
1242 pages[1] = get_next_page(page);
1257 struct page *page;
1265 obj_to_location(obj, &page, &obj_idx);
1266 zspage = get_zspage(page);
1274 struct page *pages[2];
1276 pages[0] = page;
1277 pages[1] = get_next_page(page);
1295 * page.
1315 struct page *m_page;
1338 /* record handle in page->index */
1422 struct page *f_page;
1448 struct page *f_page;
1458 * so it's safe to get the page from handle.
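
For context, this is roughly how a caller drives the allocate/map/copy/unmap path excerpted above. It is a simplified sketch modelled on zram's usage, not code from this file; whether a failed zs_malloc() returns 0 or an encoded errno differs between kernel versions, so the failure check is an assumption.

#include <linux/zsmalloc.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int store_buf(struct zs_pool *pool, const void *buf, size_t len,
		     unsigned long *handlep)
{
	unsigned long handle;
	void *dst;

	handle = zs_malloc(pool, len, GFP_KERNEL);
	if (!handle)	/* failure encoding varies by kernel version */
		return -ENOMEM;

	/*
	 * The mapping may be a direct kmap or a per-cpu copy buffer when the
	 * object straddles two pages; either way it must be unmapped promptly
	 * because page faults/preemption are disabled while mapped.
	 */
	dst = zs_map_object(pool, handle, ZS_MM_WO);
	memcpy(dst, buf, len);
	zs_unmap_object(pool, handle);

	*handlep = handle;
	return 0;
}
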
1481 struct page *s_page, *d_page;
1553 struct page *page, int *obj_idx)
1558 void *addr = kmap_atomic(page);
1560 offset = get_first_obj_offset(page);
1564 if (obj_allocated(page, addr + offset, &handle))
1584 struct page *s_page = get_first_page(src_zspage);
1651 * @zspage: target zspage
1673 struct page *curr_page, *page;
1678 * lock each page under migrate_read_lock(). Otherwise, the page we lock
1680 * the wrong page to unlock, so we must take a reference to the page
1685 page = get_first_page(zspage);
1686 if (trylock_page(page))
1688 get_page(page);
1690 wait_on_page_locked(page);
1691 put_page(page);
1694 curr_page = page;
1695 while ((page = get_next_page(curr_page))) {
1696 if (trylock_page(page)) {
1697 curr_page = page;
1699 get_page(page);
1701 wait_on_page_locked(page);
1702 put_page(page);
1741 /* Number of isolated subpages for *page migration* in this zspage */
1756 struct page *newpage, struct page *oldpage)
1758 struct page *page;
1759 struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
1762 page = get_first_page(zspage);
1764 if (page == oldpage)
1767 pages[idx] = page;
1769 } while ((page = get_next_page(page)) != NULL);
1778 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
1787 VM_BUG_ON_PAGE(PageIsolated(page), page);
1789 zspage = get_zspage(page);
1798 static int zs_page_migrate(struct page *newpage, struct page *page,
1804 struct page *dummy;
1819 VM_BUG_ON_PAGE(!PageIsolated(page), page);
1821 /* The page is locked, so this pointer must remain valid */
1822 zspage = get_zspage(page);
1835 offset = get_first_obj_offset(page);
1836 s_addr = kmap_atomic(page);
1847 if (obj_allocated(page, addr, &handle)) {
1858 replace_sub_page(class, zspage, newpage, page);
1868 if (page_zone(newpage) != page_zone(page)) {
1869 dec_zone_page_state(page, NR_ZSPAGES);
1873 reset_page(page);
1874 put_page(page);
1879 static void zs_page_putback(struct page *page)
1884 VM_BUG_ON_PAGE(!PageIsolated(page), page);
1886 zspage = get_zspage(page);
1955 struct page *page = get_first_page(zspage);
1958 WARN_ON(!trylock_page(page));
1959 __SetPageMovable(page, &zsmalloc_mops);
1960 unlock_page(page);
1961 } while ((page = get_next_page(page)) != NULL);
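
The three callbacks above are hooked into the page-migration core through a movable_operations table, which is what &zsmalloc_mops at line 1959 refers to. The definition below is a reconstruction for illustration, not a quote from the listing:

/* struct movable_operations is declared in <linux/migrate.h> */
static const struct movable_operations zsmalloc_mops = {
	.isolate_page	= zs_page_isolate,
	.migrate_page	= zs_page_migrate,
	.putback_page	= zs_page_putback,
};
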