Lines Matching defs:zspage

16  * struct page(s) to form a zspage.
19 * page->private: points to zspage
20 * page->index: links together all component pages of a zspage
23 * page->page_type: first object offset in a subpage of zspage
37 * zspage->lock
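
The header comments above (lines 16-37) describe how zsmalloc overloads struct page fields: page->private points back to the owning zspage, page->index chains the component pages together, and page->page_type holds the first object offset in each subpage. As a rough illustration of that linkage only, here is a minimal standalone C sketch; toy_page, toy_zspage, page_to_zspage and dump_zspage are hypothetical names invented for this sketch, not the kernel's types, and the offsets are made up.

#include <stdio.h>

/* Toy stand-ins, NOT the kernel's struct page; names exist only in this sketch. */
struct toy_zspage;

struct toy_page {
	struct toy_zspage *private;   /* page->private: points to the owning zspage */
	struct toy_page *index;       /* page->index: next component page in the chain */
	unsigned int page_type;       /* page->page_type: first object offset in this subpage */
};

struct toy_zspage {
	struct toy_page *first_page;  /* head of the component-page chain */
};

/* Models get_zspage(): recover the owning zspage from any component page. */
static struct toy_zspage *page_to_zspage(struct toy_page *page)
{
	return page->private;
}

/* Walk the component pages of a zspage, as the kernel does via page->index. */
static void dump_zspage(struct toy_zspage *zspage)
{
	for (struct toy_page *p = zspage->first_page; p; p = p->index)
		printf("page %p: first object offset %u\n", (void *)p, p->page_type);
}

int main(void)
{
	struct toy_zspage zspage;
	struct toy_page pages[3];

	zspage.first_page = &pages[0];
	for (int i = 0; i < 3; i++) {
		pages[i].private = &zspage;                       /* back-pointer to the zspage */
		pages[i].index = (i < 2) ? &pages[i + 1] : NULL;  /* chain of component pages */
		pages[i].page_type = (unsigned int)(i * 48);      /* made-up first-object offsets */
	}

	dump_zspage(&zspage);
	printf("page 1 belongs to zspage %p\n", (void *)page_to_zspage(&pages[1]));
	return 0;
}
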
193 /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
202 * For every zspage, zspage->freeobj gives the head of this list.
244 struct zspage {
268 static void SetZsHugePage(struct zspage *zspage)
270 zspage->huge = 1;
273 static bool ZsHugePage(struct zspage *zspage)
275 return zspage->huge;
278 static void migrate_lock_init(struct zspage *zspage);
279 static void migrate_read_lock(struct zspage *zspage);
280 static void migrate_read_unlock(struct zspage *zspage);
283 static void migrate_write_lock(struct zspage *zspage);
284 static void migrate_write_lock_nested(struct zspage *zspage);
285 static void migrate_write_unlock(struct zspage *zspage);
288 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
290 static void migrate_write_lock(struct zspage *zspage) {}
291 static void migrate_write_lock_nested(struct zspage *zspage) {}
292 static void migrate_write_unlock(struct zspage *zspage) {}
295 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
305 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
333 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
339 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
341 kmem_cache_free(pool->zspage_cachep, zspage);
429 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
440 static inline int get_zspage_inuse(struct zspage *zspage)
442 return zspage->inuse;
446 static inline void mod_zspage_inuse(struct zspage *zspage, int val)
448 zspage->inuse += val;
451 static inline struct page *get_first_page(struct zspage *zspage)
453 struct page *first_page = zspage->first_page;
469 static inline unsigned int get_freeobj(struct zspage *zspage)
471 return zspage->freeobj;
474 static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
476 zspage->freeobj = obj;
479 static void get_zspage_mapping(struct zspage *zspage,
483 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
485 *fullness = zspage->fullness;
486 *class_idx = zspage->class;
490 struct zspage *zspage)
492 return pool->size_class[zspage->class];
495 static void set_zspage_mapping(struct zspage *zspage,
499 zspage->class = class_idx;
500 zspage->fullness = fullness;
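
The accessors in this listing read and write zspage->magic, ->class, ->fullness, ->inuse, ->freeobj, ->first_page, ->huge, ->isolated, ->list, ->lock and ->pool (see the helpers above and further below). Here is a hedged reconstruction of roughly what struct zspage (line 244) must therefore contain; field widths, ordering and the rwlock/list stand-ins are guesses for illustration, not the kernel's actual definition.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct list_head_sketch { struct list_head_sketch *next, *prev; };  /* list stand-in */
struct page;      /* opaque stand-in for the kernel's struct page */
struct zs_pool;   /* opaque stand-in for the owning pool */

struct zspage_sketch {
	unsigned int magic;               /* checked against ZSPAGE_MAGIC in the BUG_ON()s */
	unsigned int class;               /* index into pool->size_class[] */
	unsigned int fullness;            /* which per-class fullness list it sits on */
	unsigned int inuse;               /* number of allocated objects */
	unsigned int freeobj;             /* head of the in-zspage free-object list */
	bool huge;                        /* set by SetZsHugePage() */
	unsigned int isolated;            /* subpages isolated for page migration */
	struct page *first_page;          /* head of the component-page chain */
	struct list_head_sketch list;     /* linkage on the class fullness list */
	pthread_rwlock_t lock;            /* stands in for zspage->lock (an rwlock) */
	struct zs_pool *pool;             /* owning pool */
};

int main(void)
{
	printf("sketch occupies %zu bytes\n", sizeof(struct zspage_sketch));
	return 0;
}
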
505 * class maintains a list of zspages where each zspage is divided
664 static int get_fullness_group(struct size_class *class, struct zspage *zspage)
668 inuse = get_zspage_inuse(zspage);
688 * have. This function inserts the given zspage into the freelist
692 struct zspage *zspage,
696 list_add(&zspage->list, &class->fullness_list[fullness]);
700 * This function removes the given zspage from the freelist identified
704 struct zspage *zspage,
709 list_del_init(&zspage->list);
722 static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
727 get_zspage_mapping(zspage, &class_idx, &currfg);
728 newfg = get_fullness_group(class, zspage);
732 remove_zspage(class, zspage, currfg);
733 insert_zspage(class, zspage, newfg);
734 set_zspage_mapping(zspage, class_idx, newfg);
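
fix_fullness_group() above follows a remove/insert/record pattern: compute the zspage's new fullness group from its inuse count and, if it changed, move the zspage between the per-class fullness lists and store the new group on it. Below is a simplified standalone model of that pattern; the group names, thresholds and hand-rolled list handling are illustrative assumptions, not the kernel's actual fullness buckets (e.g. ZS_INUSE_RATIO_0 at line 895).

#include <stdio.h>

enum fullness { NEARLY_EMPTY, PARTIAL, NEARLY_FULL, NR_FULLNESS };  /* illustrative */

struct zspage_model {
	int inuse, objs_per_zspage;
	enum fullness fullness;
	struct zspage_model *next, *prev;       /* doubly linked list node */
};

struct size_class_model {
	struct zspage_model *fullness_list[NR_FULLNESS];  /* one list head per group */
};

static void fullness_list_del(struct size_class_model *c, struct zspage_model *z)
{
	if (z->prev) z->prev->next = z->next; else c->fullness_list[z->fullness] = z->next;
	if (z->next) z->next->prev = z->prev;
	z->next = z->prev = NULL;
}

static void fullness_list_add(struct size_class_model *c, struct zspage_model *z,
			      enum fullness fg)
{
	z->prev = NULL;
	z->next = c->fullness_list[fg];
	if (z->next) z->next->prev = z;
	c->fullness_list[fg] = z;
	z->fullness = fg;                       /* set_zspage_mapping() records the group */
}

/* Mirrors the remove/insert/record dance in fix_fullness_group(). */
static enum fullness fix_fullness(struct size_class_model *c, struct zspage_model *z)
{
	int used = z->inuse * 100 / z->objs_per_zspage;
	enum fullness newfg = used >= 80 ? NEARLY_FULL : used >= 20 ? PARTIAL : NEARLY_EMPTY;

	if (newfg != z->fullness) {
		fullness_list_del(c, z);
		fullness_list_add(c, z, newfg);
	}
	return newfg;
}

int main(void)
{
	struct size_class_model c = {0};
	struct zspage_model z = { .inuse = 1, .objs_per_zspage = 10 };

	fullness_list_add(&c, &z, NEARLY_EMPTY);
	z.inuse = 9;                            /* more objects were allocated meanwhile */
	printf("new fullness group: %d\n", fix_fullness(&c, &z));
	return 0;
}
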
739 static struct zspage *get_zspage(struct page *page)
741 struct zspage *zspage = (struct zspage *)page_private(page);
743 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
744 return zspage;
749 struct zspage *zspage = get_zspage(page);
751 if (unlikely(ZsHugePage(zspage)))
760 * @page: page the object resides in, within a zspage
779 * @page: page the object resides in, within a zspage
802 struct zspage *zspage = get_zspage(page);
804 if (unlikely(ZsHugePage(zspage))) {
827 static int trylock_zspage(struct zspage *zspage)
831 for (cursor = get_first_page(zspage); cursor != NULL; cursor =
841 for (cursor = get_first_page(zspage); cursor != fail; cursor =
849 struct zspage *zspage)
855 get_zspage_mapping(zspage, &class_idx, &fg);
859 VM_BUG_ON(get_zspage_inuse(zspage));
862 next = page = get_first_page(zspage);
873 cache_free_zspage(pool, zspage);
880 struct zspage *zspage)
882 VM_BUG_ON(get_zspage_inuse(zspage));
883 VM_BUG_ON(list_empty(&zspage->list));
890 if (!trylock_zspage(zspage)) {
895 remove_zspage(class, zspage, ZS_INUSE_RATIO_0);
896 __free_zspage(pool, class, zspage);
899 /* Initialize a newly allocated zspage */
900 static void init_zspage(struct size_class *class, struct zspage *zspage)
904 struct page *page = get_first_page(zspage);
941 set_freeobj(zspage, 0);
944 static void create_page_chain(struct size_class *class, struct zspage *zspage,
955 * 2. each sub-page points to the zspage using page->private
962 set_page_private(page, (unsigned long)zspage);
965 zspage->first_page = page;
969 SetZsHugePage(zspage);
978 * Allocate a zspage for the given size class
980 static struct zspage *alloc_zspage(struct zs_pool *pool,
986 struct zspage *zspage = cache_alloc_zspage(pool, gfp);
988 if (!zspage)
991 zspage->magic = ZSPAGE_MAGIC;
992 migrate_lock_init(zspage);
1003 cache_free_zspage(pool, zspage);
1011 create_page_chain(class, zspage, pages);
1012 init_zspage(class, zspage);
1013 zspage->pool = pool;
1015 return zspage;
1018 static struct zspage *find_get_zspage(struct size_class *class)
1021 struct zspage *zspage;
1024 zspage = list_first_entry_or_null(&class->fullness_list[i],
1025 struct zspage, list);
1026 if (zspage)
1030 return zspage;
1140 static bool zspage_full(struct size_class *class, struct zspage *zspage)
1142 return get_zspage_inuse(zspage) == class->objs_per_zspage;
1145 static bool zspage_empty(struct zspage *zspage)
1147 return get_zspage_inuse(zspage) == 0;
1195 struct zspage *zspage;
1212 /* It guarantees the zspage can be obtained from the handle safely */
1216 zspage = get_zspage(page);
1219 * migration cannot move any zpages in this zspage. Here, pool->lock
1221 * zs_unmap_object API so delegate the locking from class to zspage
1224 migrate_read_lock(zspage);
1227 class = zspage_class(pool, zspage);
1247 if (likely(!ZsHugePage(zspage)))
1256 struct zspage *zspage;
1266 zspage = get_zspage(page);
1267 class = zspage_class(pool, zspage);
1284 migrate_read_unlock(zspage);
1294 * or bigger size will be stored in a zspage consisting of a single physical
1308 struct zspage *zspage, unsigned long handle)
1319 class = pool->size_class[zspage->class];
1321 obj = get_freeobj(zspage);
1326 m_page = get_first_page(zspage);
1333 set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1334 if (likely(!ZsHugePage(zspage)))
1339 zspage->first_page->index = handle;
1342 mod_zspage_inuse(zspage, 1);
1365 struct zspage *zspage;
1380 zspage = find_get_zspage(class);
1381 if (likely(zspage)) {
1382 obj = obj_malloc(pool, zspage, handle);
1383 /* Now move the zspage to another fullness group, if required */
1384 fix_fullness_group(class, zspage);
1393 zspage = alloc_zspage(pool, class, gfp);
1394 if (!zspage) {
1400 obj = obj_malloc(pool, zspage, handle);
1401 newfg = get_fullness_group(class, zspage);
1402 insert_zspage(class, zspage, newfg);
1403 set_zspage_mapping(zspage, class->index, newfg);
1409 /* The zspage is completely set up, so mark it as movable */
1410 SetZsPageMovable(pool, zspage);
1421 struct zspage *zspage;
1429 zspage = get_zspage(f_page);
1434 /* Insert this object into the containing zspage's freelist */
1435 if (likely(!ZsHugePage(zspage)))
1436 link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1439 set_freeobj(zspage, f_objidx);
1442 mod_zspage_inuse(zspage, -1);
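
Taken together, init_zspage() (line 900), obj_malloc() (called at lines 1382 and 1400) and the object-free path around lines 1434-1442 thread a free list through the free object slots themselves: each free slot stores the index of the next free slot shifted by OBJ_TAG_BITS, and zspage->freeobj holds the head. Below is a simplified userspace model of that encoding; the slot size, the OBJ_TAG_BITS value and the end-of-list convention are illustrative assumptions, and obj_release is a name invented here.

#include <stdio.h>

#define OBJ_TAG_BITS 1      /* illustrative; the low bits carry tag information in the kernel */
#define OBJ_SIZE     64     /* illustrative object size for one size class */
#define NR_OBJS      8      /* objects per "zspage" in this model */

/* While a slot is free, its first word stores (next free index) << OBJ_TAG_BITS. */
struct link_free { unsigned long next; };

union slot {
	struct link_free free;            /* valid only while the slot is free */
	unsigned char payload[OBJ_SIZE];  /* valid only while the slot is allocated */
};

static union slot slots[NR_OBJS];
static unsigned int freeobj;          /* models zspage->freeobj */
static int inuse;                     /* models zspage->inuse */

static void init_freelist(void)       /* models init_zspage() */
{
	for (unsigned int i = 0; i < NR_OBJS; i++)
		/* the last slot encodes NR_OBJS as an end-of-list marker (a modeling choice) */
		slots[i].free.next = (unsigned long)(i + 1 < NR_OBJS ? i + 1 : NR_OBJS) << OBJ_TAG_BITS;
	freeobj = 0;                      /* set_freeobj(zspage, 0) */
}

static int obj_alloc(void)            /* models the freelist pop in obj_malloc() */
{
	if (freeobj >= NR_OBJS)
		return -1;                    /* the zspage is full */
	unsigned int obj = freeobj;
	freeobj = (unsigned int)(slots[obj].free.next >> OBJ_TAG_BITS);
	inuse++;                          /* mod_zspage_inuse(zspage, 1) */
	return (int)obj;
}

static void obj_release(unsigned int obj)  /* models the freelist push in the free path */
{
	slots[obj].free.next = (unsigned long)freeobj << OBJ_TAG_BITS;
	freeobj = obj;                    /* set_freeobj(zspage, f_objidx) */
	inuse--;                          /* mod_zspage_inuse(zspage, -1) */
}

int main(void)
{
	init_freelist();
	int a = obj_alloc(), b = obj_alloc();
	printf("allocated slots %d and %d, inuse=%d\n", a, b, inuse);
	obj_release((unsigned int)a);
	printf("released %d, next alloc reuses slot %d, inuse=%d\n", a, obj_alloc(), inuse);
	return 0;
}

Storing the next-free index inside the free slot itself means the free list needs no side storage, which is what the link->next shifts at lines 1333 and 1436 are doing.
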
1447 struct zspage *zspage;
1463 zspage = get_zspage(f_page);
1464 class = zspage_class(pool, zspage);
1469 fullness = fix_fullness_group(class, zspage);
1471 free_zspage(pool, class, zspage);
1549 * Find an allocated object in the zspage, starting from the index object, and
1578 static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage,
1579 struct zspage *dst_zspage)
1614 static struct zspage *isolate_src_zspage(struct size_class *class)
1616 struct zspage *zspage;
1620 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1621 struct zspage, list);
1622 if (zspage) {
1623 remove_zspage(class, zspage, fg);
1624 return zspage;
1628 return zspage;
1631 static struct zspage *isolate_dst_zspage(struct size_class *class)
1633 struct zspage *zspage;
1637 zspage = list_first_entry_or_null(&class->fullness_list[fg],
1638 struct zspage, list);
1639 if (zspage) {
1640 remove_zspage(class, zspage, fg);
1641 return zspage;
1645 return zspage;
1649 * putback_zspage - add @zspage into the right class's fullness list
1651 * @zspage: target zspage
1653 * Return @zspage's fullness status
1655 static int putback_zspage(struct size_class *class, struct zspage *zspage)
1659 fullness = get_fullness_group(class, zspage);
1660 insert_zspage(class, zspage, fullness);
1661 set_zspage_mapping(zspage, class->index, fullness);
1668 * To prevent zspage destruction during migration, zspage freeing should
1669 * hold the locks of all pages in the zspage.
1671 static void lock_zspage(struct zspage *zspage)
1679 * may no longer belong to the zspage. This means that we may wait for
1684 migrate_read_lock(zspage);
1685 page = get_first_page(zspage);
1689 migrate_read_unlock(zspage);
1700 migrate_read_unlock(zspage);
1703 migrate_read_lock(zspage);
1706 migrate_read_unlock(zspage);
1710 static void migrate_lock_init(struct zspage *zspage)
1712 rwlock_init(&zspage->lock);
1715 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
1717 read_lock(&zspage->lock);
1720 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
1722 read_unlock(&zspage->lock);
1726 static void migrate_write_lock(struct zspage *zspage)
1728 write_lock(&zspage->lock);
1731 static void migrate_write_lock_nested(struct zspage *zspage)
1733 write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING);
1736 static void migrate_write_unlock(struct zspage *zspage)
1738 write_unlock(&zspage->lock);
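
The migrate_* wrappers above are thin shims over zspage->lock, an rwlock: object access paths take it for read (and may run concurrently), while page migration takes it for write to exclude them. Here is a minimal pthread analogue of that read/write split; the thread bodies, map_object and migrate_page names, and the shared integer are placeholders invented for this sketch, not zsmalloc code. Build with -pthread.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for zspage->lock: readers are object accessors, the writer is migration. */
static pthread_rwlock_t zspage_lock = PTHREAD_RWLOCK_INITIALIZER;
static int object_data = 42;

static void *map_object(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&zspage_lock);    /* migrate_read_lock() */
	printf("mapped object, value=%d\n", object_data);
	pthread_rwlock_unlock(&zspage_lock);    /* migrate_read_unlock() */
	return NULL;
}

static void *migrate_page(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&zspage_lock);    /* migrate_write_lock() */
	object_data += 1;                       /* stands in for copying to a new page */
	pthread_rwlock_unlock(&zspage_lock);    /* migrate_write_unlock() */
	return NULL;
}

int main(void)
{
	pthread_t readers[2], writer;

	for (int i = 0; i < 2; i++)
		pthread_create(&readers[i], NULL, map_object, NULL);
	pthread_create(&writer, NULL, migrate_page, NULL);

	for (int i = 0; i < 2; i++)
		pthread_join(readers[i], NULL);
	pthread_join(writer, NULL);
	printf("final value=%d\n", object_data);
	return 0;
}

The write_lock_nested(&zspage->lock, SINGLE_DEPTH_NESTING) variant at line 1733 presumably lets two zspages (a compaction source and destination, lines 1990-1991) be write-locked at once without a lockdep nesting complaint; the pthread model above does not capture that.
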
1741 /* Number of isolated subpages for *page migration* in this zspage */
1742 static void inc_zspage_isolation(struct zspage *zspage)
1744 zspage->isolated++;
1747 static void dec_zspage_isolation(struct zspage *zspage)
1749 VM_BUG_ON(zspage->isolated == 0);
1750 zspage->isolated--;
1755 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1762 page = get_first_page(zspage);
1771 create_page_chain(class, zspage, pages);
1773 if (unlikely(ZsHugePage(zspage)))
1781 struct zspage *zspage;
1784 * The page is locked, so the zspage cannot be destroyed. For details, look at
1789 zspage = get_zspage(page);
1790 pool = zspage->pool;
1792 inc_zspage_isolation(zspage);
1803 struct zspage *zspage;
1822 zspage = get_zspage(page);
1823 pool = zspage->pool;
1830 class = zspage_class(pool, zspage);
1833 migrate_write_lock(zspage);
1839 * Here, no user can access any object in the zspage, so it is safe to move.
1858 replace_sub_page(class, zspage, newpage, page);
1859 dec_zspage_isolation(zspage);
1861 * Since we have completed the data copy and set up the new zspage structure,
1865 migrate_write_unlock(zspage);
1882 struct zspage *zspage;
1886 zspage = get_zspage(page);
1887 pool = zspage->pool;
1889 dec_zspage_isolation(zspage);
1900 * The caller should hold the page_lock of all pages in the zspage.
1901 * Here, we cannot use zspage metadata.
1909 struct zspage *zspage, *tmp;
1925 list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
1926 list_del(&zspage->list);
1927 lock_zspage(zspage);
1929 get_zspage_mapping(zspage, &class_idx, &fullness);
1933 __free_zspage(pool, class, zspage);
1953 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
1955 struct page *page = get_first_page(zspage);
1990 struct zspage *src_zspage = NULL;
1991 struct zspage *dst_zspage = NULL;