Lines Matching defs:zspage
16 * struct page(s) to form a zspage.
19 * page->private: points to zspage
20 * page->freelist(index): links together all component pages of a zspage
23 * page->units: first object offset in a subpage of zspage
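A rough userspace sketch of the struct page field reuse described at lines 16-23 above; struct page_stub and zspage_of() are illustrative names for this sketch, not kernel symbols:

struct zspage;				/* the real definition starts at line 279 */

struct page_stub {			/* stand-in for struct page                  */
	unsigned long private;		/* points to the owning zspage (line 19)     */
	struct page_stub *next;		/* models the freelist/index link (line 20)  */
	unsigned int units;		/* first object offset in this subpage (23)  */
};

static inline struct zspage *zspage_of(const struct page_stub *page)
{
	return (struct zspage *)page->private;
}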
72 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
187 * N = total number of objects zspage can store
190 * Similarly, we assign zspage to:
209 /* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
234 * For every zspage, zspage->freeobj gives head of this list.
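A minimal sketch of the fullness assignment hinted at by lines 187-190: a zspage is binned by how many of its N object slots are in use. ZS_EMPTY appears at line 974 in this listing; the other group names and the 3/4 boundary are assumptions for illustration.

enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

static enum fullness_group classify_fullness(int inuse, int objs_per_zspage)
{
	if (inuse == 0)
		return ZS_EMPTY;
	if (inuse == objs_per_zspage)
		return ZS_FULL;
	/* the 3/4 boundary is an assumption for this sketch */
	if (inuse <= 3 * objs_per_zspage / 4)
		return ZS_ALMOST_EMPTY;
	return ZS_ALMOST_FULL;
}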
279 struct zspage {
304 static void migrate_lock_init(struct zspage *zspage);
305 static void migrate_read_lock(struct zspage *zspage);
306 static void migrate_read_unlock(struct zspage *zspage);
309 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
315 static void migrate_lock_init(struct zspage *zspage) {}
316 static void migrate_read_lock(struct zspage *zspage) {}
317 static void migrate_read_unlock(struct zspage *zspage) {}
320 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
330 pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
358 static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
364 static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
366 kmem_cache_free(pool->zspage_cachep, zspage);
457 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
460 static bool is_zspage_isolated(struct zspage *zspage)
462 return zspage->isolated;
471 static inline int get_zspage_inuse(struct zspage *zspage)
473 return zspage->inuse;
477 static inline void mod_zspage_inuse(struct zspage *zspage, int val)
479 zspage->inuse += val;
482 static inline struct page *get_first_page(struct zspage *zspage)
484 struct page *first_page = zspage->first_page;
500 static inline unsigned int get_freeobj(struct zspage *zspage)
502 return zspage->freeobj;
505 static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
507 zspage->freeobj = obj;
510 static void get_zspage_mapping(struct zspage *zspage,
514 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
516 *fullness = zspage->fullness;
517 *class_idx = zspage->class;
520 static void set_zspage_mapping(struct zspage *zspage,
524 zspage->class = class_idx;
525 zspage->fullness = fullness;
530 * class maintains a list of zspages where each zspage is divided
690 struct zspage *zspage)
695 inuse = get_zspage_inuse(zspage);
713 * have. This function inserts the given zspage into the freelist
717 struct zspage *zspage,
720 struct zspage *head;
724 struct zspage, list);
730 if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
731 list_add(&zspage->list, &head->list);
735 list_add(&zspage->list, &class->fullness_list[fullness]);
739 * This function removes the given zspage from the freelist identified
743 struct zspage *zspage,
747 VM_BUG_ON(is_zspage_isolated(zspage));
749 list_del_init(&zspage->list);
763 struct zspage *zspage)
768 get_zspage_mapping(zspage, &class_idx, &currfg);
769 newfg = get_fullness_group(class, zspage);
773 if (!is_zspage_isolated(zspage)) {
774 remove_zspage(class, zspage, currfg);
775 insert_zspage(class, zspage, newfg);
778 set_zspage_mapping(zspage, class_idx, newfg);
786 * to form a zspage for each size class. This is important
788 * each zspage which is given as:
791 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
794 * link together 3 PAGE_SIZE sized pages to form a zspage
800 /* zspage order which gives maximum used size per KB */
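The page-count trade-off described around lines 786-800 (pick the k whose zspage size k * PAGE_SIZE leaves the least remainder modulo the class size) can be tried with a small standalone program; SIM_PAGE_SIZE and SIM_MAX_PAGES are assumptions for this sketch, not the kernel's constants:

#include <stdio.h>

#define SIM_PAGE_SIZE	4096	/* assumption: 4K pages */
#define SIM_MAX_PAGES	4	/* assumption: cap on pages per zspage */

/* return the page count k whose zspage size wastes the least space */
static int pick_pages_per_zspage(int class_size)
{
	int k, best_k = 1, best_usedpc = 0;

	for (k = 1; k <= SIM_MAX_PAGES; k++) {
		int zspage_size = k * SIM_PAGE_SIZE;
		int waste = zspage_size % class_size;	/* Zp % class_size */
		int usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > best_usedpc) {
			best_usedpc = usedpc;
			best_k = k;
		}
	}
	return best_k;
}

int main(void)
{
	printf("%d\n", pick_pages_per_zspage(3 * SIM_PAGE_SIZE / 8));
	return 0;
}

For a 3/8 * PAGE_SIZE class this prints 3, matching the example at line 794 of linking three pages into one zspage.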
820 static struct zspage *get_zspage(struct page *page)
822 struct zspage *zspage = (struct zspage *)page->private;
824 BUG_ON(zspage->magic != ZSPAGE_MAGIC);
825 return zspage;
839 * @page: page object resides in zspage
852 * @page: page object resides in zspage
910 static int trylock_zspage(struct zspage *zspage)
914 for (cursor = get_first_page(zspage); cursor != NULL; cursor =
924 for (cursor = get_first_page(zspage); cursor != fail; cursor =
932 struct zspage *zspage)
938 get_zspage_mapping(zspage, &class_idx, &fg);
942 VM_BUG_ON(get_zspage_inuse(zspage));
945 next = page = get_first_page(zspage);
956 cache_free_zspage(pool, zspage);
964 struct zspage *zspage)
966 VM_BUG_ON(get_zspage_inuse(zspage));
967 VM_BUG_ON(list_empty(&zspage->list));
969 if (!trylock_zspage(zspage)) {
974 remove_zspage(class, zspage, ZS_EMPTY);
975 __free_zspage(pool, class, zspage);
978 /* Initialize a newly allocated zspage */
979 static void init_zspage(struct size_class *class, struct zspage *zspage)
983 struct page *page = get_first_page(zspage);
1020 set_freeobj(zspage, 0);
1023 static void create_page_chain(struct size_class *class, struct zspage *zspage,
1034 * 2. each sub-page points to zspage using page->private
1041 set_page_private(page, (unsigned long)zspage);
1044 zspage->first_page = page;
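The chaining done by create_page_chain() (lines 1023-1044) amounts to giving every component page a back pointer and remembering the head; below is a hedged userspace rendering with stand-in types rather than the kernel's struct page:

#include <stddef.h>

struct page_link {			/* stand-in for struct page          */
	unsigned long private;		/* back pointer to the owning zspage */
	struct page_link *next;		/* next component page in the chain  */
};

struct zspage_chain {
	struct page_link *first_page;
};

/* link nr_pages component pages into one zspage, head first */
static void chain_pages(struct zspage_chain *zspage,
			struct page_link **pages, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		pages[i]->private = (unsigned long)zspage;
		pages[i]->next = (i + 1 < nr_pages) ? pages[i + 1] : NULL;
	}
	zspage->first_page = pages[0];
}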
1057 * Allocate a zspage for the given size class
1059 static struct zspage *alloc_zspage(struct zs_pool *pool,
1065 struct zspage *zspage = cache_alloc_zspage(pool, gfp);
1067 if (!zspage)
1070 memset(zspage, 0, sizeof(struct zspage));
1071 zspage->magic = ZSPAGE_MAGIC;
1072 migrate_lock_init(zspage);
1083 cache_free_zspage(pool, zspage);
1094 create_page_chain(class, zspage, pages);
1095 init_zspage(class, zspage);
1097 return zspage;
1100 static struct zspage *find_get_zspage(struct size_class *class)
1103 struct zspage *zspage;
1106 zspage = list_first_entry_or_null(&class->fullness_list[i],
1107 struct zspage, list);
1108 if (zspage)
1112 return zspage;
1222 static bool zspage_full(struct size_class *class, struct zspage *zspage)
1224 return get_zspage_inuse(zspage) == class->objs_per_zspage;
1251 struct zspage *zspage;
1275 zspage = get_zspage(page);
1277 /* migration cannot move any subpage in this zspage */
1278 migrate_read_lock(zspage);
1280 get_zspage_mapping(zspage, &class_idx, &fg);
1309 struct zspage *zspage;
1321 zspage = get_zspage(page);
1322 get_zspage_mapping(zspage, &class_idx, &fg);
1340 migrate_read_unlock(zspage);
1351 * or bigger size will be stored in zspage consisting of a single physical
1365 struct zspage *zspage, unsigned long handle)
1376 obj = get_freeobj(zspage);
1381 m_page = get_first_page(zspage);
1388 set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
1394 zspage->first_page->index = handle;
1397 mod_zspage_inuse(zspage, 1);
1421 struct zspage *zspage;
1435 zspage = find_get_zspage(class);
1436 if (likely(zspage)) {
1437 obj = obj_malloc(class, zspage, handle);
1438 /* Now move the zspage to another fullness group, if required */
1439 fix_fullness_group(class, zspage);
1448 zspage = alloc_zspage(pool, class, gfp);
1449 if (!zspage) {
1455 obj = obj_malloc(class, zspage, handle);
1456 newfg = get_fullness_group(class, zspage);
1457 insert_zspage(class, zspage, newfg);
1458 set_zspage_mapping(zspage, class->index, newfg);
1464 /* We completely set up the zspage so mark it as movable */
1465 SetZsPageMovable(pool, zspage);
1475 struct zspage *zspage;
1484 zspage = get_zspage(f_page);
1488 /* Insert this object into the containing zspage's freelist */
1490 link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1492 set_freeobj(zspage, f_objidx);
1493 mod_zspage_inuse(zspage, -1);
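The free-object list touched by obj_malloc() (lines 1376-1397) and obj_free() (lines 1488-1493) keeps, in the first word of each free slot, the index of the next free slot shifted past a tag field, with zspage->freeobj caching the head. A hedged sketch over a flat slot array, with OBJ_TAG_BITS = 1 assumed for illustration:

#include <stdint.h>

#define OBJ_TAG_BITS	1	/* tag width assumed for this sketch */

struct zspage_slots {
	unsigned int freeobj;	/* index of the first free object slot */
	int inuse;		/* live objects in this zspage         */
	uintptr_t *slots;	/* first word of every object slot     */
};

/* pop the freelist head: roughly what obj_malloc() does */
static unsigned int freelist_pop(struct zspage_slots *zspage)
{
	unsigned int obj = zspage->freeobj;

	zspage->freeobj = zspage->slots[obj] >> OBJ_TAG_BITS;
	zspage->inuse++;
	return obj;
}

/* push a freed slot back: roughly what obj_free() does */
static void freelist_push(struct zspage_slots *zspage, unsigned int obj)
{
	zspage->slots[obj] = (uintptr_t)zspage->freeobj << OBJ_TAG_BITS;
	zspage->freeobj = obj;
	zspage->inuse--;
}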
1499 struct zspage *zspage;
1514 zspage = get_zspage(f_page);
1516 migrate_read_lock(zspage);
1518 get_zspage_mapping(zspage, &class_idx, &fullness);
1523 fullness = fix_fullness_group(class, zspage);
1525 migrate_read_unlock(zspage);
1529 isolated = is_zspage_isolated(zspage);
1530 migrate_read_unlock(zspage);
1531 /* If zspage is isolated, zs_page_putback will free the zspage */
1533 free_zspage(pool, class, zspage);
1606 * Find allocated object in zspage from index object and
1642 /* Source page for migration which could be a subpage of zspage */
1645 * of zspage. */
1702 static struct zspage *isolate_zspage(struct size_class *class, bool source)
1705 struct zspage *zspage;
1714 zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
1715 struct zspage, list);
1716 if (zspage) {
1717 VM_BUG_ON(is_zspage_isolated(zspage));
1718 remove_zspage(class, zspage, fg[i]);
1719 return zspage;
1723 return zspage;
1727 * putback_zspage - add @zspage into right class's fullness list
1729 * @zspage: target page
1731 * Return @zspage's fullness_group
1734 struct zspage *zspage)
1738 VM_BUG_ON(is_zspage_isolated(zspage));
1740 fullness = get_fullness_group(class, zspage);
1741 insert_zspage(class, zspage, fullness);
1742 set_zspage_mapping(zspage, class->index, fullness);
1749 * To prevent zspage destruction during migration, zspage freeing should
1750 * hold the locks of all pages in the zspage.
1752 static void lock_zspage(struct zspage *zspage)
1760 * may no longer belong to the zspage. This means that we may wait for
1765 migrate_read_lock(zspage);
1766 page = get_first_page(zspage);
1770 migrate_read_unlock(zspage);
1781 migrate_read_unlock(zspage);
1784 migrate_read_lock(zspage);
1787 migrate_read_unlock(zspage);
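Per lines 1749-1750, freeing must hold the lock of every page in the zspage; trylock_zspage() (line 910) takes them all or rolls back. A hedged pthreads rendering of that all-or-nothing step, leaving out lock_zspage()'s migration retry logic:

#include <pthread.h>
#include <stdbool.h>

/* take every per-page lock or none: roll back on the first failure */
static bool trylock_all_pages(pthread_mutex_t *locks, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (pthread_mutex_trylock(&locks[i]) != 0) {
			while (--i >= 0)
				pthread_mutex_unlock(&locks[i]);
			return false;
		}
	}
	return true;
}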
1817 static void migrate_lock_init(struct zspage *zspage)
1819 rwlock_init(&zspage->lock);
1822 static void migrate_read_lock(struct zspage *zspage) __acquires(&zspage->lock)
1824 read_lock(&zspage->lock);
1827 static void migrate_read_unlock(struct zspage *zspage) __releases(&zspage->lock)
1829 read_unlock(&zspage->lock);
1832 static void migrate_write_lock(struct zspage *zspage)
1834 write_lock(&zspage->lock);
1837 static void migrate_write_unlock(struct zspage *zspage)
1839 write_unlock(&zspage->lock);
1842 /* Number of isolated subpages for *page migration* in this zspage */
1843 static void inc_zspage_isolation(struct zspage *zspage)
1845 zspage->isolated++;
1848 static void dec_zspage_isolation(struct zspage *zspage)
1850 zspage->isolated--;
1855 struct zspage *zspage)
1859 fg = putback_zspage(class, zspage);
1879 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1886 page = get_first_page(zspage);
1895 create_page_chain(class, zspage, pages);
1908 struct zspage *zspage;
1912 * Page is locked so the zspage cannot be destroyed. For details, look at
1918 zspage = get_zspage(page);
1925 get_zspage_mapping(zspage, &class_idx, &fullness);
1931 if (get_zspage_inuse(zspage) == 0) {
1936 /* zspage is isolated for object migration */
1937 if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1943 * If this is the first isolation of the zspage, isolate the zspage from its
1944 * size_class to prevent further object allocation from the zspage.
1946 if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
1947 get_zspage_mapping(zspage, &class_idx, &fullness);
1949 remove_zspage(class, zspage, fullness);
1952 inc_zspage_isolation(zspage);
1965 struct zspage *zspage;
1985 zspage = get_zspage(page);
1987 /* Concurrent compactor cannot migrate any subpage in zspage */
1988 migrate_write_lock(zspage);
1989 get_zspage_mapping(zspage, &class_idx, &fullness);
1995 if (!get_zspage_inuse(zspage)) {
2016 * Here, no user can access any object in the zspage, so let's move.
2039 replace_sub_page(class, zspage, newpage, page);
2042 dec_zspage_isolation(zspage);
2045 * Page migration is done, so let's put the isolated zspage back on
2046 * the list if @page is the final isolated subpage in the zspage.
2048 if (!is_zspage_isolated(zspage)) {
2055 putback_zspage_deferred(pool, class, zspage);
2082 migrate_write_unlock(zspage);
2094 struct zspage *zspage;
2099 zspage = get_zspage(page);
2100 get_zspage_mapping(zspage, &class_idx, &fg);
2106 dec_zspage_isolation(zspage);
2107 if (!is_zspage_isolated(zspage)) {
2109 * Due to page_lock, we cannot free zspage immediately
2112 putback_zspage_deferred(pool, class, zspage);
2172 * Caller should hold the page_lock of all pages in the zspage.
2173 * Here, we cannot use zspage metadata.
2181 struct zspage *zspage, *tmp;
2197 list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
2198 list_del(&zspage->list);
2199 lock_zspage(zspage);
2201 get_zspage_mapping(zspage, &class_idx, &fullness);
2205 __free_zspage(pool, pool->size_class[class_idx], zspage);
2220 static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
2222 struct page *page = get_first_page(zspage);
2256 struct zspage *src_zspage;
2257 struct zspage *dst_zspage = NULL;
2273 * and see if anyone had allocated another zspage.