Lines matching size_class in mm/zsmalloc.c (definitions and all references)

200 struct size_class {
255 struct size_class *size_class[ZS_SIZE_CLASSES];
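
Taken together, lines 200 and 255 give the core data model: a per-pool table of ZS_SIZE_CLASSES pointers, one slot per supported allocation size, each pointing at the size_class that serves it. A minimal sketch of that shape (field names follow the matches below; the in-kernel struct also carries a lock, per-fullness zspage lists and stats, and 255 is just what a 4 KiB-page build works out to):

    #define ZS_SIZE_CLASSES 255     /* illustrative; the kernel derives this */

    struct size_class {
            int size;               /* object size this class serves */
            int objs_per_zspage;    /* objects packed into one zspage */
            int pages_per_zspage;   /* 0-order pages chained into a zspage */
            unsigned int index;     /* slot in zs_pool->size_class[] */
    };

    struct zs_pool {
            struct size_class *size_class[ZS_SIZE_CLASSES];
    };

Several table slots may share one size_class; see the merging logic around line 2417 below.
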
547 static inline void zs_stat_inc(struct size_class *class,
554 static inline void zs_stat_dec(struct size_class *class,
561 static inline unsigned long zs_stat_get(struct size_class *class,
584 static unsigned long zs_can_compact(struct size_class *class);
590 struct size_class *class;
604 class = pool->size_class[i];
689 static enum fullness_group get_fullness_group(struct size_class *class,
716 static void insert_zspage(struct size_class *class,
742 static void remove_zspage(struct size_class *class,
762 static enum fullness_group fix_fullness_group(struct size_class *class,
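
Lines 689-762 are the fullness bookkeeping: every zspage sits on one of its class's per-group lists, and fix_fullness_group() moves it (remove_zspage() plus insert_zspage()) whenever an alloc or free pushes it across a watermark. A sketch of the classification, reusing the model above; the group names match the kernel, but the 3/4 watermark is written out directly here rather than taken from the kernel's threshold constant, and an int parameter stands in for the zspage's inuse counter:

    enum fullness_group { ZS_EMPTY, ZS_ALMOST_EMPTY, ZS_ALMOST_FULL, ZS_FULL };

    static enum fullness_group get_fullness_group(const struct size_class *class,
                                                  int inuse)
    {
            if (inuse == 0)
                    return ZS_EMPTY;
            if (inuse == class->objs_per_zspage)
                    return ZS_FULL;
            if (inuse * 4 > class->objs_per_zspage * 3)
                    return ZS_ALMOST_FULL;
            return ZS_ALMOST_EMPTY;
    }
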
931 static void __free_zspage(struct zs_pool *pool, struct size_class *class,
963 static void free_zspage(struct zs_pool *pool, struct size_class *class,
979 static void init_zspage(struct size_class *class, struct zspage *zspage)
1023 static void create_page_chain(struct size_class *class, struct zspage *zspage,
1060 struct size_class *class,
1100 static struct zspage *find_get_zspage(struct size_class *class)
1212 static bool can_merge(struct size_class *prev, int pages_per_zspage,
1222 static bool zspage_full(struct size_class *class, struct zspage *zspage)
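
The two predicates at lines 1212 and 1222 are small but load-bearing: can_merge() decides whether a new class would carve up its zspages exactly like the previous one (in which case the table slot can simply reuse that class), and zspage_full() tells the allocator it must find or create another zspage. A sketch under the same model; the kernel's zspage_full() takes the zspage itself, so the plain inuse parameter here is an assumption made for self-containment:

    #include <stdbool.h>

    static bool can_merge(const struct size_class *prev, int pages_per_zspage,
                          int objs_per_zspage)
    {
            return prev->pages_per_zspage == pages_per_zspage &&
                   prev->objs_per_zspage == objs_per_zspage;
    }

    static bool zspage_full(const struct size_class *class, int inuse)
    {
            return inuse == class->objs_per_zspage;
    }
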
1258 struct size_class *class;
1281 class = pool->size_class[class_idx];
1316 struct size_class *class;
1323 class = pool->size_class[class_idx];
1347 * zsmalloc &size_class.
1356 * Return: the size (in bytes) of the first huge zsmalloc &size_class.
1364 static unsigned long obj_malloc(struct size_class *class,
1419 struct size_class *class;
1432 class = pool->size_class[get_size_class_index(size)];
1472 static void obj_free(struct size_class *class, unsigned long obj)
1504 struct size_class *class;
1519 class = pool->size_class[class_idx];
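
On the allocation side (line 1432), zs_malloc() maps the requested size to a table slot with get_size_class_index(); on the free side (lines 1504-1519), the class index is read back out of the zspage itself, since the caller's handle carries no size. A sketch of the forward mapping, assuming the kernel's linear class spacing; the 32-byte minimum and 16-byte delta are the usual 4 KiB-page values, not universal constants:

    #define ZS_MIN_ALLOC_SIZE   32
    #define ZS_SIZE_CLASS_DELTA 16

    static int get_size_class_index(int size)
    {
            int idx = 0;

            if (size > ZS_MIN_ALLOC_SIZE)
                    idx = (size - ZS_MIN_ALLOC_SIZE + ZS_SIZE_CLASS_DELTA - 1)
                                    / ZS_SIZE_CLASS_DELTA;    /* round up */

            /* clamp to the last class */
            return idx < ZS_SIZE_CLASSES - 1 ? idx : ZS_SIZE_CLASSES - 1;
    }
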
1542 static void zs_object_copy(struct size_class *class, unsigned long dst,
1609 static unsigned long find_alloced_obj(struct size_class *class,
1652 static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
1702 static struct zspage *isolate_zspage(struct size_class *class, bool source)
1733 static enum fullness_group putback_zspage(struct size_class *class,
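
Lines 1542-1733 form the compaction inner loop: isolate a nearly-empty source zspage and a fuller destination, then walk the source with find_alloced_obj(), re-allocating each live object in the destination, copying it over, and freeing the old slot. A compilable toy over an array-of-slots stand-in for a zspage; every name below is an assumption for illustration, not kernel code:

    #include <stddef.h>

    struct toy_zspage { void *slot[8]; int inuse; };

    static void migrate_toy_zspage(struct toy_zspage *src, struct toy_zspage *dst)
    {
            int s, d = 0;

            for (s = 0; s < 8 && src->inuse > 0; s++) {
                    if (!src->slot[s])
                            continue;               /* find_alloced_obj() */
                    while (d < 8 && dst->slot[d])
                            d++;                    /* obj_malloc() in dst */
                    if (d == 8)
                            return;                 /* dst full: caller isolates another */
                    dst->slot[d] = src->slot[s];    /* zs_object_copy() */
                    src->slot[s] = NULL;            /* obj_free() in src */
                    dst->inuse++;
                    src->inuse--;
            }
    }

Once a source drains to empty, putback_zspage() (line 1733) reclassifies it and free_zspage() (line 963) can return its pages.
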
1854 struct size_class *class,
1879 static void replace_sub_page(struct size_class *class, struct zspage *zspage,
1905 struct size_class *class;
1928 class = pool->size_class[class_idx];
1944 * size_class to prevent further object allocation from the zspage.
1962 struct size_class *class;
1991 class = pool->size_class[class_idx];
2090 struct size_class *class;
2103 class = pool->size_class[class_idx];
2178 struct size_class *class;
2187 class = pool->size_class[i];
2203 class = pool->size_class[class_idx];
2205 __free_zspage(pool, pool->size_class[class_idx], zspage);
2237 static unsigned long zs_can_compact(struct size_class *class)
2253 struct size_class *class)
2306 struct size_class *class;
2310 class = pool->size_class[i];
2350 struct size_class *class;
2356 class = pool->size_class[i];
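
zs_can_compact() (declared at line 584, defined at line 2237) turns the per-class stats into an estimate of reclaimable pages: the surplus of allocated-over-used object slots, rounded down to whole zspages. A sketch that takes the two counters maintained by the helpers at lines 547-561 as parameters instead of reading class stats:

    static unsigned long zs_can_compact(const struct size_class *class,
                                        unsigned long obj_allocated,
                                        unsigned long obj_used)
    {
            unsigned long wasted_zspages;

            if (obj_allocated <= obj_used)
                    return 0;

            wasted_zspages = (obj_allocated - obj_used) / class->objs_per_zspage;
            return wasted_zspages * class->pages_per_zspage;
    }
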
2397 struct size_class *prev_class = NULL;
2417 * Iterate in reverse, because the size of the size_class we want to use
2424 struct size_class *class;
2455 * size_class is used for normal zsmalloc operation such
2457 * have one size_class for each size, there is a chance that we
2458 * can get more memory utilization if we use one size_class for
2459 * many different sizes whose size_class have same
2460 * characteristics. So, we make size_class point to
2461 * previous size_class if possible.
2465 pool->size_class[i] = prev_class;
2470 class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
2479 pool->size_class[i] = class;
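
Lines 2397-2479 (zs_create_pool()) are the only place size_class instances are created, and the comment above is the rationale for the reverse walk: iterating from the largest size down, each slot either reuses the previous class (when can_merge() says the zspage geometry is identical) or allocates a fresh one. Reassembling the matched fragments into a sketch, with error handling, the huge-size clamp, and the lock/list/stat setup elided:

    struct size_class *prev_class = NULL;
    int i;

    for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
            int size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
            int pages_per_zspage = get_pages_per_zspage(size);
            int objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
            struct size_class *class;

            if (prev_class &&
                can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
                    pool->size_class[i] = prev_class;       /* line 2465 */
                    continue;
            }

            class = kzalloc(sizeof(struct size_class), GFP_KERNEL); /* 2470 */
            class->size = size;
            class->index = i;
            class->pages_per_zspage = pages_per_zspage;
            class->objs_per_zspage = objs_per_zspage;
            pool->size_class[i] = class;                    /* line 2479 */
            prev_class = class;
    }

Because merged slots alias one class, the teardown at line 2519 must free each size_class only once; the kernel does this by skipping slots whose class->index does not match the loop position.
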
2519 struct size_class *class = pool->size_class[i];