1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * mm/zswapd_control.c
4  *
5  * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
6  */
7 
8 #include <linux/memcontrol.h>
9 #include <linux/types.h>
10 #include <linux/cgroup-defs.h>
11 #include <linux/cgroup.h>
12 #include <linux/memcg_policy.h>
13 #include <linux/file.h>
14 #include <linux/zswapd.h>
15 
16 #include "zswapd_internal.h"
17 
18 #define ANON_REFAULT_SNAPSHOT_MIN_INTERVAL 200
19 #define AREA_ANON_REFAULT_THRESHOLD 22000
20 #define EMPTY_ROUND_CHECK_THRESHOLD 10
21 #define EMPTY_ROUND_SKIP_INTERVAL 20
22 #define ZSWAPD_MAX_LEVEL_NUM 10
23 #define MAX_SKIP_INTERVAL 1000
24 #define MAX_RECLAIM_SIZE 100
25 
26 #define INACTIVE_FILE_RATIO 90
27 #define ACTIVE_FILE_RATIO 70
28 #define COMPRESS_RATIO 30
29 #define ZRAM_WM_RATIO 0
30 #define MAX_RATIO 100
31 
32 #define CHECK_BUFFER_VALID(var1, var2) (((var2) != 0) && ((var1) > (var2)))
33 
/*
 * Per-level reclaim tuning, selected by a memcg's app_score falling into
 * [min_score, max_score].  Written via the zswapd_memcgs_param cgroup file.
 */
struct zswapd_param {
	unsigned int min_score;		/* inclusive lower bound of app_score */
	unsigned int max_score;		/* inclusive upper bound of app_score */
	unsigned int ub_mem2zram_ratio;	/* percent of memory to compress to zram */
	unsigned int ub_zram2ufs_ratio;	/* percent of zram to write out to eswap */
	unsigned int refault_threshold;	/* per-memcg refault cutoff */
};
41 
/* Level table filled by zswapd_memcgs_param_write(), applied to all memcgs. */
static struct zswapd_param zswap_param[ZSWAPD_MAX_LEVEL_NUM];
/* One eventfd per pressure level; signalled by zswapd_pressure_report(). */
struct eventfd_ctx *zswapd_press_efd[LEVEL_COUNT];
static DEFINE_MUTEX(pressure_event_lock);	/* serializes eventfd registration */
static DEFINE_MUTEX(reclaim_para_lock);		/* serializes zswap_param[] updates */

/* Buffer watermarks (MB granularity at the interface, stored as written). */
atomic_t avail_buffers = ATOMIC_INIT(0);
atomic_t min_avail_buffers = ATOMIC_INIT(0);
atomic_t high_avail_buffers = ATOMIC_INIT(0);
atomic_t max_reclaim_size = ATOMIC_INIT(MAX_RECLAIM_SIZE);

/* Ratio knobs, all percentages in [0, MAX_RATIO]. */
atomic_t inactive_file_ratio = ATOMIC_INIT(INACTIVE_FILE_RATIO);
atomic_t active_file_ratio = ATOMIC_INIT(ACTIVE_FILE_RATIO);
atomic_t zram_wm_ratio = ATOMIC_INIT(ZRAM_WM_RATIO);
atomic_t compress_ratio = ATOMIC_INIT(COMPRESS_RATIO);

/* 64-bit thresholds/intervals read by the zswapd main loop. */
atomic64_t zram_critical_threshold = ATOMIC_LONG_INIT(0);
atomic64_t free_swap_threshold = ATOMIC_LONG_INIT(0);
atomic64_t area_anon_refault_threshold = ATOMIC_LONG_INIT(AREA_ANON_REFAULT_THRESHOLD);
atomic64_t anon_refault_snapshot_min_interval =
	ATOMIC_LONG_INIT(ANON_REFAULT_SNAPSHOT_MIN_INTERVAL);
atomic64_t empty_round_skip_interval = ATOMIC_LONG_INIT(EMPTY_ROUND_SKIP_INTERVAL);
atomic64_t max_skip_interval = ATOMIC_LONG_INIT(MAX_SKIP_INTERVAL);
atomic64_t empty_round_check_threshold = ATOMIC_LONG_INIT(EMPTY_ROUND_CHECK_THRESHOLD);
65 
/* Current zram watermark ratio (percent). */
inline unsigned int get_zram_wm_ratio(void)
{
	return atomic_read(&zram_wm_ratio);
}
70 
/* Current assumed compression ratio (percent). */
inline unsigned int get_compress_ratio(void)
{
	return atomic_read(&compress_ratio);
}
75 
/* Inactive-file share counted toward available buffers (percent). */
inline unsigned int get_inactive_file_ratio(void)
{
	return atomic_read(&inactive_file_ratio);
}
80 
/* Active-file share counted toward available buffers (percent). */
inline unsigned int get_active_file_ratio(void)
{
	return atomic_read(&active_file_ratio);
}
85 
/* Target available-buffer watermark. */
inline unsigned int get_avail_buffers(void)
{
	return atomic_read(&avail_buffers);
}
90 
/* Low available-buffer watermark below which zswapd reclaims. */
inline unsigned int get_min_avail_buffers(void)
{
	return atomic_read(&min_avail_buffers);
}
95 
/* High available-buffer watermark at which zswapd stops reclaiming. */
inline unsigned int get_high_avail_buffers(void)
{
	return atomic_read(&high_avail_buffers);
}
100 
/* Upper bound on the amount zswapd reclaims per round. */
inline unsigned int get_zswapd_max_reclaim_size(void)
{
	return atomic_read(&max_reclaim_size);
}
105 
/* Free-swap threshold, stored in pages (converted from MB on write). */
inline unsigned long long get_free_swap_threshold(void)
{
	return atomic64_read(&free_swap_threshold);
}
110 
/* Anon refault threshold for the whole area. */
inline unsigned long long get_area_anon_refault_threshold(void)
{
	return atomic64_read(&area_anon_refault_threshold);
}
115 
/* Minimum interval between anon-refault snapshots. */
inline unsigned long long get_anon_refault_snapshot_min_interval(void)
{
	return atomic64_read(&anon_refault_snapshot_min_interval);
}
120 
/* Initial skip interval after empty reclaim rounds. */
inline unsigned long long get_empty_round_skip_interval(void)
{
	return atomic64_read(&empty_round_skip_interval);
}
125 
/* Cap on the skip interval growth. */
inline unsigned long long get_max_skip_interval(void)
{
	return atomic64_read(&max_skip_interval);
}
130 
/* Number of consecutive empty rounds before skipping kicks in. */
inline unsigned long long get_empty_round_check_threshold(void)
{
	return atomic64_read(&empty_round_check_threshold);
}
135 
/* zram usage level treated as critical pressure. */
inline unsigned long long get_zram_critical_threshold(void)
{
	return atomic64_read(&zram_critical_threshold);
}
140 
avail_buffers_params_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)141 static ssize_t avail_buffers_params_write(struct kernfs_open_file *of,
142 				char *buf, size_t nbytes, loff_t off)
143 {
144 	unsigned long long threshold;
145 	unsigned int high_buffers;
146 	unsigned int min_buffers;
147 	unsigned int buffers;
148 
149 	buf = strstrip(buf);
150 
151 	if (sscanf(buf, "%u %u %u %llu", &buffers, &min_buffers, &high_buffers, &threshold) != 4)
152 		return -EINVAL;
153 
154 	if (CHECK_BUFFER_VALID(min_buffers, buffers) ||
155 	    CHECK_BUFFER_VALID(min_buffers, high_buffers) ||
156 	    CHECK_BUFFER_VALID(buffers, high_buffers))
157 		return -EINVAL;
158 
159 	atomic_set(&avail_buffers, buffers);
160 	atomic_set(&min_avail_buffers, min_buffers);
161 	atomic_set(&high_avail_buffers, high_buffers);
162 	atomic64_set(&free_swap_threshold, (threshold * (SZ_1M / PAGE_SIZE)));
163 
164 	if (atomic_read(&min_avail_buffers) == 0)
165 		set_snapshotd_init_flag(0);
166 	else
167 		set_snapshotd_init_flag(1);
168 
169 	wake_all_zswapd();
170 
171 	return nbytes;
172 }
173 
zswapd_max_reclaim_size_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)174 static ssize_t zswapd_max_reclaim_size_write(struct kernfs_open_file *of,
175 				char *buf, size_t nbytes, loff_t off)
176 {
177 	u32 max;
178 	int ret;
179 
180 	buf = strstrip(buf);
181 	ret = kstrtouint(buf, 10, &max);
182 	if (ret)
183 		return -EINVAL;
184 
185 	atomic_set(&max_reclaim_size, max);
186 
187 	return nbytes;
188 }
189 
buffers_ratio_params_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)190 static ssize_t buffers_ratio_params_write(struct kernfs_open_file *of,
191 				char *buf, size_t nbytes, loff_t off)
192 {
193 	unsigned int inactive;
194 	unsigned int active;
195 
196 	buf = strstrip(buf);
197 
198 	if (sscanf(buf, "%u %u", &inactive, &active) != 2)
199 		return -EINVAL;
200 
201 	if (inactive > MAX_RATIO || active > MAX_RATIO)
202 		return -EINVAL;
203 
204 	atomic_set(&inactive_file_ratio, inactive);
205 	atomic_set(&active_file_ratio, active);
206 
207 	return nbytes;
208 }
209 
/* cgroup write_u64 handler: set the area anon-refault threshold. */
static int area_anon_refault_threshold_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&area_anon_refault_threshold, val);

	return 0;
}
217 
/* cgroup write_u64 handler: set the empty-round skip interval. */
static int empty_round_skip_interval_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&empty_round_skip_interval, val);

	return 0;
}
225 
/* cgroup write_u64 handler: set the maximum skip interval. */
static int max_skip_interval_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&max_skip_interval, val);

	return 0;
}
233 
/* cgroup write_u64 handler: set the empty-round check threshold. */
static int empty_round_check_threshold_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&empty_round_check_threshold, val);

	return 0;
}
241 
/* cgroup write_u64 handler: set the minimum anon-refault snapshot interval. */
static int anon_refault_snapshot_min_interval_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&anon_refault_snapshot_min_interval, val);

	return 0;
}
249 
/* cgroup write_u64 handler: set the critical zram threshold. */
static int zram_critical_thres_write(struct cgroup_subsys_state *css,
				struct cftype *cft, u64 val)
{
	atomic64_set(&zram_critical_threshold, val);

	return 0;
}
257 
zswapd_pressure_event_control(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)258 static ssize_t zswapd_pressure_event_control(struct kernfs_open_file *of,
259 				char *buf, size_t nbytes, loff_t off)
260 {
261 	unsigned int level;
262 	unsigned int efd;
263 	struct fd efile;
264 	int ret;
265 
266 	buf = strstrip(buf);
267 	if (sscanf(buf, "%u %u", &efd, &level) != 2)
268 		return -EINVAL;
269 
270 	if (level >= LEVEL_COUNT)
271 		return -EINVAL;
272 
273 	mutex_lock(&pressure_event_lock);
274 	efile = fdget(efd);
275 	if (!efile.file) {
276 		ret = -EBADF;
277 		goto out;
278 	}
279 
280 	zswapd_press_efd[level] = eventfd_ctx_fileget(efile.file);
281 	if (IS_ERR(zswapd_press_efd[level])) {
282 		ret = PTR_ERR(zswapd_press_efd[level]);
283 		goto out_put_efile;
284 	}
285 	fdput(efile);
286 	mutex_unlock(&pressure_event_lock);
287 	return nbytes;
288 
289 out_put_efile:
290 	fdput(efile);
291 out:
292 	mutex_unlock(&pressure_event_lock);
293 
294 	return ret;
295 }
296 
zswapd_pressure_report(enum zswapd_pressure_level level)297 void zswapd_pressure_report(enum zswapd_pressure_level level)
298 {
299 	int ret;
300 
301 	if (zswapd_press_efd[level] == NULL)
302 		return;
303 
304 	ret = eventfd_signal(zswapd_press_efd[level], 1);
305 	if (ret < 0)
306 		pr_err("SWAP-MM: %s : level:%u, ret:%d ", __func__, level, ret);
307 }
308 
/* cgroup read_u64 handler: report the zswapd kernel thread's pid. */
static u64 zswapd_pid_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return get_zswapd_pid();
}
313 
zswapd_memcgs_param_parse(int level_num)314 static void zswapd_memcgs_param_parse(int level_num)
315 {
316 	struct mem_cgroup *memcg = NULL;
317 	u64 score;
318 	int i;
319 
320 	while ((memcg = get_next_memcg(memcg))) {
321 		score = atomic64_read(&memcg->memcg_reclaimed.app_score);
322 		for (i = 0; i < level_num; ++i)
323 			if (score >= zswap_param[i].min_score &&
324 			    score <= zswap_param[i].max_score)
325 				break;
326 
327 		atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio,
328 			zswap_param[i].ub_mem2zram_ratio);
329 		atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio,
330 			zswap_param[i].ub_zram2ufs_ratio);
331 		atomic_set(&memcg->memcg_reclaimed.refault_threshold,
332 			zswap_param[i].refault_threshold);
333 	}
334 }
335 
zswapd_memcgs_param_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)336 static ssize_t zswapd_memcgs_param_write(struct kernfs_open_file *of, char *buf,
337 				size_t nbytes, loff_t off)
338 {
339 	char *token = NULL;
340 	int level_num;
341 	int i;
342 
343 	buf = strstrip(buf);
344 	token = strsep(&buf, " ");
345 
346 	if (!token)
347 		return -EINVAL;
348 
349 	if (kstrtoint(token, 0, &level_num))
350 		return -EINVAL;
351 
352 	if (level_num > ZSWAPD_MAX_LEVEL_NUM)
353 		return -EINVAL;
354 
355 	mutex_lock(&reclaim_para_lock);
356 	for (i = 0; i < level_num; ++i) {
357 		token = strsep(&buf, " ");
358 		if (!token)
359 			goto out;
360 
361 		if (kstrtoint(token, 0, &zswap_param[i].min_score) ||
362 			zswap_param[i].min_score > MAX_APP_SCORE)
363 			goto out;
364 
365 		token = strsep(&buf, " ");
366 		if (!token)
367 			goto out;
368 
369 		if (kstrtoint(token, 0, &zswap_param[i].max_score) ||
370 			zswap_param[i].max_score > MAX_APP_SCORE)
371 			goto out;
372 
373 		token = strsep(&buf, " ");
374 		if (!token)
375 			goto out;
376 
377 		if (kstrtoint(token, 0, &zswap_param[i].ub_mem2zram_ratio) ||
378 			zswap_param[i].ub_mem2zram_ratio > MAX_RATIO)
379 			goto out;
380 
381 		token = strsep(&buf, " ");
382 		if (!token)
383 			goto out;
384 
385 		if (kstrtoint(token, 0, &zswap_param[i].ub_zram2ufs_ratio) ||
386 			zswap_param[i].ub_zram2ufs_ratio > MAX_RATIO)
387 			goto out;
388 
389 		token = strsep(&buf, " ");
390 		if (!token)
391 			goto out;
392 
393 		if (kstrtoint(token, 0, &zswap_param[i].refault_threshold))
394 			goto out;
395 	}
396 
397 	zswapd_memcgs_param_parse(level_num);
398 	mutex_unlock(&reclaim_para_lock);
399 
400 	return nbytes;
401 
402 out:
403 	mutex_unlock(&reclaim_para_lock);
404 	return -EINVAL;
405 }
406 
zswapd_single_memcg_param_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)407 static ssize_t zswapd_single_memcg_param_write(struct kernfs_open_file *of,
408 				char *buf, size_t nbytes, loff_t off)
409 {
410 	struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of));
411 	unsigned int ub_mem2zram_ratio;
412 	unsigned int ub_zram2ufs_ratio;
413 	unsigned int refault_threshold;
414 
415 	buf = strstrip(buf);
416 
417 	if (sscanf(buf, "%u %u %u", &ub_mem2zram_ratio, &ub_zram2ufs_ratio,
418 			&refault_threshold) != 3)
419 		return -EINVAL;
420 
421 	if (ub_mem2zram_ratio > MAX_RATIO || ub_zram2ufs_ratio > MAX_RATIO ||
422 	    refault_threshold > MAX_RATIO)
423 		return -EINVAL;
424 
425 	atomic_set(&memcg->memcg_reclaimed.ub_mem2zram_ratio,
426 		ub_mem2zram_ratio);
427 	atomic_set(&memcg->memcg_reclaimed.ub_zram2ufs_ratio,
428 		ub_zram2ufs_ratio);
429 	atomic_set(&memcg->memcg_reclaimed.refault_threshold,
430 		refault_threshold);
431 
432 	return nbytes;
433 }
434 
mem_cgroup_zram_wm_ratio_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)435 static ssize_t mem_cgroup_zram_wm_ratio_write(struct kernfs_open_file *of,
436 				char *buf, size_t nbytes, loff_t off)
437 {
438 	unsigned int ratio;
439 	int ret;
440 
441 	buf = strstrip(buf);
442 
443 	ret = kstrtouint(buf, 10, &ratio);
444 	if (ret)
445 		return -EINVAL;
446 
447 	if (ratio > MAX_RATIO)
448 		return -EINVAL;
449 
450 	atomic_set(&zram_wm_ratio, ratio);
451 
452 	return nbytes;
453 }
454 
mem_cgroup_compress_ratio_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off)455 static ssize_t mem_cgroup_compress_ratio_write(struct kernfs_open_file *of,
456 				char *buf, size_t nbytes, loff_t off)
457 {
458 	unsigned int ratio;
459 	int ret;
460 
461 	buf = strstrip(buf);
462 
463 	ret = kstrtouint(buf, 10, &ratio);
464 	if (ret)
465 		return -EINVAL;
466 
467 	if (ratio > MAX_RATIO)
468 		return -EINVAL;
469 
470 	atomic_set(&compress_ratio, ratio);
471 
472 	return nbytes;
473 }
474 
/* seq_show handler: delegate to the zswapd status dump. */
static int zswapd_pressure_show(struct seq_file *m, void *v)
{
	zswapd_status_show(m);

	return 0;
}
481 
/*
 * Dump one line per named memcg with non-zero anon/zram/eswap usage:
 *   "<name> <score> <anon_kB> <zram_kB> <eswap_kB> <reclaimed_pagefault>"
 *
 * Iterates with get_next_memcg()/get_next_memcg_break(), which manage the
 * memcg reference internally; bailing out early must go through
 * get_next_memcg_break() to drop the held reference.
 */
static int memcg_active_app_info_list_show(struct seq_file *m, void *v)
{
	struct mem_cgroup_per_node *mz = NULL;
	struct mem_cgroup *memcg = NULL;
	struct lruvec *lruvec = NULL;
	unsigned long eswap_size;
	unsigned long anon_size;
	unsigned long zram_size;

	while ((memcg = get_next_memcg(memcg))) {
		u64 score = atomic64_read(&memcg->memcg_reclaimed.app_score);

		/* Only node 0 is inspected; stop entirely if it is missing. */
		mz = mem_cgroup_nodeinfo(memcg, 0);
		if (!mz) {
			get_next_memcg_break(memcg);
			return 0;
		}

		lruvec = &mz->lruvec;
		/* NOTE(review): address of a member — this check can never fire. */
		if (!lruvec) {
			get_next_memcg_break(memcg);
			return 0;
		}

		anon_size = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON,
			MAX_NR_ZONES) +	lruvec_lru_size(lruvec,
			LRU_INACTIVE_ANON, MAX_NR_ZONES);
		eswap_size = memcg_data_size(memcg, SWAP_SIZE);
		zram_size = memcg_data_size(memcg, CACHE_SIZE);

		/* Skip memcgs with nothing to report or no name. */
		if (anon_size + zram_size + eswap_size == 0)
			continue;

		if (!strlen(memcg->name))
			continue;

		/* Convert page counts to kB. */
		anon_size *= PAGE_SIZE / SZ_1K;
		zram_size *= PAGE_SIZE / SZ_1K;
		eswap_size *= PAGE_SIZE / SZ_1K;

		seq_printf(m, "%s %llu %lu %lu %lu %llu\n", memcg->name, score,
			anon_size, zram_size, eswap_size,
			memcg->memcg_reclaimed.reclaimed_pagefault);
	}
	return 0;
}
528 
529 #ifdef CONFIG_HYPERHOLD_DEBUG
avail_buffers_params_show(struct seq_file *m, void *v)530 static int avail_buffers_params_show(struct seq_file *m, void *v)
531 {
532 	seq_printf(m, "avail_buffers: %u\n", atomic_read(&avail_buffers));
533 	seq_printf(m, "min_avail_buffers: %u\n", atomic_read(&min_avail_buffers));
534 	seq_printf(m, "high_avail_buffers: %u\n", atomic_read(&high_avail_buffers));
535 	seq_printf(m, "free_swap_threshold: %llu\n",
536 		atomic64_read(&free_swap_threshold) * PAGE_SIZE / SZ_1M);
537 
538 	return 0;
539 }
540 
zswapd_max_reclaim_size_show(struct seq_file *m, void *v)541 static int zswapd_max_reclaim_size_show(struct seq_file *m, void *v)
542 {
543 	seq_printf(m, "zswapd_max_reclaim_size: %u\n",
544 		atomic_read(&max_reclaim_size));
545 
546 	return 0;
547 }
548 
buffers_ratio_params_show(struct seq_file *m, void *v)549 static int buffers_ratio_params_show(struct seq_file *m, void *v)
550 {
551 	seq_printf(m, "inactive_file_ratio: %u\n", atomic_read(&inactive_file_ratio));
552 	seq_printf(m, "active_file_ratio: %u\n", atomic_read(&active_file_ratio));
553 
554 	return 0;
555 }
556 
/* cgroup read_u64 handler for area_anon_refault_threshold. */
static u64 area_anon_refault_threshold_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return atomic64_read(&area_anon_refault_threshold);
}
562 
/* cgroup read_u64 handler for empty_round_skip_interval. */
static u64 empty_round_skip_interval_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return atomic64_read(&empty_round_skip_interval);
}
568 
/* cgroup read_u64 handler for max_skip_interval. */
static u64 max_skip_interval_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return atomic64_read(&max_skip_interval);
}
574 
/* cgroup read_u64 handler for empty_round_check_threshold. */
static u64 empty_round_check_threshold_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return atomic64_read(&empty_round_check_threshold);
}
580 
/* cgroup read_u64 handler for anon_refault_snapshot_min_interval. */
static u64 anon_refault_snapshot_min_interval_read(
		struct cgroup_subsys_state *css, struct cftype *cft)
{
	return atomic64_read(&anon_refault_snapshot_min_interval);
}
586 
/* cgroup read_u64 handler for zram_critical_threshold. */
static u64 zram_critical_threshold_read(struct cgroup_subsys_state *css,
					struct cftype *cft)
{
	return atomic64_read(&zram_critical_threshold);
}
592 
zswapd_memcgs_param_show(struct seq_file *m, void *v)593 static int zswapd_memcgs_param_show(struct seq_file *m, void *v)
594 {
595 	int i;
596 
597 	for (i = 0; i < ZSWAPD_MAX_LEVEL_NUM; ++i) {
598 		seq_printf(m, "level %d min score: %u\n", i,
599 			zswap_param[i].min_score);
600 		seq_printf(m, "level %d max score: %u\n", i,
601 			zswap_param[i].max_score);
602 		seq_printf(m, "level %d ub_mem2zram_ratio: %u\n", i,
603 			zswap_param[i].ub_mem2zram_ratio);
604 		seq_printf(m, "level %d ub_zram2ufs_ratio: %u\n", i,
605 			zswap_param[i].ub_zram2ufs_ratio);
606 		seq_printf(m, "level %d refault_threshold: %u\n", i,
607 			zswap_param[i].refault_threshold);
608 	}
609 
610 	return 0;
611 }
612 
zswapd_single_memcg_param_show(struct seq_file *m, void *v)613 static int zswapd_single_memcg_param_show(struct seq_file *m, void *v)
614 {
615 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
616 
617 	seq_printf(m, "memcg score: %llu\n",
618 		atomic64_read(&memcg->memcg_reclaimed.app_score));
619 	seq_printf(m, "memcg ub_mem2zram_ratio: %u\n",
620 		atomic_read(&memcg->memcg_reclaimed.ub_mem2zram_ratio));
621 	seq_printf(m, "memcg ub_zram2ufs_ratio: %u\n",
622 		atomic_read(&memcg->memcg_reclaimed.ub_zram2ufs_ratio));
623 	seq_printf(m, "memcg refault_threshold: %u\n",
624 		atomic_read(&memcg->memcg_reclaimed.refault_threshold));
625 
626 	return 0;
627 }
628 
/* Debug dump of the zram watermark ratio. */
static int zram_wm_ratio_show(struct seq_file *m, void *v)
{
	seq_printf(m, "zram_wm_ratio: %u\n", atomic_read(&zram_wm_ratio));

	return 0;
}
635 
/* Debug dump of the compression ratio estimate. */
static int compress_ratio_show(struct seq_file *m, void *v)
{
	seq_printf(m, "compress_ratio: %u\n", atomic_read(&compress_ratio));

	return 0;
}
642 
/*
 * Debug dump of the zswapd-related VM event counters.  A full
 * vm_event_state snapshot is taken so the counters are mutually
 * consistent; output order and labels are fixed by the table below.
 */
static int zswapd_vmstat_show(struct seq_file *m, void *v)
{
#ifdef CONFIG_VM_EVENT_COUNTERS
	static const struct {
		const char *label;
		int item;
	} stats[] = {
		{ "zswapd_wake_up",		ZSWAPD_WAKEUP },
		{ "zswapd_area_refault",	ZSWAPD_REFAULT },
		{ "zswapd_medium_press",	ZSWAPD_MEDIUM_PRESS },
		{ "zswapd_critical_press",	ZSWAPD_CRITICAL_PRESS },
		{ "zswapd_memcg_ratio_skip",	ZSWAPD_MEMCG_RATIO_SKIP },
		{ "zswapd_memcg_refault_skip",	ZSWAPD_MEMCG_REFAULT_SKIP },
		{ "zswapd_swapout",		ZSWAPD_SWAPOUT },
		{ "zswapd_snapshot_times",	ZSWAPD_SNAPSHOT_TIMES },
		{ "zswapd_reclaimed",		ZSWAPD_RECLAIMED },
		{ "zswapd_scanned",		ZSWAPD_SCANNED },
	};
	unsigned long *events;
	size_t i;

	events = kzalloc(sizeof(struct vm_event_state), GFP_KERNEL);
	if (!events)
		return -ENOMEM;
	all_vm_events(events);

	for (i = 0; i < ARRAY_SIZE(stats); i++)
		seq_printf(m, "%s:%lu\n", stats[i].label, events[stats[i].item]);

	kfree(events);
#endif

	return 0;
}
669 
eswap_info_show(struct seq_file *m, void *v)670 static int eswap_info_show(struct seq_file *m, void *v)
671 {
672 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
673 	unsigned long long eswap_size;
674 
675 	eswap_size = memcg_data_size(memcg, WRITE_SIZE) / SZ_1K;
676 	seq_printf(m, "Total Swapout Size: %llu kB\n", eswap_size);
677 
678 	return 0;
679 }
680 
/*
 * Print the memcg's anon/file LRU sizes and its zram/eswap footprint,
 * all in kB.  Only node 0 is inspected.  Silently prints nothing if
 * the node info is unavailable.
 */
void memcg_eswap_info_show(struct seq_file *m)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
	struct mem_cgroup_per_node *mz = NULL;
	struct lruvec *lruvec = NULL;
	unsigned long anon;
	unsigned long file;
	unsigned long zram;
	unsigned long eswap;

	mz = mem_cgroup_nodeinfo(memcg, 0);
	if (!mz)
		return;

	lruvec = &mz->lruvec;
	/* NOTE(review): address of a member — this check can never fire. */
	if (!lruvec)
		return;

	/* LRU sizes are in pages; zram/eswap sizes are in bytes. */
	anon = lruvec_lru_size(lruvec, LRU_ACTIVE_ANON, MAX_NR_ZONES) +
		lruvec_lru_size(lruvec, LRU_INACTIVE_ANON, MAX_NR_ZONES);
	file = lruvec_lru_size(lruvec, LRU_ACTIVE_FILE, MAX_NR_ZONES) +
		lruvec_lru_size(lruvec, LRU_INACTIVE_FILE, MAX_NR_ZONES);
	zram = memcg_data_size(memcg, CACHE_SIZE) / SZ_1K;
	eswap = memcg_data_size(memcg, SWAP_SIZE) / SZ_1K;
	/* Convert page counts to kB to match zram/eswap units. */
	anon *= PAGE_SIZE / SZ_1K;
	file *= PAGE_SIZE / SZ_1K;
	seq_printf(m, "Anon:\t%12lu kB\nFile:\t%12lu kB\nzram:\t%12lu kB\nEswap:\t%12lu kB\n",
		anon, file, zram, eswap);
}
710 #endif
711 
/*
 * Legacy cgroup control files for zswapd.  Most knobs live only on the
 * root cgroup; debug read-back handlers are compiled in only with
 * CONFIG_HYPERHOLD_DEBUG.  The array is NULL-terminated as required by
 * cgroup_add_legacy_cftypes().
 */
static struct cftype zswapd_policy_files[] = {
	{
		.name = "active_app_info_list",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = memcg_active_app_info_list_show,
	},
	{
		.name = "zram_wm_ratio",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = mem_cgroup_zram_wm_ratio_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zram_wm_ratio_show,
#endif
	},
	{
		.name = "compress_ratio",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = mem_cgroup_compress_ratio_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = compress_ratio_show,
#endif
	},
	{
		.name = "zswapd_pressure",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = zswapd_pressure_event_control,
	},
	{
		.name = "zswapd_pid",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_u64 = zswapd_pid_read,
	},
	{
		.name = "avail_buffers",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = avail_buffers_params_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = avail_buffers_params_show,
#endif
	},
	{
		.name = "zswapd_max_reclaim_size",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = zswapd_max_reclaim_size_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zswapd_max_reclaim_size_show,
#endif
	},
	{
		.name = "area_anon_refault_threshold",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = area_anon_refault_threshold_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = area_anon_refault_threshold_read,
#endif
	},
	{
		.name = "empty_round_skip_interval",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = empty_round_skip_interval_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = empty_round_skip_interval_read,
#endif
	},
	{
		.name = "max_skip_interval",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = max_skip_interval_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = max_skip_interval_read,
#endif
	},
	{
		.name = "empty_round_check_threshold",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = empty_round_check_threshold_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = empty_round_check_threshold_read,
#endif
	},
	{
		.name = "anon_refault_snapshot_min_interval",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = anon_refault_snapshot_min_interval_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = anon_refault_snapshot_min_interval_read,
#endif
	},
	{
		.name = "zswapd_memcgs_param",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = zswapd_memcgs_param_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zswapd_memcgs_param_show,
#endif
	},
	{
		/* Per-memcg file: intentionally not CFTYPE_ONLY_ON_ROOT. */
		.name = "zswapd_single_memcg_param",
		.write = zswapd_single_memcg_param_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = zswapd_single_memcg_param_show,
#endif
	},
	{
		.name = "buffer_ratio_params",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write = buffers_ratio_params_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.seq_show = buffers_ratio_params_show,
#endif
	},
	{
		.name = "zswapd_pressure_show",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = zswapd_pressure_show,
	},
	{
		.name = "zram_critical_threshold",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.write_u64 = zram_critical_thres_write,
#ifdef CONFIG_HYPERHOLD_DEBUG
		.read_u64 = zram_critical_threshold_read,
#endif
	},

#ifdef CONFIG_HYPERHOLD_DEBUG
	{
		.name = "zswapd_vmstat_show",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = zswapd_vmstat_show,
	},
#endif
	{
		.name = "eswap_info",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.seq_show = eswap_info_show,
	},

	{ },	/* terminate */
};
852 
zswapd_policy_init(void)853 static int __init zswapd_policy_init(void)
854 {
855 	if (!mem_cgroup_disabled())
856 		WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, zswapd_policy_files));
857 
858 	return 0;
859 }
860 subsys_initcall(zswapd_policy_init);
861