// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/block/zram/zram_group/group_writeback.c
 *
 * Copyright (c) 2020-2022 Huawei Technologies Co., Ltd.
 */

#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/blk_types.h>
#include <linux/zswapd.h>

#include "../zram_drv.h"
#include "zram_group.h"

#ifdef CONFIG_HYPERHOLD
#include "hyperhold.h"
#endif

#define CHECK(cond, ...) ((cond) || (pr_err(__VA_ARGS__), false))
#define CHECK_BOUND(var, min, max) \
	CHECK((var) >= (min) && (var) <= (max), \
			"%s %u out of bounds %u ~ %u!\n", \
			#var, (var), (min), (max))

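/*
 * The group id of an object lives in the high bits of its zram table
 * flags, above the size field, so it is shifted by ZRAM_SIZE_SHIFT
 * and masked with ZRAM_GRPID_MASK.
 */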
static u16 zram_get_memcg_id(struct zram *zram, u32 index)
{
	return (zram->table[index].flags & ZRAM_GRPID_MASK) >> ZRAM_SIZE_SHIFT;
}

static void zram_set_memcg_id(struct zram *zram, u32 index, u16 gid)
{
	unsigned long old = zram->table[index].flags & (~ZRAM_GRPID_MASK);

	zram->table[index].flags = old | ((u64)gid << ZRAM_SIZE_SHIFT);
}

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
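/*
 * Check whether an isolated object can be written back. Objects that
 * were regrouped meanwhile are simply skipped; same-element objects,
 * objects already written back, and objects pinned by a page fault
 * are put back on the group list as HOTTEST.
 */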
static bool obj_can_wb(struct zram *zram, u32 index, u16 gid)
{
	/* overwritten obj, just skip */
	if (zram_get_memcg_id(zram, index) != gid) {
		pr_debug("obj %u is from group %u instead of group %u.\n",
				index, zram_get_memcg_id(zram, index), gid);
		return false;
	}
	if (!zgrp_obj_is_isolated(zram->zgrp, index)) {
		pr_debug("obj %u is not isolated.\n", index);
		return false;
	}
	/* no need to write back, put the obj back as HOTTEST */
	if (zram_test_flag(zram, index, ZRAM_SAME)) {
		pr_debug("obj %u is filled with same element.\n", index);
		goto insert;
	}
	if (zram_test_flag(zram, index, ZRAM_WB)) {
		pr_debug("obj %u is already written back.\n", index);
		goto insert;
	}
	/* obj is needed by a pagefault req, do not write it back. */
	if (zram_test_flag(zram, index, ZRAM_FAULT)) {
		pr_debug("obj %u is needed by a pagefault request.\n", index);
		goto insert;
	}
	/* should never happen */
	if (zram_test_flag(zram, index, ZRAM_GWB)) {
		pr_debug("obj %u is under group writeback.\n", index);
		BUG();
		return false;
	}

	return true;
insert:
	zgrp_obj_insert(zram->zgrp, index, gid);

	return false;
}

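/*
 * Copy an object between its zsmalloc buffer and the hpio pages.
 * The object may straddle a page boundary inside the extent, in
 * which case the copy is split across two hpio pages.
 */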
static void copy_obj(struct hpio *hpio, u32 offset, char *obj, u32 size, bool to)
{
	u32 page_id, start;
	char *buf = NULL;

	page_id = offset / PAGE_SIZE;
	start = offset % PAGE_SIZE;
	if (size + start <= PAGE_SIZE) {
		buf = page_to_virt(hyperhold_io_page(hpio, page_id));
		if (to)
			memcpy(buf + start, obj, size);
		else
			memcpy(obj, buf + start, size);

		return;
	}
	buf = page_to_virt(hyperhold_io_page(hpio, page_id));
	if (to)
		memcpy(buf + start, obj, PAGE_SIZE - start);
	else
		memcpy(obj, buf + start, PAGE_SIZE - start);
	buf = page_to_virt(hyperhold_io_page(hpio, page_id + 1));
	if (to)
		memcpy(buf, obj + PAGE_SIZE - start, size + start - PAGE_SIZE);
	else
		memcpy(obj + PAGE_SIZE - start, buf, size + start - PAGE_SIZE);
}

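/*
 * Move one object from zram into the write hpio at @offset. Returns
 * the object size on success, or 0 if the object was skipped or the
 * extent has no room left. On success the zsmalloc handle is freed
 * and the slot records the hyperhold address plus the ZRAM_GWB flag.
 */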
static u32 move_obj_to_hpio(struct zram *zram, u32 index, u16 gid,
				struct hpio *hpio, u32 offset)
{
	u32 size = 0;
	unsigned long handle;
	char *src = NULL;
	u32 ext_size;
	u32 eid;

	eid = hyperhold_io_extent(hpio);
	ext_size = hyperhold_extent_size(eid);

	zram_slot_lock(zram, index);
	if (!obj_can_wb(zram, index, gid))
		goto unlock;
	size = zram_get_obj_size(zram, index);
	/* no space, put back the obj as COLDEST */
	if (size + offset > ext_size) {
		pr_debug("obj %u size is %u, but ext %u has only %u left.\n",
				index, size, eid, ext_size - offset);
		zgrp_obj_putback(zram->zgrp, index, gid);
		size = 0;
		goto unlock;
	}
	handle = zram_get_handle(zram, index);
	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	copy_obj(hpio, offset, src, size, true);
	zs_unmap_object(zram->mem_pool, handle);
	zs_free(zram->mem_pool, handle);
	zram_set_handle(zram, index, hyperhold_address(eid, offset));
	zram_set_flag(zram, index, ZRAM_GWB);
	wbgrp_obj_insert(zram->zgrp, index, eid);
	wbgrp_obj_stats_inc(zram->zgrp, gid, eid, size);
	zgrp_obj_stats_dec(zram->zgrp, gid, size);
	pr_debug("move obj %u of group %u to hpio %p of eid %u, size = %u, offset = %u\n",
		index, gid, hpio, eid, size, offset);
unlock:
	zram_slot_unlock(zram, index);

	return size;
}

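/*
 * Move one object back from the read hpio into zram. Allocation is
 * first tried with GFP_NOWAIT under the slot lock; if that fails, the
 * lock is dropped for a GFP_NOIO | __GFP_NOFAIL retry, and the slot
 * state is revalidated before the copy.
 */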
static void move_obj_from_hpio(struct zram *zram, int index, struct hpio *hpio)
{
	u32 size = 0;
	unsigned long handle = 0;
	u32 eid, offset;
	u64 addr;
	char *dst = NULL;
	u16 gid;

	eid = hyperhold_io_extent(hpio);
retry:
	zram_slot_lock(zram, index);
	if (!zram_test_flag(zram, index, ZRAM_GWB))
		goto unlock;
	addr = zram_get_handle(zram, index);
	if (hyperhold_addr_extent(addr) != eid)
		goto unlock;
	size = zram_get_obj_size(zram, index);
	if (handle)
		goto move;
	handle = zs_malloc(zram->mem_pool, size, GFP_NOWAIT);
	if (handle)
		goto move;
	zram_slot_unlock(zram, index);
	handle = zs_malloc(zram->mem_pool, size, GFP_NOIO | __GFP_NOFAIL);
	if (handle)
		goto retry;
	BUG();

	return;
move:
	offset = hyperhold_addr_offset(addr);
	dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
	copy_obj(hpio, offset, dst, size, false);
	zs_unmap_object(zram->mem_pool, handle);
	zram_set_handle(zram, index, handle);
	zram_clear_flag(zram, index, ZRAM_GWB);
	gid = zram_get_memcg_id(zram, index);
	zgrp_obj_insert(zram->zgrp, index, gid);
	wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size);
	zgrp_obj_stats_inc(zram->zgrp, gid, size);
	pr_debug("move obj %u of group %u from hpio %p of eid %u, size = %u, offset = %u\n",
		index, gid, hpio, eid, size, offset);
unlock:
	zram_slot_unlock(zram, index);
}

#define NR_ISOLATE 32
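/*
 * Move all objects of the hpio's extent back into zram, in batches of
 * NR_ISOLATE. Returns true if the last object of the extent was
 * isolated here, i.e. the extent is now empty and can be freed.
 */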
static bool move_extent_from_hpio(struct zram *zram, struct hpio *hpio)
{
	u32 idxs[NR_ISOLATE];
	u32 eid;
	u32 nr;
	int i;
	bool last = false;

	eid = hyperhold_io_extent(hpio);
repeat:
	nr = wbgrp_isolate_objs(zram->zgrp, eid, idxs, NR_ISOLATE, &last);
	for (i = 0; i < nr; i++)
		move_obj_from_hpio(zram, idxs[i], hpio);
	if (last)
		return true;
	if (nr)
		goto repeat;

	return false;
}

struct hpio_priv {
	struct zram *zram;
	u16 gid;
};

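/*
 * Write completion callback. On success the objects stay written
 * back; on failure they are all moved back into zram and the extent
 * is deleted once it is empty.
 */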
static void write_endio(struct hpio *hpio)
{
	struct hpio_priv *priv = hyperhold_io_private(hpio);
	struct zram *zram = priv->zram;
	u16 gid = priv->gid;
	u32 eid = hyperhold_io_extent(hpio);

	if (hyperhold_io_success(hpio))
		goto out;
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_complete(hpio);
	hyperhold_io_put(hpio);
	kfree(priv);
}

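/*
 * Fill the write hpio with objects isolated from the group, stopping
 * when the extent is full or the group runs out of movable objects.
 * Returns the number of bytes collected.
 */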
static u32 collect_objs(struct zram *zram, u16 gid, struct hpio *hpio, u32 ext_size)
{
	u32 offset = 0;
	u32 last_offset;
	u32 nr;
	u32 idxs[NR_ISOLATE];
	int i;

more:
	last_offset = offset;
	nr = zgrp_isolate_objs(zram->zgrp, gid, idxs, NR_ISOLATE, NULL);
	for (i = 0; i < nr; i++)
		offset += move_obj_to_hpio(zram, idxs[i], gid, hpio, offset);
	pr_debug("attached %u bytes, offset = %u.\n", offset - last_offset, offset);
	if (offset < ext_size && offset != last_offset)
		goto more;

	return offset;
}

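/*
 * Write back one extent worth of objects for the group. Allocates an
 * extent and a write hpio, fills it, publishes the extent in the
 * group, then submits the async write. Returns the number of bytes
 * submitted, or 0 on failure.
 */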
static u64 write_one_extent(struct zram *zram, u16 gid)
{
	int eid;
	struct hpio *hpio = NULL;
	struct hpio_priv *priv = NULL;
	u32 size = 0;
	int ret;

	priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO);
	if (!priv)
		return 0;
	priv->gid = gid;
	priv->zram = zram;
	eid = hyperhold_alloc_extent();
	if (eid < 0)
		goto err;
	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_WRITE);
	if (!hpio)
		goto free_extent;

	zgrp_get_ext(zram->zgrp, eid);
	size = collect_objs(zram, gid, hpio, hyperhold_extent_size(eid));
	if (size == 0) {
		pr_err("group %u has no data in zram.\n", gid);
		zgrp_put_ext(zram->zgrp, eid);
		goto put_hpio;
	}
	zgrp_ext_insert(zram->zgrp, eid, gid);
	if (zgrp_put_ext(zram->zgrp, eid)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}

	ret = hyperhold_write_async(hpio, write_endio, priv);
	if (ret)
		goto move_back;

	return size;
move_back:
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
	eid = -EINVAL;
put_hpio:
	hyperhold_io_put(hpio);
free_extent:
	if (eid >= 0)
		hyperhold_free_extent(eid);
err:
	kfree(priv);

	return 0;
}

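/*
 * Read completion callback: move every object of the extent back into
 * zram and delete the extent once it is empty.
 */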
static void read_endio(struct hpio *hpio)
{
	struct hpio_priv *priv = hyperhold_io_private(hpio);
	struct zram *zram = priv->zram;
	u16 gid = priv->gid;
	u32 eid = hyperhold_io_extent(hpio);

	if (!hyperhold_io_success(hpio)) {
		BUG();
		goto out;
	}
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
out:
	hyperhold_io_complete(hpio);
	hyperhold_io_put(hpio);
	kfree(priv);
}

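/*
 * Submit an async read of one extent; read_endio() moves the objects
 * back into zram on completion. Returns the extent size, or 0 on
 * failure.
 */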
static u64 read_one_extent(struct zram *zram, u32 eid, u16 gid)
{
	struct hpio *hpio = NULL;
	u32 ext_size = 0;
	int ret;
	struct hpio_priv *priv = NULL;

	priv = kmalloc(sizeof(struct hpio_priv), GFP_NOIO);
	if (!priv)
		goto err;
	priv->gid = gid;
	priv->zram = zram;
	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
	if (!hpio)
		goto err;
	ext_size = hyperhold_extent_size(eid);
	ret = hyperhold_read_async(hpio, read_endio, priv);
	if (ret)
		goto err;

	return ext_size;
err:
	hyperhold_io_put(hpio);
	kfree(priv);

	return 0;
}

static void sync_read_endio(struct hpio *hpio)
{
	hyperhold_io_complete(hpio);
}

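/*
 * Synchronously fault in one written-back object. Called and returns
 * with the slot lock held; if another fault is already in flight for
 * this slot, wait on the fault waitqueue and recheck. Returns 0 on
 * success or a negative errno on I/O failure.
 */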
static int read_one_obj_sync(struct zram *zram, u32 index)
{
	struct hpio *hpio = NULL;
	int ret;
	u32 eid;
	u16 gid;
	u32 size;

	if (!zram_test_flag(zram, index, ZRAM_GWB))
		return 0;

	pr_debug("read obj %u.\n", index);

	gid = zram_get_memcg_id(zram, index);
	eid = hyperhold_addr_extent(zram_get_handle(zram, index));
	size = zram_get_obj_size(zram, index);
	wbgrp_fault_stats_inc(zram->zgrp, gid, eid, size);
check:
	if (!zram_test_flag(zram, index, ZRAM_GWB))
		return 0;
	if (!zram_test_flag(zram, index, ZRAM_FAULT))
		goto read;
	zram_slot_unlock(zram, index);
	wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT));
	zram_slot_lock(zram, index);
	goto check;
read:
	zram_set_flag(zram, index, ZRAM_FAULT);
	zram_slot_unlock(zram, index);

	hpio = hyperhold_io_get(eid, GFP_NOIO, REQ_OP_READ);
	if (!hpio) {
		ret = -ENOMEM;
		goto out;
	}
	ret = hyperhold_read_async(hpio, sync_read_endio, NULL);
	/* io submit error */
	if (ret && ret != -EAGAIN)
		goto out;

	hyperhold_io_wait(hpio);

	/* reset to zero, or a stale error would sometimes be returned and raise SIGBUS */
	ret = 0;

	/* got a write io: the data is ready, so copy the pages even if the write failed */
	if (op_is_write(hyperhold_io_operate(hpio)))
		goto move;
	/* read io failed, return -EIO */
	if (!hyperhold_io_success(hpio)) {
		ret = -EIO;
		goto out;
	}
	/* success, copy the data and free the extent */
move:
	if (move_extent_from_hpio(zram, hpio)) {
		zgrp_ext_delete(zram->zgrp, eid, gid);
		hyperhold_should_free_extent(eid);
	}
	move_obj_from_hpio(zram, index, hpio);
out:
	hyperhold_io_put(hpio);
	zram_slot_lock(zram, index);
	zram_clear_flag(zram, index, ZRAM_FAULT);
	wake_up(&zram->zgrp->wbgrp.fault_wq);

	return ret;
}

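/*
 * Read written-back objects of a group back into zram, one extent at
 * a time, until @req_size bytes have been read (0 means read them
 * all). Returns the number of bytes read.
 */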
u64 read_group_objs(struct zram *zram, u16 gid, u64 req_size)
{
	u32 eid;
	u64 read_size = 0;
	u32 nr;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1))
		return 0;

	pr_debug("read %llu bytes of group %u.\n", req_size, gid);

	while (!req_size || req_size > read_size) {
		nr = zgrp_isolate_exts(zram->zgrp, gid, &eid, 1, NULL);
		if (!nr)
			break;
		read_size += read_one_extent(zram, eid, gid);
	}

	return read_size;
}

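/*
 * Write back objects of a group, one extent at a time, until
 * @req_size bytes have been submitted (0 means write back everything
 * movable). Returns the number of bytes submitted.
 */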
u64 write_group_objs(struct zram *zram, u16 gid, u64 req_size)
{
	u64 write_size = 0;
	u64 size = 0;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK(zram->zgrp->wbgrp.enable, "zram group writeback is not enabled!\n"))
		return 0;
	if (!CHECK_BOUND(gid, 1, zram->zgrp->nr_grp - 1))
		return 0;

	pr_debug("write %llu bytes of group %u.\n", req_size, gid);

	while (!req_size || req_size > write_size) {
		size = write_one_extent(zram, gid);
		if (!size)
			break;
		write_size += size;
	}

	atomic64_add(write_size, &zram->zgrp->stats[0].write_size);
	atomic64_add(write_size, &zram->zgrp->stats[gid].write_size);
	return write_size;
}
#endif

#ifdef CONFIG_ZRAM_GROUP_DEBUG
#include <linux/random.h>
#define ZGRP_TEST_MAX_GRP 101
#endif

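/*
 * Account a page fault against the object's group and, with group
 * writeback enabled, synchronously fault the object back in.
 */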
int zram_group_fault_obj(struct zram *zram, u32 index)
{
	u16 gid;
	u32 size;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return 0;

	gid = zram_get_memcg_id(zram, index);
	size = zram_get_obj_size(zram, index);
	zgrp_fault_stats_inc(zram->zgrp, gid, size);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	return read_one_obj_sync(zram, index);
#else
	return 0;
#endif
}

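/*
 * Attach a freshly stored object to the group of its memcg and update
 * the group stats. The object must not already belong to a group.
 */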
void zram_group_track_obj(struct zram *zram, u32 index, struct mem_cgroup *memcg)
{
	u16 gid;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return;
	if (!CHECK(memcg && memcg->id.id, "obj %u has no memcg!\n", index))
		return;
	gid = zram_get_memcg_id(zram, index);
	if (!CHECK(!gid, "obj %u has gid %u.\n", index, gid))
		BUG();

	gid = memcg->id.id;
	zram_set_memcg_id(zram, index, gid);
	zgrp_obj_insert(zram->zgrp, index, gid);
	zgrp_obj_stats_inc(zram->zgrp, gid, zram_get_obj_size(zram, index));
}

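/*
 * Detach an object from its group, waiting out any in-flight fault
 * first. Written-back objects are also removed from their extent,
 * which is deleted once empty.
 */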
void zram_group_untrack_obj(struct zram *zram, u32 index)
{
	u16 gid;
	u32 size;

	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}
	if (!CHECK_BOUND(index, 0, zram->zgrp->nr_obj - 1))
		return;

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
check:
	if (!zram_test_flag(zram, index, ZRAM_FAULT))
		goto clear;
	zram_slot_unlock(zram, index);
	wait_event(zram->zgrp->wbgrp.fault_wq, !zram_test_flag(zram, index, ZRAM_FAULT));
	zram_slot_lock(zram, index);
	goto check;
clear:
#endif
	gid = zram_get_memcg_id(zram, index);
	size = zram_get_obj_size(zram, index);
	if (!gid)
		return;
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (zram_test_flag(zram, index, ZRAM_GWB)) {
		u32 eid = hyperhold_addr_extent(zram_get_handle(zram, index));

		if (wbgrp_obj_delete(zram->zgrp, index, eid)) {
			zgrp_ext_delete(zram->zgrp, eid, gid);
			hyperhold_should_free_extent(eid);
		}
		zram_clear_flag(zram, index, ZRAM_GWB);
		zram_set_memcg_id(zram, index, 0);
		wbgrp_obj_stats_dec(zram->zgrp, gid, eid, size);
		zram_set_handle(zram, index, 0);
		return;
	}
#endif
	zgrp_obj_delete(zram->zgrp, index, gid);
	zram_set_memcg_id(zram, index, 0);
	zgrp_obj_stats_dec(zram->zgrp, gid, size);
}

#ifdef CONFIG_ZRAM_GROUP_DEBUG
void group_debug(struct zram *zram, u32 op, u32 index, u32 gid)
{
	if (op == 0)
		zram_group_dump(zram->zgrp, gid, index);

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (op == 22)
		read_group_objs(zram, gid, index);
	if (op == 23)
		write_group_objs(zram, gid, index);
	if (op == 20) {
		if (index)
			zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent());
		else
			zram_group_remove_writeback(zram->zgrp);
	}
#endif
}
#endif

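/*
 * Return one per-group counter selected by @type: zram size, pages
 * and fault counts, plus (with writeback) writeback sizes and stats.
 */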
static u64 group_obj_stats(struct zram *zram, u16 gid, int type)
{
	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return 0;
	}
	if (!CHECK_BOUND(gid, 0, zram->zgrp->nr_grp - 1))
		return 0;

	if (type == CACHE_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].zram_size);
	else if (type == CACHE_PAGE)
		return atomic_read(&zram->zgrp->stats[gid].zram_pages);
	else if (type == CACHE_FAULT)
		return atomic64_read(&zram->zgrp->stats[gid].zram_fault);
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	else if (type == SWAP_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].wb_size);
	else if (type == SWAP_PAGE)
		return atomic_read(&zram->zgrp->stats[gid].wb_pages);
	else if (type == READ_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].read_size);
	else if (type == WRITE_SIZE)
		return atomic64_read(&zram->zgrp->stats[gid].write_size);
	else if (type == SWAP_FAULT)
		return atomic64_read(&zram->zgrp->stats[gid].wb_fault);
	BUG();
#endif

	return 0;
}

#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
static u64 zram_group_read(u16 gid, u64 req_size, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return read_group_objs((struct zram *)priv, gid, req_size);
}

static u64 zram_group_write(u16 gid, u64 req_size, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return write_group_objs((struct zram *)priv, gid, req_size);
}
#else
static u64 zram_group_read(u16 gid, u64 req_size, void *priv)
{
	return 0;
}

static u64 zram_group_write(u16 gid, u64 req_size, void *priv)
{
	return 0;
}
#endif

static u64 zram_group_data_size(u16 gid, int type, void *priv)
{
	if (!CHECK(priv, "priv is NULL!\n"))
		return 0;

	return group_obj_stats((struct zram *)priv, gid, type);
}

struct group_swap_ops zram_group_ops = {
	.group_read = zram_group_read,
	.group_write = zram_group_write,
	.group_data_size = zram_group_data_size,
};

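/*
 * Register this zram device with the group swap framework, exposing
 * per-group read/write/stat callbacks through zram_group_ops.
 */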
static int register_zram_group(struct zram *zram)
{
	if (!CHECK(zram, "zram is NULL!\n"))
		return -EINVAL;
	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return -EINVAL;
	}

	zram->zgrp->gsdev = register_group_swap(&zram_group_ops, zram);
	if (!zram->zgrp->gsdev) {
		pr_err("register zram group failed!\n");
		return -ENOMEM;
	}

	return 0;
}

static void unregister_zram_group(struct zram *zram)
{
	if (!CHECK(zram, "zram is NULL!\n"))
		return;
	if (!zram->zgrp) {
		pr_debug("zram group is not enabled!\n");
		return;
	}

	unregister_group_swap(zram->zgrp->gsdev);
	zram->zgrp->gsdev = NULL;
}

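/*
 * Allocate the group metadata for @nr_obj objects, optionally apply
 * writeback, and register the device with the group swap framework.
 */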
void zram_group_init(struct zram *zram, u32 nr_obj)
{
	unsigned int ctrl = zram->zgrp_ctrl;

	if (ctrl == ZGRP_NONE)
		return;
	zram->zgrp = zram_group_meta_alloc(nr_obj, ZGRP_MAX_GRP - 1);
	if (!zram->zgrp) {
		pr_err("zram group meta alloc failed!\n");
		return;
	}
#ifdef CONFIG_ZRAM_GROUP_WRITEBACK
	if (ctrl == ZGRP_WRITE)
		zram_group_apply_writeback(zram->zgrp, hyperhold_nr_extent());
#endif
	register_zram_group(zram);
}

void zram_group_deinit(struct zram *zram)
{
	unregister_zram_group(zram);
	zram_group_meta_free(zram->zgrp);
	zram->zgrp = NULL;
}