xref: /kernel/linux/linux-6.6/fs/btrfs/compression.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/psi.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "fs.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "bio.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "zoned.h"
#include "file-item.h"
#include "super.h"

static struct bio_set btrfs_compressed_bioset;

static const char *const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char *btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

static inline struct compressed_bio *to_compressed_bio(struct btrfs_bio *bbio)
{
	return container_of(bbio, struct compressed_bio, bbio);
}

static struct compressed_bio *alloc_compressed_bio(struct btrfs_inode *inode,
						   u64 start, blk_opf_t op,
						   btrfs_bio_end_io_t end_io)
{
	struct btrfs_bio *bbio;

	bbio = btrfs_bio(bio_alloc_bioset(NULL, BTRFS_MAX_COMPRESSED_PAGES, op,
					  GFP_NOFS, &btrfs_compressed_bioset));
	btrfs_bio_init(bbio, inode->root->fs_info, end_io, NULL);
	bbio->inode = inode;
	bbio->file_offset = start;
	return to_compressed_bio(bbio);
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}
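
/*
 * Example: btrfs_compress_is_valid_type() only compares the leading
 * strlen() bytes of each known name, so a level suffix is accepted as
 * part of a valid type string (illustrative calls, not used by the code):
 *
 *	btrfs_compress_is_valid_type("zlib", 4);	// true
 *	btrfs_compress_is_valid_type("zstd:3", 6);	// true, prefix match
 *	btrfs_compress_is_valid_type("zl", 2);		// false, too short
 *	btrfs_compress_is_valid_type("gzip", 4);	// false, unknown name
 *
 * The empty string at index 0 (BTRFS_COMPRESS_NONE) is skipped by starting
 * the loop at i = 1, otherwise it would match everything.
 */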

static int compression_compress_pages(int type, struct list_head *ws,
               struct address_space *mapping, u64 start, struct page **pages,
               unsigned long *out_pages, unsigned long *total_in,
               unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with a remount
		 * setting it to 'no compress', while the caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, we just need to inform the caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
               const u8 *data_in, struct page *dest_page,
               unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_free_compressed_pages(struct compressed_bio *cb)
{
	for (unsigned int i = 0; i < cb->nr_pages; i++)
		put_page(cb->compressed_pages[i]);
	kfree(cb->compressed_pages);
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static void end_compressed_bio_read(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	blk_status_t status = bbio->bio.bi_status;

	if (!status)
		status = errno_to_blk_status(btrfs_decompress_bio(cb));

	btrfs_free_compressed_pages(cb);
	btrfs_bio_end_io(cb->orig_bbio, status);
	bio_put(&bbio->bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(const struct compressed_bio *cb)
{
	struct inode *inode = &cb->bbio.inode->vfs_inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct folio_batch fbatch;
	const int errno = blk_status_to_errno(cb->bbio.bio.bi_status);
	int i;
	int ret;

	if (errno)
		mapping_set_error(inode->i_mapping, errno);

	folio_batch_init(&fbatch);
	while (index <= end_index) {
		ret = filemap_get_folios(inode->i_mapping, &index, end_index,
				&fbatch);

		if (ret == 0)
			return;

		for (i = 0; i < ret; i++) {
			struct folio *folio = fbatch.folios[i];

			btrfs_page_clamp_clear_writeback(fs_info, &folio->page,
							 cb->start, cb->len);
		}
		folio_batch_release(&fbatch);
	}
	/* the inode may be gone now */
}

static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
	struct compressed_bio *cb =
		container_of(work, struct compressed_bio, write_end_work);

	btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len,
				    cb->bbio.bio.bi_status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(cb);
	/* Note, our inode could be gone now */

	btrfs_free_compressed_pages(cb);
	bio_put(&cb->bbio.bio);
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct btrfs_bio *bbio)
{
	struct compressed_bio *cb = to_compressed_bio(bbio);
	struct btrfs_fs_info *fs_info = bbio->inode->root->fs_info;

	queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
}

static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb)
{
	struct bio *bio = &cb->bbio.bio;
	u32 offset = 0;

	while (offset < cb->compressed_len) {
		u32 len = min_t(u32, cb->compressed_len - offset, PAGE_SIZE);

		/* Maximum compressed extent is smaller than bio size limit. */
		__bio_add_page(bio, cb->compressed_pages[offset >> PAGE_SHIFT],
			       len, 0);
		offset += len;
	}
}
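
/*
 * Example of the loop above, assuming 4KiB pages and
 * cb->compressed_len == 10KiB:
 *
 *	offset = 0KiB: page index 0, len = 4KiB
 *	offset = 4KiB: page index 1, len = 4KiB
 *	offset = 8KiB: page index 2, len = 2KiB (final, partial page)
 *
 * The bio receives three bvecs and the last one covers only the tail of
 * the compressed data.
 */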

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered,
				   struct page **compressed_pages,
				   unsigned int nr_pages,
				   blk_opf_t write_flags,
				   bool writeback)
{
	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct compressed_bio *cb;

	ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize));
	ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize));

	cb = alloc_compressed_bio(inode, ordered->file_offset,
				  REQ_OP_WRITE | write_flags,
				  end_compressed_bio_write);
	cb->start = ordered->file_offset;
	cb->len = ordered->num_bytes;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = ordered->disk_num_bytes;
	cb->writeback = writeback;
	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
	cb->nr_pages = nr_pages;
	cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT;
	cb->bbio.ordered = ordered;
	btrfs_add_compressed_bio_pages(cb);

	btrfs_submit_bio(&cb->bbio, 0);
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit a bio for each compressed/regular extent.
 *
 * This means, if several sectors in the same page point to the same on-disk
 * compressed data, we will re-read the same extent many times and this
 * function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb,
				     int *memstall, unsigned long *pflags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long end_index;
	struct bio *orig_bio = &cb->orig_bbio->bio;
	u64 cur = cb->orig_bbio->file_offset + orig_bio->bi_iter.bi_size;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means the maximum compressed extent size (128K) is just 2x
	 * the page size.
	 * This makes readahead less effective, so disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have page for
			 * current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			/* There is already a page, skip to page end */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		if (!*memstall && PageWorkingset(page)) {
			psi_memstall_enter(pflags);
			*memstall = 1;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end, NULL);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}
		add_size = min(em->start + em->len, page_end + 1) - cur;
		free_extent_map(em);

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
			}
		}

		ret = bio_add_page(orig_bio, page, add_size, offset_in_page(cur));
		if (ret != add_size) {
			unlock_extent(tree, cur, page_end, NULL);
			unlock_page(page);
			put_page(page);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio we will decrease
		 * subpage::readers and unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
		put_page(page);
		cur += add_size;
	}
	return 0;
}

/*
 * For a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then invoke the original bio's end_io callback.
 */
void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	u64 file_offset = bbio->file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	unsigned long pflags;
	int memstall = 0;
	blk_status_t ret;
	int ret2;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em) {
		ret = BLK_STS_IOERR;
		goto out;
	}

	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
	compressed_len = em->block_len;

	cb = alloc_compressed_bio(inode, file_offset, REQ_OP_READ,
				  end_compressed_bio_read);

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	cb->len = bbio->bio.bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = em->compress_type;
	cb->orig_bbio = bbio;

	free_extent_map(em);

	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!cb->compressed_pages) {
		ret = BLK_STS_RESOURCE;
		goto out_free_bio;
	}

	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
	if (ret2) {
		ret = BLK_STS_RESOURCE;
		goto out_free_compressed_pages;
	}

	add_ra_bio_pages(&inode->vfs_inode, em_start + em_len, cb, &memstall,
			 &pflags);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bbio->bio.bi_iter.bi_size;
	cb->bbio.bio.bi_iter.bi_sector = bbio->bio.bi_iter.bi_sector;
	btrfs_add_compressed_bio_pages(cb);

	if (memstall)
		psi_memstall_leave(&pflags);

	btrfs_submit_bio(&cb->bbio, 0);
	return;

out_free_compressed_pages:
	kfree(cb->compressed_pages);
out_free_bio:
	bio_put(&cb->bbio.bio);
out:
	btrfs_bio_end_io(bbio, ret);
}

/*
 * The heuristic uses systematic sampling to collect data from the input data
 * range; the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - the distance between sample points within the range
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, i.e. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
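
/*
 * The arithmetic behind the bound above, assuming BTRFS_MAX_UNCOMPRESSED
 * is 128KiB:
 *
 *	MAX_SAMPLE_SIZE = 131072 * 16 / 256 = 8192 bytes
 *
 * i.e. one 16-byte read per 256-byte interval gives 512 sample points,
 * and 512 * 16 = 8192, matching the "sample size bound by 8192" above.
 */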

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until one becomes
 * available.
 * Preallocation provides a forward progress guarantee, so we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fall back to the default.
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}
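
/*
 * Example of the clamping above, using zlib and assuming its ops table
 * sets default_level = 3 and max_level = 9 (the actual values live in the
 * per-algorithm btrfs_compress_op definitions):
 *
 *	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 0);	// -> 3, default
 *	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 5);	// -> 5, in range
 *	btrfs_compress_set_level(BTRFS_COMPRESS_ZLIB, 42);	// -> 9, clamped
 */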

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds the maximum number of pages to
 * allocate and returns the number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}
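
/*
 * A minimal sketch of the @type_level encoding described above (algorithm
 * in bits 0-3, level in bits 4-7), assuming btrfs_compress_type() and
 * btrfs_compress_level() unpack it the same way:
 *
 *	unsigned int type_level = (9 << 4) | BTRFS_COMPRESS_ZLIB;
 *
 *	btrfs_compress_type(type_level);	// BTRFS_COMPRESS_ZLIB
 *	btrfs_compress_level(type_level);	// 9
 */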

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	if (!ret)
		zero_fill_bio(&cb->orig_bbio->bio);
	return ret;
}

/*
 * A less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the decompressed data we're interested
 * in.
 */
int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

int __init btrfs_init_compress(void)
{
	if (bioset_init(&btrfs_compressed_bioset, BIO_POOL_SIZE,
			offsetof(struct compressed_bio, bbio.bio),
			BIOSET_NEED_BVECS))
		return -ENOMEM;
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
	return 0;
}

void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
	bioset_exit(&btrfs_compressed_bioset);
}

/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 * 			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller submitted the read for
 *
 * An easier to understand graph is like below:
 *
 * 		|<- orig_bio ->|     |<- orig_bio->|
 * 	|<-------      full decompressed extent      ----->|
 * 	|<-----------    @cb range   ---->|
 * 	|			|<-- @buf_len -->|
 * 	|<--- @decompressed --->|
 *
 * Note that, @cb can be a subpage of the full decompressed extent, but
 * @cb->start always has the same value as the file offset of the full
 * decompressed extent.
 *
 * When reading a compressed extent, we have to read the full compressed
 * extent, while @orig_bio may only want part of the range.
 * Thus this function will ensure only data covered by @orig_bio will be
 * copied to it.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need to continue decompressing.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = &cb->orig_bbio->bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us the correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}
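
/*
 * A worked example of the copy math above, with hypothetical numbers.
 * Say the working buffer covers decompressed = 8192, buf_len = 4096
 * (bytes 8192..12287 of the extent) and the current bvec maps
 * bvec_offset = 10240, bv_len = 4096 (bytes 10240..14335):
 *
 *	copy_start = max(8192, 10240)          = 10240
 *	copy_len   = min(14336, 12288) - 10240 = 2048
 *
 * Only the 2048-byte overlap is copied; the loop then either advances to
 * the next bvec or returns 1 to ask for more decompressed data.
 */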

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of the
 * number of bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, the sample has low byte
 *			    entropy and is likely compressible
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, so the levels are lowered by 5 to
 * compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}
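
/*
 * In other words, ilog2_w(n) = ilog2(n^4) ~= 4 * log2(n), a fixed-point
 * log2 with two extra fractional bits.  Plugging that into the Shannon
 * formula H = -sum(p_i * log2(p_i)), with p_i = count_i / sample_size,
 * gives in these scaled units:
 *
 *	H ~= sum(count_i * (ilog2_w(sample_size) - ilog2_w(count_i)))
 *	     / sample_size
 *
 * The maximum of 8 bits per byte becomes entropy_max = 8 * ilog2_w(2) =
 * 8 * 4 = 32, which shannon_entropy() below uses to scale the result to
 * a percentage.
 */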

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}
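
/*
 * The "reverse order" mapping above is what makes radix_sort() below sort
 * in descending order.  E.g. for shift = 0 and num = 5 the low nibble is
 * 5, so get4bits() returns 15 - 5 = 10: larger nibbles map to smaller
 * counter slots, pushing the biggest counts to the front of the array.
 */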

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in
	 * big counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix sort expects to move data from a temporary
		 * array to the main one.  But that requires some CPU time.
		 * Avoid that by doing another sort iteration into the
		 * original array instead of a memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}
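
/*
 * A short walk-through of the pass count above: if the largest counter is
 * e.g. 1000, then ilog2(1000) = 9 and bitlen = ALIGN(9, 8) = 16, so the
 * while loop runs twice and performs four 4-bit passes in total (shifts
 * 0, 4, 8 and 12), alternating between @array and @array_buf so the final
 * result lands back in @array.
 */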

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Another possibility is a normal (Gaussian) distribution, where the data
 * could be potentially compressible, but we have to take a few more steps to
 * decide how much.
 *
 * @BYTE_CORE_SET_LOW  - most byte values are repeated frequently, a
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
 *                       probability are not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data in chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under the assumption that the
	 * heuristic will process no more than BTRFS_MAX_UNCOMPRESSED at a
	 * time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!PAGE_ALIGNED(end))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
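
/*
 * With the constants above, the sampler copies SAMPLING_READ_SIZE (16)
 * bytes at every SAMPLING_INTERVAL (256) byte stride.  For a full 128KiB
 * input range that is 131072 / 256 = 512 sample points, i.e. 512 * 16 =
 * 8192 bytes total, exactly MAX_SAMPLE_SIZE, so ws->sample cannot
 * overflow.
 */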

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic
 * to quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket) * BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give the green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. with 150
	 * byte values every bucket has a counter at level ~54. The heuristic
	 * would be confused. This can happen when data have some internal
	 * repeated patterns like "abbacbbc...". This can be detected by
	 * analyzing pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}
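
/*
 * The non-zero return values above also record which test fired:
 * 1 = repeated pattern in the sample, 2 = small byte set, 3 = small core
 * byte set, 4 = entropy at or below ENTROPY_LVL_ACEPTABLE, 5 = entropy
 * below ENTROPY_LVL_HIGH.  All of them mean "worth compressing"; 0 means
 * "skip compression".
 */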

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * a level; an unrecognized string will set the default level.
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}
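
/*
 * A few illustrative inputs for the parser above, again assuming zlib's
 * default level is 3 and its maximum is 9:
 *
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":9");	// 9
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, ":abc");	// 3, default
 *	btrfs_compress_str2level(BTRFS_COMPRESS_ZLIB, "");	// 3, default
 *	btrfs_compress_str2level(BTRFS_COMPRESS_NONE, ":5");	// 0
 */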