Lines matching refs: ws

The entries below come from the btrfs compression code (fs/btrfs/compression.c in the Linux kernel); the leading number on each entry is that line's position in the file.

94 static int compression_compress_pages(int type, struct list_head *ws,
101 return zlib_compress_pages(ws, mapping, start, pages,
104 return lzo_compress_pages(ws, mapping, start, pages,
107 return zstd_compress_pages(ws, mapping, start, pages,
125 static int compression_decompress_bio(struct list_head *ws,
129 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
130 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
131 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
142 static int compression_decompress(int type, struct list_head *ws,
147 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
149 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
151 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
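The three dispatch helpers above (compression_compress_pages at line 94, compression_decompress_bio at 125, compression_decompress at 142) share one pattern: switch on the compression type and forward the workspace list head ws to the matching zlib/lzo/zstd backend. A sketch of the per-page variant, with the truncated parameter list filled in as an assumption (the listing cuts each entry off at the line break):

    static int compression_decompress(int type, struct list_head *ws,
                    unsigned char *data_in, struct page *dest_page,
                    unsigned long start_byte, size_t srclen, size_t destlen)
    {
            switch (type) {
            case BTRFS_COMPRESS_ZLIB:
                    return zlib_decompress(ws, data_in, dest_page,
                                           start_byte, srclen, destlen);
            case BTRFS_COMPRESS_LZO:
                    return lzo_decompress(ws, data_in, dest_page,
                                          start_byte, srclen, destlen);
            case BTRFS_COMPRESS_ZSTD:
                    return zstd_decompress(ws, data_in, dest_page,
                                           start_byte, srclen, destlen);
            case BTRFS_COMPRESS_NONE:
            default:
                    /* The type is validated long before we get here. */
                    BUG();
                    return -EINVAL; /* unreachable, keeps the compiler happy */
            }
    }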
593 static void free_heuristic_ws(struct list_head *ws)
597 workspace = list_entry(ws, struct heuristic_ws, list);
607 struct heuristic_ws *ws;
609 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
610 if (!ws)
613 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
614 if (!ws->sample)
617 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
618 if (!ws->bucket)
621 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
622 if (!ws->bucket_b)
625 INIT_LIST_HEAD(&ws->list);
626 return &ws->list;
628 free_heuristic_ws(&ws->list);
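Lines 593 to 628 are the free/alloc pair for the heuristic workspace. The allocation funnels every failure through one exit that calls free_heuristic_ws(); that is safe because kvfree() and kfree() both accept NULL, so a partially built workspace can be handed back as-is. A reconstruction from the visible fragments (the fail label and the ERR_PTR return are assumptions):

    static struct list_head *alloc_heuristic_ws(void)
    {
            struct heuristic_ws *ws;

            ws = kzalloc(sizeof(*ws), GFP_KERNEL);
            if (!ws)
                    return ERR_PTR(-ENOMEM);

            /* The sample buffer is large, allow a vmalloc fallback. */
            ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
            if (!ws->sample)
                    goto fail;

            ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
            if (!ws->bucket)
                    goto fail;

            ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
            if (!ws->bucket_b)
                    goto fail;

            INIT_LIST_HEAD(&ws->list);
            return &ws->list;
    fail:
            free_heuristic_ws(&ws->list);
            return ERR_PTR(-ENOMEM);
    }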
660 static void free_workspace(int type, struct list_head *ws)
663 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
664 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
665 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
666 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
705 struct list_head *ws;
709 ws = wsman->idle_ws.next;
710 list_del(ws);
711 free_workspace(type, ws);
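Lines 705 to 711 are the teardown side of the workspace manager: pop entries off the idle list until it is empty and release each one through the free_workspace() dispatcher at line 660. A sketch of the drain loop; the total_ws accounting is an assumption about the surrounding function:

    while (!list_empty(&wsman->idle_ws)) {
            ws = wsman->idle_ws.next;
            list_del(ws);
            free_workspace(type, ws);
            atomic_dec(&wsman->total_ws);
    }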
821 void btrfs_put_workspace(int type, struct list_head *ws)
839 list_add(ws, idle_ws);
846 free_workspace(type, ws);
852 static void put_workspace(int type, struct list_head *ws)
855 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
856 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
857 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
858 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
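btrfs_put_workspace() (lines 821 to 846) either parks a returned workspace on the idle list for reuse (line 839) or frees it when enough are already cached (line 846); the put_workspace() dispatcher below it shows that zstd is the only backend with its own put path, since it manages per-level workspaces. A simplified sketch of the keep-or-free decision; the cache bound (num_online_cpus()) and the locking and wakeup details are assumptions not visible in the listing:

    spin_lock(ws_lock);
    if (*free_ws <= num_online_cpus()) {
            list_add(ws, idle_ws);      /* cache it for the next user */
            (*free_ws)++;
            spin_unlock(ws_lock);
            goto wake;
    }
    spin_unlock(ws_lock);

    free_workspace(type, ws);           /* cache is full, release it */
    atomic_dec(total_ws);
    wake:
    cond_wake_up(ws_wait);              /* a waiter may need a workspace */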
1093 static u32 shannon_entropy(struct heuristic_ws *ws)
1100 sz_base = ilog2_w(ws->sample_size);
1101 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1102 p = ws->bucket[i].count;
1107 entropy_sum /= ws->sample_size;
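shannon_entropy() (lines 1093 to 1107) approximates H = -sum p_i * log2(p_i) in integer math. With p_i = count_i / sample_size, -log2(p_i) = log2(sample_size) - log2(count_i), which is why each bucket contributes count * (sz_base - p_base). The early exit on the first empty bucket works because byte_core_set_size() has already sorted the buckets in descending order by the time this runs. A reconstruction; ilog2_w() is taken to be a fixed-point log2 helper and entropy_max the 8-bit maximum, both assumptions beyond the listed lines:

    static u32 shannon_entropy(struct heuristic_ws *ws)
    {
            const u32 entropy_max = 8 * ilog2_w(2);
            u32 entropy_sum = 0;
            u32 p, p_base, sz_base;
            u32 i;

            sz_base = ilog2_w(ws->sample_size);
            for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
                    p = ws->bucket[i].count;
                    p_base = ilog2_w(p);
                    entropy_sum += p * (sz_base - p_base);
            }

            entropy_sum /= ws->sample_size;

            /* Scale to a percentage of the maximum possible entropy. */
            return entropy_sum * 100 / entropy_max;
    }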
1229 static int byte_core_set_size(struct heuristic_ws *ws)
1233 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1234 struct bucket_item *bucket = ws->bucket;
1237 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
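byte_core_set_size() (lines 1229 to 1237) sorts the byte histogram in descending count order (radix_sort(), with bucket_b as the scratch array) and then counts how many of the most frequent byte values it takes to cover 90% of the sample; a small core set means skewed, compressible data. A sketch of the counting that follows the sort; the BYTE_CORE_SET_LOW/HIGH cutoff names are assumptions:

    u32 i;
    u32 coreset_sum = 0;
    const u32 core_set_threshold = ws->sample_size * 90 / 100;
    struct bucket_item *bucket = ws->bucket;

    radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

    /* The hottest buckets usually decide it quickly. */
    for (i = 0; i < BYTE_CORE_SET_LOW; i++)
            coreset_sum += bucket[i].count;

    if (coreset_sum > core_set_threshold)
            return i;

    for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
            coreset_sum += bucket[i].count;
            if (coreset_sum > core_set_threshold)
                    break;
    }

    return i;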
1267 static u32 byte_set_size(const struct heuristic_ws *ws)
1273 if (ws->bucket[i].count > 0)
1283 if (ws->bucket[i].count > 0) {
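byte_set_size() (lines 1267 to 1283) counts how many distinct byte values occur in the sample. The two loops visible at lines 1273 and 1283 split the work: the first scans an initial run of buckets unconditionally, the second stops as soon as the count crosses the threshold, since the exact number no longer matters past that point. A reconstruction, with BYTE_SET_THRESHOLD assumed to be the caller's cutoff:

    static u32 byte_set_size(const struct heuristic_ws *ws)
    {
            u32 i;
            u32 byte_set_size = 0;

            for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
                    if (ws->bucket[i].count > 0)
                            byte_set_size++;
            }

            /* Past the threshold, bail out as soon as we exceed it. */
            for (; i < BUCKET_SIZE; i++) {
                    if (ws->bucket[i].count > 0) {
                            byte_set_size++;
                            if (byte_set_size > BYTE_SET_THRESHOLD)
                                    return byte_set_size;
                    }
            }

            return byte_set_size;
    }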
1293 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1295 const u32 half_of_sample = ws->sample_size / 2;
1296 const u8 *data = ws->sample;
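sample_repeated_patterns() (lines 1293 to 1296) checks whether the first half of the sample is identical to the second half; if so, the data is trivially repetitive and compression is worthwhile without any further analysis. The body reduces to a single comparison (a sketch consistent with the listed declarations):

    static bool sample_repeated_patterns(struct heuristic_ws *ws)
    {
            const u32 half_of_sample = ws->sample_size / 2;
            const u8 *data = ws->sample;

            return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
    }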
1302 struct heuristic_ws *ws)
1338 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1350 ws->sample_size = curr_sample_pos;
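heuristic_collect_sample() (lines 1302 to 1350) does not feed the whole extent to the heuristic; it copies small chunks at fixed strides across the input range (the memcpy at line 1338) and records how much it gathered (line 1350). A deliberately simplified sketch of the sampling pattern over a flat buffer; the real function walks and maps pages, and SAMPLING_READ_SIZE/SAMPLING_INTERVAL stand for the chunk and stride sizes:

    u32 i, curr_sample_pos = 0;

    /* Take SAMPLING_READ_SIZE bytes every SAMPLING_INTERVAL bytes. */
    for (i = 0; i + SAMPLING_READ_SIZE <= len &&
                curr_sample_pos + SAMPLING_READ_SIZE <= MAX_SAMPLE_SIZE;
         i += SAMPLING_INTERVAL) {
            memcpy(&ws->sample[curr_sample_pos], &in_data[i],
                   SAMPLING_READ_SIZE);
            curr_sample_pos += SAMPLING_READ_SIZE;
    }

    ws->sample_size = curr_sample_pos;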
1371 struct heuristic_ws *ws;
1376 ws = list_entry(ws_list, struct heuristic_ws, list);
1378 heuristic_collect_sample(inode, start, end, ws);
1380 if (sample_repeated_patterns(ws)) {
1385 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1387 for (i = 0; i < ws->sample_size; i++) {
1388 byte = ws->sample[i];
1389 ws->bucket[byte].count++;
1392 i = byte_set_size(ws);
1398 i = byte_core_set_size(ws);
1409 i = shannon_entropy(ws);
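The driver (lines 1371 to 1409) orders the tests from cheapest to most expensive: grab a heuristic workspace, collect the sample, and return early at the first conclusive check (repeated pattern at 1380, byte-set size at 1392, core-set size at 1398), falling back to the Shannon entropy estimate at 1409 only when nothing else decided. A sketch of the decision chain after the histogram at lines 1385 to 1389 is filled; the threshold names and the convention that a nonzero return means "compress" are assumptions drawn from the structure above:

    i = byte_set_size(ws);
    if (i < BYTE_SET_THRESHOLD) {
            ret = 2;                /* few distinct bytes: compress */
            goto out;
    }

    i = byte_core_set_size(ws);
    if (i <= BYTE_CORE_SET_LOW) {
            ret = 3;                /* heavily skewed histogram: compress */
            goto out;
    }
    if (i >= BYTE_CORE_SET_HIGH) {
            ret = 0;                /* near-uniform histogram: skip */
            goto out;
    }

    i = shannon_entropy(ws);
    if (i <= ENTROPY_LVL_ACEPTABLE)
            ret = 4;                /* low entropy: compress */
    else if (i < ENTROPY_LVL_HIGH)
            ret = 5;                /* borderline: still try */
    else
            ret = 0;                /* high entropy: skip */
    out:
            put_workspace(0, ws_list);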