Lines matching refs: ws (fs/btrfs/compression.c); each entry below is prefixed with its line number in that file.

65 static int compression_compress_pages(int type, struct list_head *ws,
72 return zlib_compress_pages(ws, mapping, start, pages,
75 return lzo_compress_pages(ws, mapping, start, pages,
78 return zstd_compress_pages(ws, mapping, start, pages,
96 static int compression_decompress_bio(int type, struct list_head *ws,
100 case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
101 case BTRFS_COMPRESS_LZO: return lzo_decompress_bio(ws, cb);
102 case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
113 static int compression_decompress(int type, struct list_head *ws,
118 case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
120 case BTRFS_COMPRESS_LZO: return lzo_decompress(ws, data_in, dest_page,
122 case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
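
The three helpers above (compression_compress_pages, compression_decompress_bio, compression_decompress) only fan a generic entry point out to the per-algorithm backend, keyed by the compression type. A minimal stand-alone C sketch of that switch-on-type dispatch; the enum values and function bodies here are placeholders, not the kernel's:

enum compress_type { COMPRESS_ZLIB, COMPRESS_LZO, COMPRESS_ZSTD };

/* Placeholder backends standing in for zlib/lzo/zstd_compress_pages(). */
static int zlib_compress(void *ws)  { (void)ws; return 0; }
static int lzo_compress(void *ws)   { (void)ws; return 0; }
static int zstd_compress(void *ws)  { (void)ws; return 0; }

/* Same shape as compression_compress_pages(): pick the backend by type. */
static int compress_dispatch(enum compress_type type, void *ws)
{
	switch (type) {
	case COMPRESS_ZLIB: return zlib_compress(ws);
	case COMPRESS_LZO:  return lzo_compress(ws);
	case COMPRESS_ZSTD: return zstd_compress(ws);
	}
	return -1; /* not reached for valid types */
}
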
835 static void free_heuristic_ws(struct list_head *ws)
839 workspace = list_entry(ws, struct heuristic_ws, list);
849 struct heuristic_ws *ws;
851 ws = kzalloc(sizeof(*ws), GFP_KERNEL);
852 if (!ws)
855 ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
856 if (!ws->sample)
859 ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
860 if (!ws->bucket)
863 ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
864 if (!ws->bucket_b)
867 INIT_LIST_HEAD(&ws->list);
868 return &ws->list;
870 free_heuristic_ws(&ws->list);
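
alloc_heuristic_ws() (source lines 849-870 above) builds the heuristic workspace in stages and unwinds through free_heuristic_ws() on any allocation failure. A user-space sketch of the same allocate-then-unwind pattern, with malloc/calloc standing in for kzalloc/kvmalloc/kcalloc; BUCKET_SIZE of 256 (one bucket per byte value) and the sample buffer size are assumed values:

#include <stdlib.h>

#define BUCKET_SIZE	256	/* one bucket per possible byte value */
#define MAX_SAMPLE_SIZE	8192	/* illustrative sample buffer size */

struct bucket_item { unsigned int count; };

struct heuristic_ws {
	unsigned char *sample;
	unsigned int sample_size;
	struct bucket_item *bucket;	/* byte frequency histogram */
	struct bucket_item *bucket_b;	/* scratch space for the sort */
};

static void free_heuristic_ws(struct heuristic_ws *ws)
{
	if (!ws)
		return;
	free(ws->sample);
	free(ws->bucket);
	free(ws->bucket_b);
	free(ws);
}

/* Allocate piece by piece; any failure unwinds through the one free path. */
static struct heuristic_ws *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws = calloc(1, sizeof(*ws));

	if (!ws)
		return NULL;
	ws->sample = malloc(MAX_SAMPLE_SIZE);
	if (!ws->sample)
		goto fail;
	ws->bucket = calloc(BUCKET_SIZE, sizeof(*ws->bucket));
	if (!ws->bucket)
		goto fail;
	ws->bucket_b = calloc(BUCKET_SIZE, sizeof(*ws->bucket_b));
	if (!ws->bucket_b)
		goto fail;
	return ws;
fail:
	free_heuristic_ws(ws);
	return NULL;
}
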
902 static void free_workspace(int type, struct list_head *ws)
905 case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
906 case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
907 case BTRFS_COMPRESS_LZO: return lzo_free_workspace(ws);
908 case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
947 struct list_head *ws;
951 ws = wsman->idle_ws.next;
952 list_del(ws);
953 free_workspace(type, ws);
1063 void btrfs_put_workspace(int type, struct list_head *ws)
1081 list_add(ws, idle_ws);
1088 free_workspace(type, ws);
1094 static void put_workspace(int type, struct list_head *ws)
1097 case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
1098 case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
1099 case BTRFS_COMPRESS_LZO: return btrfs_put_workspace(type, ws);
1100 case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
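
free_workspace(), the idle_ws drain loop, and btrfs_put_workspace()/put_workspace() together form a small workspace pool: put either parks a finished workspace on the manager's idle list or frees it, and teardown drains whatever is still parked. A simplified user-space sketch of that pooling idea, without the locking, wait queues, and per-type dispatch of the kernel code; the struct layout and the max_ws cap are illustrative assumptions:

#include <stdlib.h>

/* Hypothetical workspace and pool types, for illustration only. */
struct workspace {
	struct workspace *next;
	/* ... per-algorithm state ... */
};

struct workspace_manager {
	struct workspace *idle_ws;	/* parked, ready-to-reuse workspaces */
	int free_ws;			/* how many are parked */
	int max_ws;			/* cap before freeing instead of parking */
};

static void free_workspace(struct workspace *ws)
{
	free(ws);
}

/* Park the workspace if the pool has room, otherwise free it. */
static void put_workspace(struct workspace_manager *wsman, struct workspace *ws)
{
	if (wsman->free_ws < wsman->max_ws) {
		ws->next = wsman->idle_ws;
		wsman->idle_ws = ws;
		wsman->free_ws++;
		return;
	}
	free_workspace(ws);
}

/* Drain every parked workspace, as the cleanup path does for idle_ws. */
static void cleanup_workspace_manager(struct workspace_manager *wsman)
{
	while (wsman->idle_ws) {
		struct workspace *ws = wsman->idle_ws;

		wsman->idle_ws = ws->next;
		wsman->free_ws--;
		free_workspace(ws);
	}
}
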
1361 static u32 shannon_entropy(struct heuristic_ws *ws)
1368 sz_base = ilog2_w(ws->sample_size);
1369 for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
1370 p = ws->bucket[i].count;
1375 entropy_sum /= ws->sample_size;
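
shannon_entropy() walks the byte-frequency buckets and accumulates count * (log2(sample_size) - log2(count)) using the fixed-point helper ilog2_w(), then divides by the sample size and scales the result. A floating-point restatement of the same formula, returning the entropy of the sampled bytes as a percentage of the 8-bit maximum (the scaling to percent is an assumption of this sketch):

#include <math.h>

#define BUCKET_SIZE	256

/*
 * Shannon entropy over the histogram:
 * H = -sum_i p_i * log2(p_i), with p_i = count_i / sample_size.
 * Returned as a percentage of the 8 bits/byte maximum.
 */
static unsigned int shannon_entropy_percent(const unsigned int *bucket,
					    unsigned int sample_size)
{
	double entropy = 0.0;
	unsigned int i;

	for (i = 0; i < BUCKET_SIZE; i++) {
		double p;

		if (!bucket[i])
			continue;
		p = (double)bucket[i] / sample_size;
		entropy -= p * log2(p);
	}
	return (unsigned int)(entropy * 100.0 / 8.0);
}

Low values mean the sample is dominated by a few predictable byte values and should compress well; values near 100 indicate near-random data.
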
1497 static int byte_core_set_size(struct heuristic_ws *ws)
1501 const u32 core_set_threshold = ws->sample_size * 90 / 100;
1502 struct bucket_item *bucket = ws->bucket;
1505 radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
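
byte_core_set_size() sorts the buckets in descending order of count (the kernel's radix_sort() uses bucket_b as scratch space) and then counts how many byte values it takes to cover 90% of the sample. A sketch of the same idea with qsort() swapped in for the radix sort:

#include <stdlib.h>

#define BUCKET_SIZE	256

/* Descending comparison for qsort(); the kernel uses a radix sort instead. */
static int cmp_count_desc(const void *a, const void *b)
{
	unsigned int ca = *(const unsigned int *)a;
	unsigned int cb = *(const unsigned int *)b;

	return (ca < cb) - (ca > cb);
}

/*
 * How many distinct byte values are needed to cover 90% of the sample?
 * A small core set means a few symbols dominate and the data should
 * compress well.  Note: sorts the bucket array in place.
 */
static unsigned int byte_core_set_size(unsigned int *bucket,
				       unsigned int sample_size)
{
	const unsigned int core_set_threshold = sample_size * 90 / 100;
	unsigned int sum = 0;
	unsigned int i;

	qsort(bucket, BUCKET_SIZE, sizeof(*bucket), cmp_count_desc);

	for (i = 0; i < BUCKET_SIZE && bucket[i] > 0; i++) {
		sum += bucket[i];
		if (sum > core_set_threshold)
			return i + 1;	/* i + 1 most frequent values suffice */
	}
	return i;
}
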
1535 static u32 byte_set_size(const struct heuristic_ws *ws)
1541 if (ws->bucket[i].count > 0)
1551 if (ws->bucket[i].count > 0) {
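
byte_set_size() simply counts how many distinct byte values occur in the sample at all, giving up early once the count crosses a threshold because the check can no longer say anything useful. A condensed single-loop sketch (the kernel splits the loop in two so the common case exits early; the threshold value below is an assumption):

#define BUCKET_SIZE		256
#define BYTE_SET_THRESHOLD	64	/* illustrative cutoff */

/* Number of distinct byte values present in the sample. */
static unsigned int byte_set_size(const unsigned int *bucket)
{
	unsigned int distinct = 0;
	unsigned int i;

	for (i = 0; i < BUCKET_SIZE; i++) {
		if (bucket[i] > 0) {
			distinct++;
			/* Too many symbols already; this check is done. */
			if (distinct > BYTE_SET_THRESHOLD)
				return distinct;
		}
	}
	return distinct;
}
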
1561 static bool sample_repeated_patterns(struct heuristic_ws *ws)
1563 const u32 half_of_sample = ws->sample_size / 2;
1564 const u8 *data = ws->sample;
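
sample_repeated_patterns() is the cheapest check: it compares the first half of the sample against the second half, and an exact match marks the data as trivially compressible. The same check in stand-alone form (a memcmp over the two halves, matching what the half_of_sample fragment above suggests):

#include <stdbool.h>
#include <string.h>

/* True if the second half of the sample repeats the first half exactly. */
static bool sample_repeated_patterns(const unsigned char *sample,
				     unsigned int sample_size)
{
	const unsigned int half_of_sample = sample_size / 2;

	return memcmp(sample, sample + half_of_sample, half_of_sample) == 0;
}
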
1570 struct heuristic_ws *ws)
1606 memcpy(&ws->sample[curr_sample_pos], &in_data[i],
1618 ws->sample_size = curr_sample_pos;
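
heuristic_collect_sample() copies small chunks of the input range at fixed intervals into ws->sample and records how much was gathered in ws->sample_size. A flat-buffer sketch of that strided sampling; the kernel walks the range page by page, and the chunk size, stride, and buffer cap below are illustrative values:

#include <string.h>

#define SAMPLING_READ_SIZE	16	/* bytes copied per chunk (assumed) */
#define SAMPLING_INTERVAL	256	/* stride between chunks (assumed) */
#define MAX_SAMPLE_SIZE		8192	/* sample buffer cap (assumed) */

/* Gather a sparse sample of the input; returns the number of bytes collected. */
static unsigned int collect_sample(const unsigned char *in_data, size_t len,
				   unsigned char *sample)
{
	unsigned int curr_sample_pos = 0;
	size_t i;

	for (i = 0; i + SAMPLING_READ_SIZE <= len; i += SAMPLING_INTERVAL) {
		if (curr_sample_pos + SAMPLING_READ_SIZE > MAX_SAMPLE_SIZE)
			break;
		memcpy(&sample[curr_sample_pos], &in_data[i],
		       SAMPLING_READ_SIZE);
		curr_sample_pos += SAMPLING_READ_SIZE;
	}
	return curr_sample_pos;	/* becomes ws->sample_size */
}
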
1639 struct heuristic_ws *ws;
1644 ws = list_entry(ws_list, struct heuristic_ws, list);
1646 heuristic_collect_sample(inode, start, end, ws);
1648 if (sample_repeated_patterns(ws)) {
1653 memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);
1655 for (i = 0; i < ws->sample_size; i++) {
1656 byte = ws->sample[i];
1657 ws->bucket[byte].count++;
1660 i = byte_set_size(ws);
1666 i = byte_core_set_size(ws);
1677 i = shannon_entropy(ws);
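
btrfs_compress_heuristic() chains the checks in cost order: the repeated-pattern test first, then the byte-frequency histogram feeding the byte-set and core-set checks, with Shannon entropy only as the last resort. A simplified yes/no sketch composing the helper sketches above (it assumes their definitions are in scope; the kernel returns a small per-check code instead of a bool, and the threshold values are illustrative):

#include <stdbool.h>
#include <string.h>

#define BYTE_CORE_SET_LOW	64	/* illustrative thresholds */
#define BYTE_CORE_SET_HIGH	200
#define ENTROPY_OK_PERCENT	65

static bool compress_heuristic(const unsigned char *data, size_t len,
			       unsigned char *sample, unsigned int *bucket)
{
	unsigned int sample_size, core_set, i, byte;

	sample_size = collect_sample(data, len, sample);

	/* Trivially repetitive data: compress it. */
	if (sample_repeated_patterns(sample, sample_size))
		return true;

	/* Build the byte-frequency histogram. */
	memset(bucket, 0, sizeof(*bucket) * BUCKET_SIZE);
	for (i = 0; i < sample_size; i++) {
		byte = sample[i];
		bucket[byte]++;
	}

	/* Few distinct byte values: compress it. */
	if (byte_set_size(bucket) < BYTE_SET_THRESHOLD)
		return true;

	/* A small core set covers 90% of the sample: compress it. */
	core_set = byte_core_set_size(bucket, sample_size);
	if (core_set <= BYTE_CORE_SET_LOW)
		return true;
	if (core_set >= BYTE_CORE_SET_HIGH)
		return false;

	/* Otherwise let the entropy estimate decide. */
	return shannon_entropy_percent(bucket, sample_size) <= ENTROPY_OK_PERCENT;
}
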