// SPDX-License-Identifier: MIT
/*
 * Implementation of libfsverity_compute_digest().
 *
 * Copyright 2018 Google LLC
 * Copyright (C) 2020 Facebook
 *
 * Use of this source code is governed by an MIT-style
 * license that can be found in the LICENSE file or at
 * https://opensource.org/licenses/MIT.
 */

#include "lib_private.h"

#include <stdlib.h>
#include <string.h>

#define FS_VERITY_MAX_LEVELS	64

struct block_buffer {
	u32 filled;
	u8 *data;
};

/*
 * Hash a block, writing the result to the next level's pending block buffer.
 */
static void hash_one_block(struct hash_ctx *hash, struct block_buffer *cur,
			   u32 block_size, const u8 *salt, u32 salt_size)
{
	struct block_buffer *next = cur + 1;

	/* Zero-pad the block if it's shorter than block_size. */
	memset(&cur->data[cur->filled], 0, block_size - cur->filled);

	libfsverity_hash_init(hash);
	libfsverity_hash_update(hash, salt, salt_size);
	libfsverity_hash_update(hash, cur->data, block_size);
	libfsverity_hash_final(hash, &next->data[next->filled]);

	next->filled += hash->alg->digest_size;
	cur->filled = 0;
}

static bool block_is_full(const struct block_buffer *block, u32 block_size,
			  struct hash_ctx *hash)
{
	/* Would the next hash put us over the limit? */
	return block->filled + hash->alg->digest_size > block_size;
}

static int report_merkle_tree_size(const struct libfsverity_metadata_callbacks *cbs,
				   u64 size)
{
	if (cbs && cbs->merkle_tree_size) {
		int err = cbs->merkle_tree_size(cbs->ctx, size);

		if (err) {
			libfsverity_error_msg("error processing Merkle tree size");
			return err;
		}
	}
	return 0;
}

static int report_merkle_tree_block(const struct libfsverity_metadata_callbacks *cbs,
				    const struct block_buffer *block,
				    u32 block_size, u64 *level_offset)
{
	if (cbs && cbs->merkle_tree_block) {
		int err = cbs->merkle_tree_block(cbs->ctx, block->data,
						 block_size,
						 *level_offset * block_size);

		if (err) {
			libfsverity_error_msg("error processing Merkle tree block");
			return err;
		}
		(*level_offset)++;
	}
	return 0;
}

static int report_descriptor(const struct libfsverity_metadata_callbacks *cbs,
			     const void *descriptor, size_t size)
{
	if (cbs && cbs->descriptor) {
		int err = cbs->descriptor(cbs->ctx, descriptor, size);

		if (err) {
			libfsverity_error_msg("error processing fs-verity descriptor");
			return err;
		}
	}
	return 0;
}

/*
 * Compute the file's Merkle tree root hash using the given hash algorithm,
 * block size, and salt.
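 *
 * Illustrative example (for concreteness only): with SHA-256 (32-byte
 * digests) and a 4096-byte block size, each tree block holds 4096 / 32 = 128
 * hashes.  A 1 GiB file has 262144 data blocks, which reduce to 2048 blocks
 * at level 0, then 16 blocks at level 1, then a single block at level 2;
 * the hash of that last block is the root hash.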
 */
static int compute_root_hash(void *fd, libfsverity_read_fn_t read_fn,
			     u64 file_size, struct hash_ctx *hash,
			     u32 block_size, const u8 *salt, u32 salt_size,
			     const struct libfsverity_metadata_callbacks *metadata_cbs,
			     u8 *root_hash)
{
	const u32 hashes_per_block = block_size / hash->alg->digest_size;
	const u32 padded_salt_size = roundup(salt_size, hash->alg->block_size);
	u8 *padded_salt = NULL;
	u64 blocks;
	int num_levels = 0;
	int level;
	u64 level_offset[FS_VERITY_MAX_LEVELS];
	struct block_buffer _buffers[1 + FS_VERITY_MAX_LEVELS + 1] = {};
	struct block_buffer *buffers = &_buffers[1];
	u64 offset;
	int err = 0;

	/* Root hash of empty file is all 0's */
	if (file_size == 0) {
		memset(root_hash, 0, hash->alg->digest_size);
		return report_merkle_tree_size(metadata_cbs, 0);
	}

	if (salt_size != 0) {
		padded_salt = libfsverity_zalloc(padded_salt_size);
		if (!padded_salt)
			return -ENOMEM;
		memcpy(padded_salt, salt, salt_size);
	}

	/* Compute number of levels and the number of blocks in each level. */
	blocks = DIV_ROUND_UP(file_size, block_size);
	while (blocks > 1) {
		if (WARN_ON(num_levels >= FS_VERITY_MAX_LEVELS)) {
			err = -EINVAL;
			goto out;
		}
		blocks = DIV_ROUND_UP(blocks, hashes_per_block);
		/*
		 * Temporarily use level_offset[] to store the number of blocks
		 * in each level.  It will be overwritten later.
		 */
		level_offset[num_levels++] = blocks;
	}

	/*
	 * Compute the starting block of each level, using the convention where
	 * the root level is first, i.e. the convention used by
	 * FS_IOC_READ_VERITY_METADATA.  At the same time, compute the total
	 * size of the Merkle tree.  These values are only needed for the
	 * metadata callbacks (if they were given), as the hash computation
	 * itself doesn't prescribe an ordering of the levels and doesn't
	 * prescribe any special meaning to the total size of the Merkle tree.
	 */
	offset = 0;
	for (level = num_levels - 1; level >= 0; level--) {
		blocks = level_offset[level];
		level_offset[level] = offset;
		offset += blocks;
	}
	err = report_merkle_tree_size(metadata_cbs, offset * block_size);
	if (err)
		goto out;

	/*
	 * Allocate the block buffers.  Buffer "-1" is for data blocks.
	 * Buffers 0 <= level < num_levels are for the actual tree levels.
	 * Buffer 'num_levels' is for the root hash.
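	 *
	 * (Note that 'buffers' points at &_buffers[1], which is what makes the
	 * index -1 valid.  buffers[num_levels].data is aliased to the caller's
	 * root_hash buffer rather than allocated here, which is why the
	 * cleanup loop at 'out' stops before it.)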
	 */
	for (level = -1; level < num_levels; level++) {
		buffers[level].data = libfsverity_zalloc(block_size);
		if (!buffers[level].data) {
			err = -ENOMEM;
			goto out;
		}
	}
	buffers[num_levels].data = root_hash;

	/* Hash each data block, also hashing the tree blocks as they fill up */
	for (offset = 0; offset < file_size; offset += block_size) {
		buffers[-1].filled = min(block_size, file_size - offset);

		err = read_fn(fd, buffers[-1].data, buffers[-1].filled);
		if (err) {
			libfsverity_error_msg("error reading file");
			goto out;
		}

		hash_one_block(hash, &buffers[-1], block_size,
			       padded_salt, padded_salt_size);
		for (level = 0; level < num_levels; level++) {
			if (!block_is_full(&buffers[level], block_size, hash))
				break;
			hash_one_block(hash, &buffers[level], block_size,
				       padded_salt, padded_salt_size);
			err = report_merkle_tree_block(metadata_cbs,
						       &buffers[level],
						       block_size,
						       &level_offset[level]);
			if (err)
				goto out;
		}
	}
	/* Finish all nonempty pending tree blocks */
	for (level = 0; level < num_levels; level++) {
		if (buffers[level].filled != 0) {
			hash_one_block(hash, &buffers[level], block_size,
				       padded_salt, padded_salt_size);
			err = report_merkle_tree_block(metadata_cbs,
						       &buffers[level],
						       block_size,
						       &level_offset[level]);
			if (err)
				goto out;
		}
	}

	/* Root hash was filled by the last call to hash_one_block() */
	if (WARN_ON(buffers[num_levels].filled != hash->alg->digest_size)) {
		err = -EINVAL;
		goto out;
	}
	err = 0;
out:
	for (level = -1; level < num_levels; level++)
		free(buffers[level].data);
	free(padded_salt);
	return err;
}

LIBEXPORT int
libfsverity_compute_digest(void *fd, libfsverity_read_fn_t read_fn,
			   const struct libfsverity_merkle_tree_params *params,
			   struct libfsverity_digest **digest_ret)
{
	u32 alg_num;
	u32 block_size;
	const struct fsverity_hash_alg *hash_alg;
	struct hash_ctx *hash = NULL;
	struct libfsverity_digest *digest;
	struct fsverity_descriptor desc;
	int err;

	if (!read_fn || !params || !digest_ret) {
		libfsverity_error_msg("missing required parameters for compute_digest");
		return -EINVAL;
	}
	if (params->version != 1) {
		libfsverity_error_msg("unsupported version (%u)",
				      params->version);
		return -EINVAL;
	}

	alg_num = params->hash_algorithm ?: FS_VERITY_HASH_ALG_DEFAULT;
	block_size = params->block_size ?: FS_VERITY_BLOCK_SIZE_DEFAULT;

	if (!is_power_of_2(block_size)) {
		libfsverity_error_msg("unsupported block size (%u)",
				      block_size);
		return -EINVAL;
	}
	if (params->salt_size > sizeof(desc.salt)) {
		libfsverity_error_msg("unsupported salt size (%u)",
				      params->salt_size);
		return -EINVAL;
	}
	if (params->salt_size && !params->salt) {
		libfsverity_error_msg("salt_size specified, but salt is NULL");
		return -EINVAL;
	}
	if (!libfsverity_mem_is_zeroed(params->reserved1,
				       sizeof(params->reserved1)) ||
	    !libfsverity_mem_is_zeroed(params->reserved2,
				       sizeof(params->reserved2))) {
		libfsverity_error_msg("reserved bits set in merkle_tree_params");
		return -EINVAL;
	}

	hash_alg = libfsverity_find_hash_alg_by_num(alg_num);
	if (!hash_alg) {
		libfsverity_error_msg("unknown hash algorithm: %u", alg_num);
		return -EINVAL;
	}
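
	/*
	 * Each Merkle tree block must be able to hold at least two hashes;
	 * otherwise the number of blocks would never shrink from one level to
	 * the next and the tree could not converge to a single root block.
	 */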
libfsverity_error_msg("block size (%u) too small for hash algorithm %s", 290 block_size, hash_alg->name); 291 return -EINVAL; 292 } 293 294 hash = hash_alg->create_ctx(hash_alg); 295 if (!hash) 296 return -ENOMEM; 297 298 memset(&desc, 0, sizeof(desc)); 299 desc.version = 1; 300 desc.hash_algorithm = alg_num; 301 desc.log_blocksize = ilog2(block_size); 302 desc.data_size = cpu_to_le64(params->file_size); 303 if (params->salt_size != 0) { 304 memcpy(desc.salt, params->salt, params->salt_size); 305 desc.salt_size = params->salt_size; 306 } 307 308 err = compute_root_hash(fd, read_fn, params->file_size, hash, 309 block_size, params->salt, params->salt_size, 310 params->metadata_callbacks, desc.root_hash); 311 if (err) 312 goto out; 313 314 err = report_descriptor(params->metadata_callbacks, 315 &desc, sizeof(desc)); 316 if (err) 317 goto out; 318 319 digest = libfsverity_zalloc(sizeof(*digest) + hash_alg->digest_size); 320 if (!digest) { 321 err = -ENOMEM; 322 goto out; 323 } 324 digest->digest_algorithm = alg_num; 325 digest->digest_size = hash_alg->digest_size; 326 libfsverity_hash_full(hash, &desc, sizeof(desc), digest->digest); 327 *digest_ret = digest; 328 err = 0; 329out: 330 libfsverity_free_hash_ctx(hash); 331 return err; 332} 333