Lines matching defs:n_blocks (definitions and uses of n_blocks in the dm-writecache target, drivers/md/dm-writecache.c)
74 __le64 n_blocks;
150 size_t n_blocks;
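
Lines 74 and 150 are the two declarations the remaining matches tie together: the persistent __le64 n_blocks stored in the on-media superblock and the in-core size_t n_blocks held in the driver state (struct dm_writecache). The offsetof(struct wc_memory_superblock, entries[n_blocks]) expression at line 2130 only works because the superblock ends in a flexible array of per-block metadata entries. The user-space sketch below illustrates that layout; every field except n_blocks and entries[] is a placeholder, not copied from the driver.

/* Simplified, user-space sketch of the persistent layout; compiles with a
 * plain C compiler.  Field contents are placeholders, not the driver's. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct wc_memory_entry {                /* one slot of per-block metadata */
        uint64_t original_sector;       /* __le64 in the driver */
        uint64_t seq_count;
};

struct wc_memory_superblock {
        uint64_t header[8];             /* placeholder; the real header holds magic,
                                           block size, n_blocks, etc. */
        struct wc_memory_entry entries[];  /* n_blocks metadata slots follow */
};

int main(void)
{
        uint64_t n_blocks = 1024;

        /*
         * offsetof(..., entries[n_blocks]) yields the size of the superblock
         * plus the whole metadata table, the same trick line 2130 uses.
         * A runtime index inside offsetof is a GCC/Clang extension that the
         * kernel also relies on.
         */
        printf("metadata area: %zu bytes\n",
               offsetof(struct wc_memory_superblock, entries[n_blocks]));
        return 0;
}
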
965 wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
968 for (b = 0; b < wc->n_blocks; b++) {
1041 for (b = 0; b < wc->n_blocks; b++) {
1064 for (b = 0; b < wc->n_blocks; b++) {
1161 wc->freelist_high_watermark = wc->n_blocks;
1162 wc->freelist_low_watermark = wc->n_blocks;
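
Line 965 sizes the in-core entries array with array_size(), the overflow-checked multiply from <linux/overflow.h>: on overflow it saturates to SIZE_MAX, so vmalloc() fails cleanly instead of returning an undersized buffer, and the per-block loops at 968, 1041 and 1064 can assume all n_blocks entries exist. Lines 1161-1162 are a different use of the total: raising both freelist watermarks to n_blocks (this appears to be the cleaner/flush path) keeps writeback running until every block is free. Below is a minimal user-space sketch of the saturating multiply; array_size_sketch() is a hypothetical stand-in, not the kernel macro itself.

/* User-space sketch of the saturating multiply behind array_size();
 * the real helper lives in <linux/overflow.h>. */
#include <stdint.h>
#include <stdio.h>

static size_t array_size_sketch(size_t a, size_t b)
{
        size_t bytes;

        /* __builtin_mul_overflow is the same compiler primitive the kernel
           macro is built on */
        if (__builtin_mul_overflow(a, b, &bytes))
                return SIZE_MAX;        /* saturate: the allocation then fails */
        return bytes;
}

int main(void)
{
        size_t n_blocks = 1 << 20;

        printf("entries array: %zu bytes\n",
               array_size_sketch(sizeof(uint64_t [4]), n_blocks));
        printf("overflow case: %zu\n",
               array_size_sketch(SIZE_MAX / 2, 4));     /* prints SIZE_MAX */
        return 0;
}
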
2117 uint64_t n_blocks, offset;
2120 n_blocks = device_size;
2121 do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
2124 if (!n_blocks)
2126 /* Verify the following entries[n_blocks] won't overflow */
2127 if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
2130 offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
2132 if (offset + n_blocks * block_size <= device_size)
2134 n_blocks--;
2138 e.index = n_blocks;
2139 if (e.index != n_blocks)
2143 *n_blocks_p = n_blocks;
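
Lines 2117-2143 size the cache (the function is called as calculate_memory_size() at line 2154): each cache block costs block_size bytes of data plus one struct wc_memory_entry of metadata, so the first estimate is device_size / (block_size + sizeof(struct wc_memory_entry)), and n_blocks is then decremented until the superblock, the metadata table entries[n_blocks] and the data area all fit on the device. The assign-and-compare at lines 2138-2139 rejects values that would be truncated by the narrower index bit-field in struct wc_entry. The sketch below reproduces only the arithmetic visible in these matches as ordinary user-space C: do_div() becomes plain 64-bit division, the entry and superblock sizes are assumed constants, and the driver's alignment of the metadata area to a block boundary (not among the matched lines) is omitted.

/* User-space sketch of the sizing loop around lines 2117-2134. */
#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE  16u         /* assumed sizeof(struct wc_memory_entry) */
#define SB_SIZE     64u         /* assumed superblock header size */

static int calculate_n_blocks(uint64_t device_size, uint32_t block_size,
                              uint64_t *n_blocks_p)
{
        /* first estimate: every block needs data plus one metadata entry */
        uint64_t n_blocks = device_size / (block_size + ENTRY_SIZE);

        while (1) {
                uint64_t offset;

                if (!n_blocks)
                        return -1;      /* device too small; the driver errors out */
                /* verify that entries[n_blocks] cannot overflow a size_t
                   (the check at line 2127) */
                if (n_blocks >= (SIZE_MAX - SB_SIZE) / ENTRY_SIZE)
                        return -1;
                offset = SB_SIZE + n_blocks * ENTRY_SIZE;  /* end of metadata table */
                if (offset + n_blocks * block_size <= device_size)
                        break;          /* metadata plus data fit: done */
                n_blocks--;             /* otherwise shrink and retry */
        }
        *n_blocks_p = n_blocks;
        return 0;
}

int main(void)
{
        uint64_t n_blocks;

        if (!calculate_n_blocks(1ULL << 30, 4096, &n_blocks))  /* 1 GiB, 4 KiB blocks */
                printf("n_blocks = %llu\n", (unsigned long long)n_blocks);
        return 0;
}
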
2154 r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
2166 pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
2169 for (b = 0; b < wc->n_blocks; b++) {
2520 size_t n_blocks, n_metadata_blocks;
2535 &n_blocks, &n_metadata_blocks);
2619 wc->n_blocks = le64_to_cpu(s.n_blocks);
2621 offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2622 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2632 data_size = wc->n_blocks * (size_t)wc->block_size;
2633 if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2645 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2649 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2691 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
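
Lines 2619-2633 re-derive the same sizes at table-construction time from the n_blocks value stored in the superblock, with explicit multiplication-overflow checks to guard against a corrupted or mismatched superblock. Lines 2645 and 2649 then turn the user-supplied high/low watermark percentages (percent of the cache that is filled) into absolute free-block counts: the product is n_blocks * (100 - percent), which the driver goes on to divide by 100, so a high watermark of 90% becomes a threshold of 10% of n_blocks free blocks. Line 2691 reports the same n_blocks total next to the current freelist and writeback sizes in the target's status output. Below is a small user-space sketch of that percentage-to-count conversion; wm_blocks() is a hypothetical helper, and the round-to-nearest step is an assumption about the unlisted lines that follow 2645/2649.

/* User-space sketch: convert a used-blocks percentage into a free-block
 * threshold counted in cache blocks. */
#include <stdint.h>
#include <stdio.h>

static uint64_t wm_blocks(uint64_t n_blocks, unsigned int wm_percent)
{
        uint64_t x = n_blocks * (100 - wm_percent);     /* free-block share, times 100 */

        return (x + 50) / 100;                          /* assumed round to nearest */
}

int main(void)
{
        uint64_t n_blocks = 261123;     /* e.g. the 1 GiB / 4 KiB result above */

        /* writeback starts when free blocks fall to the high-watermark count
           and stops once they climb back above the low-watermark count */
        printf("high_wm 50%% -> %llu free blocks\n",
               (unsigned long long)wm_blocks(n_blocks, 50));
        printf("low_wm  45%% -> %llu free blocks\n",
               (unsigned long long)wm_blocks(n_blocks, 45));
        return 0;
}
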