Lines Matching refs:data
107 * Metadata block descriptor (for cached metadata blocks).
116 void *data;
442 mblk->data = page_address(mblk->page);
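The descriptor at 107 caches one on-disk metadata block; the data field declared at 116 is simply the kernel mapping of the block's backing page, set at 442. A minimal sketch of that pairing, with the other fields assumed for illustration:

    struct dmz_mblock {
            struct rb_node   node;   /* lookup by block number (assumed) */
            struct list_head link;   /* LRU of cached blocks (assumed) */
            sector_t         no;     /* metadata block number (assumed) */
            unsigned long    state;  /* dirty/IO flags (assumed) */
            struct page      *page;  /* backing page */
            void             *data;  /* kernel address of that page */
    };

    /* Allocation side, matching 442: */
    mblk->page = alloc_page(GFP_NOIO);
    if (mblk->page)
            mblk->data = page_address(mblk->page);

Every other match below dereferences mblk->data with a type chosen by the caller: superblock, chunk map, or bitmap.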
849 /* Flush drive cache (this will also sync data) */
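At 849 the driver orders a full drive-cache flush before updating metadata, so previously written data and metadata blocks are durable first. A hedged sketch (blkdev_issue_flush() has changed signature across kernel versions; the bdev-only form is the current one):

    /* Flush drive cache (this will also sync data) */
    ret = blkdev_issue_flush(dev->bdev);
    if (ret)
            return ret;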
1125 zmd->sb[1].sb = mblk->data;
1162 sb->sb = mblk->data;
1215 zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
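At 1125, 1162, and 1215 the on-disk superblock is never copied out: the sb pointer simply aliases the cached block's data buffer, so reads and updates go straight to the page that will be written back. A sketch of the load-and-validate side, with dmz_get_mblock(), struct dmz_super, and DMZ_MAGIC assumed from context:

    struct dmz_super *sb;

    mblk = dmz_get_mblock(zmd, 0);       /* assumed: read and cache block 0 */
    if (IS_ERR(mblk))
            return PTR_ERR(mblk);

    sb = mblk->data;                     /* view the block as a superblock */
    if (le32_to_cpu(sb->magic) != DMZ_MAGIC)
            return -ENXIO;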
1354 static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
1356 struct dmz_dev *dev = data;
1560 void *data)
1562 struct dm_zone *zone = data;
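1354-1562 are report-zones callbacks: the block layer invokes them once per discovered zone and passes back an opaque void *data cookie, which each callback casts to its own context, the whole device at 1356 or a single zone at 1562. A sketch of how such a callback is wired up via the modern blkdev_report_zones() API (the exact call site here is an assumption):

    static int dmz_init_zone(struct blk_zone *blkz, unsigned int num,
                             void *data)
    {
            struct dmz_dev *dev = data;  /* recover the caller's context */

            /* ... record zone type, start sector, and write pointer ... */
            return 0;
    }

    /* Caller: iterate every zone, handing dev through as the cookie. */
    ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES,
                              dmz_init_zone, dev);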
1704 dmap = dmap_mblk->data;
1709 /* Check data zone */
1715 dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
1722 dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
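1704-1722 load the chunk mapping table, which lives directly in the cached mapping blocks, and reject entries whose data zone ID is out of range or names a zone that is not present. A sketch, assuming each entry is a pair of little-endian zone IDs, that DMZ_MAP_UNMAPPED marks an unused entry, and that zmd->nr_zones holds the zone count:

    struct dmz_map {
            __le32 dzone_id;     /* data zone backing this chunk */
            __le32 bzone_id;     /* optional write-buffer zone */
    };

    struct dmz_map *dmap = dmap_mblk->data;
    unsigned int dzone_id = le32_to_cpu(dmap[e].dzone_id);

    if (dzone_id != DMZ_MAP_UNMAPPED && dzone_id >= zmd->nr_zones) {
            dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
                        chunk, dzone_id);
            return -EIO;
    }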
1778 * At this point, only meta zones and mapped data zones were
1779 * fully initialized. All remaining zones are unmapped data
1803 /* Unmapped data zone */
1829 * Set a data chunk mapping.
1835 struct dmz_map *dmap = dmap_mblk->data;
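1829-1835 are the write side of the same table: setting a chunk's mapping means converting the zone IDs to on-disk endianness in place and dirtying the cached block so it gets flushed. A minimal sketch, with the dirty-marking helper name assumed:

    struct dmz_map *dmap = dmap_mblk->data;

    dmap[chunk].dzone_id = cpu_to_le32(dzone_id);
    dmap[chunk].bzone_id = cpu_to_le32(bzone_id);
    dmz_dirty_mblock(zmd, dmap_mblk);    /* assumed: queue block for writeback */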
1956 * data zone that can be reclaimed.
2022 * (1) There are no free sequential zones. Then a random data zone
2026 * the oldest random zone (data or buffer) that can be locked.
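2022-2026 describe the reclaim policy when no free sequential zone is left: pick the oldest random zone, data or buffer, that can actually be locked. A sketch of that scan, assuming the random zones sit on an LRU-ordered list and a try-lock helper exists:

    struct dm_zone *zone;

    list_for_each_entry(zone, &zmd->map_rnd_list, link) {
            if (dmz_lock_zone_reclaim(zone))  /* assumed try-lock helper */
                    return zone;              /* oldest lockable zone */
    }
    return NULL;                              /* nothing reclaimable yet */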
2048 struct dmz_map *dmap = dmap_mblk->data;
2121 * Write and discard change the block validity of data zones and their buffer
2144 /* Deactivate the data zone */
2149 /* Unbuffered inactive empty data zone: reclaim it */
2349 * Unmapping the chunk data zone: the zone must
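2121-2149 cover the bookkeeping after writes and discards change block validity: the data zone is deactivated, and a zone that ends up empty, inactive, and unbuffered can be unmapped on the spot instead of waiting for reclaim. A speculative sketch with all helper names assumed:

    /* Deactivate the data zone */
    dmz_deactivate_zone(zone);

    /* Unbuffered inactive empty data zone: reclaim it */
    if (!zone->bzone && !dmz_is_active(zone) && dmz_is_empty(zone))
            dmz_unmap_zone(zmd, zone);    /* assumed unmapping helper */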
2431 memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
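At 2431 a whole metadata block is copied between two cached buffers; in context this moves a zone's valid-block bitmap to another zone one block at a time, for instance when a buffer zone's contents are merged into a data zone. A sketch of the surrounding loop, with the bitmap-block lookup helper assumed:

    while (nr_blocks) {
            /* Fetch the bitmap block covering chunk_block in each zone */
            from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block); /* assumed */
            to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);

            memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
            dmz_dirty_mblock(zmd, to_mblk);

            /* One bitmap block covers DMZ_BLOCK_SIZE * 8 chunk blocks */
            chunk_block += DMZ_BLOCK_SIZE * 8;
            nr_blocks -= min_t(sector_t, nr_blocks, DMZ_BLOCK_SIZE * 8);
    }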
2500 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2579 count = dmz_clear_bits((unsigned long *)mblk->data,
2620 (unsigned long *) mblk->data) != 0;
2650 bitmap = (unsigned long *) mblk->data;
2761 bitmap = mblk->data;
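2500-2761 treat mblk->data as a raw unsigned long bitmap: the set/clear helpers flip a range of bits and return how many actually changed, so the caller can keep each zone's valid-block weight in sync. A sketch of a range-set helper in that style (the aligned whole-word fast path is an assumption about the implementation):

    static unsigned int dmz_set_bits(unsigned long *bitmap,
                                     unsigned int bit, unsigned int nr_bits)
    {
            unsigned int end = bit + nr_bits;
            unsigned int n = 0;

            while (bit < end) {
                    /* Fast path: set a whole aligned word at once */
                    if ((bit & (BITS_PER_LONG - 1)) == 0 &&
                        end - bit >= BITS_PER_LONG) {
                            unsigned long *addr = bitmap + BIT_WORD(bit);

                            if (*addr == 0) {
                                    *addr = ULONG_MAX;
                                    n += BITS_PER_LONG;
                                    bit += BITS_PER_LONG;
                                    continue;
                            }
                    }
                    if (!test_and_set_bit(bit, bitmap))
                            n++;    /* count only bits that were clear */
                    bit++;
            }
            return n;
    }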
2960 dmz_zmd_debug(zmd, " %u data zones for %u chunks",
2972 dmz_zmd_debug(zmd, " %u reserved sequential data zones",
2977 dmz_zmd_debug(zmd, " %u data zone mapping blocks",