Lines Matching defs:zone

33  * blocks indicating zone block validity.
39 * the first conventional zone found on disk.
87 * and give the zone ID (dzone_id) mapping the chunk on disk.
88 * This zone may be sequential or random. If it is a sequential
89 * zone, a second zone (bzone_id) used as a write buffer may
90 * also be specified. This second zone will always be a randomly
91 * writeable zone.
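
A minimal sketch of the mapping entry this comment describes, assuming a packed little-endian on-disk layout (the struct name and field order are assumptions; only the dzone_id/bzone_id fields and DMZ_MAP_UNMAPPED are given by the listing):

/* Sketch: one chunk mapping entry, layout assumed. */
struct dmz_map {
	__le32 dzone_id;	/* Data zone mapping the chunk, or DMZ_MAP_UNMAPPED */
	__le32 bzone_id;	/* Optional buffer zone, or DMZ_MAP_UNMAPPED */
};
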
137 struct dm_zone *zone;
221 static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
223 if (WARN_ON(!zone))
226 return zone->id - zone->dev->zone_offset;
229 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
231 unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
236 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
238 unsigned int zone_id = dmz_dev_zone_id(zmd, zone);
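
Both helper bodies are truncated in this listing. Assuming power-of-two zone sizes tracked as shift counts (zone_nr_blocks_shift appears below; zone_nr_sectors_shift is the assumed sector-side equivalent), the conversions reduce to shifts:

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	/* Zone index within its device, scaled to a 512B sector offset. */
	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	/* Same index, scaled to a metadata block offset. */
	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
}
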
311 struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);
313 if (!zone)
316 if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
317 kfree(zone);
321 INIT_LIST_HEAD(&zone->link);
322 atomic_set(&zone->refcount, 0);
323 zone->id = zone_id;
324 zone->chunk = DMZ_MAP_UNMAPPED;
325 zone->dev = dev;
327 return zone;
359 * The map lock also protects all the zone lists.
375 * the map lock and zone state management (active and reclaim state are
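
A sketch of how such a lock is typically wrapped (dmz_lock_map/dmz_unlock_map and the map_lock mutex are assumed names):

/* Assumed helpers: one mutex serializes the chunk map and all zone lists. */
static inline void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

static inline void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}
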
807 sb_block = (u64)zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
1030 if (sb_block != (u64)dsb->zone->id << zmd->zone_nr_blocks_shift) {
1034 (u64)dsb->zone->id << zmd->zone_nr_blocks_shift);
1134 unsigned int zone_id = zmd->sb[0].zone->id;
1147 zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
1155 zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
1160 zmd->sb[1].zone = NULL;
1206 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1208 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1252 if (!zmd->sb[0].zone) {
1253 dmz_zmd_err(zmd, "Primary super block zone not set");
1258 zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
1259 zmd->sb[0].dev = zmd->sb[0].zone->dev;
1271 if (!zmd->sb[1].zone) {
1273 zmd->sb[0].zone->id + zmd->nr_meta_zones;
1275 zmd->sb[1].zone = dmz_get(zmd, zone_id);
1277 zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
1342 sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
1344 if (!dmz_is_meta(sb->zone)) {
1346 "Tertiary super block zone %u not marked as metadata zone",
1347 sb->zone->id);
1370 * Initialize a zone descriptor.
1377 struct dm_zone *zone;
1379 zone = dmz_insert(zmd, idx, dev);
1380 if (IS_ERR(zone))
1381 return PTR_ERR(zone);
1385 /* Ignore any runt (smaller) zone */
1386 set_bit(DMZ_OFFLINE, &zone->flags);
1394 * Devices that have zones with a capacity smaller than the zone size
1402 set_bit(DMZ_RND, &zone->flags);
1406 set_bit(DMZ_SEQ, &zone->flags);
1412 if (dmz_is_rnd(zone))
1413 zone->wp_block = 0;
1415 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1418 set_bit(DMZ_OFFLINE, &zone->flags);
1420 set_bit(DMZ_READ_ONLY, &zone->flags);
1423 if (dmz_is_rnd(zone)) {
1425 if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
1426 /* Primary super block zone */
1427 zmd->sb[0].zone = zone;
1434 * as metadata zone.
1436 set_bit(DMZ_META, &zone->flags);
1448 struct dm_zone *zone;
1450 zone = dmz_insert(zmd, idx, dev);
1451 if (IS_ERR(zone))
1452 return PTR_ERR(zone);
1453 set_bit(DMZ_CACHE, &zone->flags);
1454 zone->wp_block = 0;
1458 /* Disable runt zone */
1459 set_bit(DMZ_OFFLINE, &zone->flags);
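
For reference, the zone state bits set and tested throughout this listing would live in a single flags enum along these lines (bit ordering assumed; the names are the ones used above and below):

/* Sketch: dm_zone->flags bits, order assumed. */
enum {
	DMZ_OFFLINE,		/* Zone unusable (dead or runt zone) */
	DMZ_READ_ONLY,		/* Zone can only be read */
	DMZ_META,		/* Zone holds metadata */
	DMZ_CACHE,		/* Cache zone on the regular device */
	DMZ_RND,		/* Randomly writable zone */
	DMZ_SEQ,		/* Sequential-write-required zone */
	DMZ_BUF,		/* Zone buffers a sequential data zone */
	DMZ_RECLAIM,		/* Zone locked for reclaim */
	DMZ_SEQ_WRITE_ERR,	/* Write pointer out of sync with metadata */
	DMZ_RECLAIM_TERMINATE,	/* Abort a pending reclaim wait */
};
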
1475 struct dm_zone *zone = xa_load(&zmd->zones, idx);
1477 kfree(zone);
1484 * Allocate and initialize zone descriptors using the zone
1503 /* Allocate zone array */
1526 DMDEBUG("(%s): Using %zu B for zone information",
1539 * Primary superblock zone is always at zone 0 when multiple
1542 zmd->sb[0].zone = dmz_get(zmd, 0);
1561 * Get zone information and initialize zone descriptors. At the same
1563 * first randomly writable zone.
1580 struct dm_zone *zone = data;
1582 clear_bit(DMZ_OFFLINE, &zone->flags);
1583 clear_bit(DMZ_READ_ONLY, &zone->flags);
1585 set_bit(DMZ_OFFLINE, &zone->flags);
1587 set_bit(DMZ_READ_ONLY, &zone->flags);
1589 if (dmz_is_seq(zone))
1590 zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
1592 zone->wp_block = 0;
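
Putting the fragments above together, a plausible completion of the report callback, using the block layer's report_zones_cb prototype (the condition checks are assumptions inferred from the visible set_bit/clear_bit pairs):

static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	/* Recompute the offline/read-only state from the fresh report. */
	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	/* Only sequential zones have a meaningful write pointer. */
	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}
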
1597 * Update zone information.
1599 static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1601 struct dmz_dev *dev = zone->dev;
1609 * Get zone information from disk. Since blkdev_report_zones() uses
1615 ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
1616 dmz_update_zone_cb, zone);
1622 dmz_dev_err(dev, "Get zone %u report failed",
1623 zone->id);
1632 * Check a zone write pointer position when the zone is marked
1636 struct dm_zone *zone)
1638 struct dmz_dev *dev = zone->dev;
1642 wp = zone->wp_block;
1643 ret = dmz_update_zone(zmd, zone);
1647 dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
1648 zone->id, zone->wp_block, wp);
1650 if (zone->wp_block < wp) {
1651 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
1652 wp - zone->wp_block);
1659 * Reset a zone write pointer.
1661 static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1669 if (dmz_is_offline(zone) ||
1670 dmz_is_readonly(zone) ||
1671 dmz_is_rnd(zone))
1674 if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
1675 struct dmz_dev *dev = zone->dev;
1678 dmz_start_sect(zmd, zone),
1681 dmz_dev_err(dev, "Reset zone %u failed %d",
1682 zone->id, ret);
1688 clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
1689 zone->wp_block = 0;
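
The reset request itself is elided between the lines above; a sketch assuming the block layer's blkdev_zone_mgmt() interface and a zmd->zone_nr_sectors field (the exact signature has varied across kernel versions):

/* Hypothetical wrapper showing the elided zone-management call. */
static int dmz_issue_zone_reset(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;

	return blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				dmz_start_sect(zmd, zone),
				zmd->zone_nr_sectors, GFP_NOIO);
}
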
1694 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);
1714 /* Get chunk mapping table blocks and initialize zone mapping */
1727 /* Check data zone */
1733 dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
1740 dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
1755 /* Check buffer zone */
1761 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
1768 dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
1773 dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
1821 /* Unmapped data zone */
1863 * This rotates a zone to the end of its map list.
1865 static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1867 if (list_empty(&zone->link))
1870 list_del_init(&zone->link);
1871 if (dmz_is_seq(zone)) {
1872 /* LRU rotate sequential zone */
1873 list_add_tail(&zone->link, &zone->dev->map_seq_list);
1874 } else if (dmz_is_cache(zone)) {
1875 /* LRU rotate cache zone */
1876 list_add_tail(&zone->link, &zmd->map_cache_list);
1878 /* LRU rotate random zone */
1879 list_add_tail(&zone->link, &zone->dev->map_rnd_list);
1885 * in LRU order. This rotates a zone to the end of the list.
1887 static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
1889 __dmz_lru_zone(zmd, zone);
1890 if (zone->bzone)
1891 __dmz_lru_zone(zmd, zone->bzone);
1895 * Wait for any zone to be freed.
1913 * Lock a zone for reclaim (set the zone RECLAIM bit).
1914 * Returns false if the zone cannot be locked or if it is already locked
1917 int dmz_lock_zone_reclaim(struct dm_zone *zone)
1920 if (dmz_is_active(zone))
1923 return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
1927 * Clear a zone reclaim flag.
1929 void dmz_unlock_zone_reclaim(struct dm_zone *zone)
1931 WARN_ON(dmz_is_active(zone));
1932 WARN_ON(!dmz_in_reclaim(zone));
1934 clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
1936 wake_up_bit(&zone->flags, DMZ_RECLAIM);
1940 * Wait for a zone reclaim to complete.
1942 static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
1946 set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
1947 wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
1948 clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
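
Taken together, a reclaim path would pair these helpers roughly as follows (the caller shape is an assumption; the bit operations are the ones shown above):

/* Hypothetical caller: DMZ_RECLAIM acts as a per-zone try-lock. */
static void reclaim_one_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (!dmz_lock_zone_reclaim(zone))
		return;	/* Zone is active or already under reclaim. */

	/* ... copy valid blocks and remap the chunk ... */

	/* Clears DMZ_RECLAIM and wakes any wait_on_bit() waiters. */
	dmz_unlock_zone_reclaim(zone);
}
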
1954 * Select a cache or random write zone for reclaim.
1960 struct dm_zone *zone, *maxw_z = NULL;
1963 /* If we have cache zones select from the cache zone list */
1973 * Find the buffer zone with the heaviest weight or the first (oldest)
1974 * data zone that can be reclaimed.
1976 list_for_each_entry(zone, zone_list, link) {
1977 if (dmz_is_buf(zone)) {
1978 dzone = zone->bzone;
1984 dzone = zone;
1996 * first zone that can be reclaimed regardless of its weight.
1998 list_for_each_entry(zone, zone_list, link) {
1999 if (dmz_is_buf(zone)) {
2000 dzone = zone->bzone;
2004 dzone = zone;
2013 * Select a buffered sequential zone for reclaim.
2018 struct dm_zone *zone;
2020 list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
2021 if (!zone->bzone)
2023 if (dmz_lock_zone_reclaim(zone))
2024 return zone;
2031 * Select a zone for reclaim.
2036 struct dm_zone *zone = NULL;
2039 * Search for a zone candidate to reclaim: 2 cases are possible.
2040 * (1) There are no free sequential zones. Then a random data zone
2041 * cannot be reclaimed. So choose a sequential zone to reclaim so
2042 * that afterward a random zone can be reclaimed.
2043 * (2) At least one free sequential zone is available, then choose
2044 * the oldest random zone (data or buffer) that can be locked.
2048 zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
2049 if (!zone)
2050 zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
2053 return zone;
2057 * Get the zone mapping a chunk, if the chunk is already mapped.
2058 * If no mapping exists and the operation is WRITE, a zone is
2060 * The zone returned will be set to the active state.
2084 /* Allocate a random zone */
2098 /* The chunk is already mapped: get the mapping zone */
2121 * If the zone is being reclaimed, the chunk mapping may change
2122 * to a different zone. So wait for reclaim and retry. Otherwise,
2123 * activate the zone (this will prevent reclaim from touching it).
2154 /* Empty buffer zone: reclaim it */
2161 /* Deactivate the data zone */
2166 /* Unbuffered inactive empty data zone: reclaim it */
2175 * Allocate and map a random zone to buffer a chunk
2176 * already mapped to a sequential zone.
2190 /* Allocate a random zone */
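
Once the random zone is allocated, the data and buffer zones are cross-linked through their bzone pointers, the inverse of the teardown visible in dmz_unmap_zone() further down. A sketch of the linking step (the surrounding allocation and map update are elided):

/* Sketch: cross-link a new buffer zone with its sequential data zone. */
bzone->chunk = dzone->chunk;	/* Buffer serves the same chunk. */
bzone->bzone = dzone;		/* Buffer zone points at its data zone... */
dzone->bzone = bzone;		/* ...and the data zone at its buffer. */
set_bit(DMZ_BUF, &bzone->flags);
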
2219 * Get an unmapped (free) zone.
2226 struct dm_zone *zone;
2246 * No free zone: return NULL if this is not for reclaim.
2262 zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
2264 if (zone) {
2265 list_del_init(&zone->link);
2268 return zone;
2271 zone = list_first_entry(list, struct dm_zone, link);
2272 list_del_init(&zone->link);
2274 if (dmz_is_cache(zone))
2276 else if (dmz_is_rnd(zone))
2277 atomic_dec(&zone->dev->unmap_nr_rnd);
2279 atomic_dec(&zone->dev->unmap_nr_seq);
2281 if (dmz_is_offline(zone)) {
2282 dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
2283 zone = NULL;
2286 if (dmz_is_meta(zone)) {
2287 dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
2288 zone = NULL;
2291 return zone;
2295 * Free a zone.
2298 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2300 /* If this is a sequential zone, reset it */
2301 if (dmz_is_seq(zone))
2302 dmz_reset_zone(zmd, zone);
2304 /* Return the zone to the unmap list for its type */
2305 if (dmz_is_cache(zone)) {
2306 list_add_tail(&zone->link, &zmd->unmap_cache_list);
2308 } else if (dmz_is_rnd(zone)) {
2309 list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
2310 atomic_inc(&zone->dev->unmap_nr_rnd);
2311 } else if (dmz_is_reserved(zone)) {
2312 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
2315 list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
2316 atomic_inc(&zone->dev->unmap_nr_seq);
2323 * Map a chunk to a zone.
2342 * Unmap a zone.
2345 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2347 unsigned int chunk = zone->chunk;
2355 if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
2357 * Unmapping the chunk buffer zone: clear only
2360 dzone_id = zone->bzone->id;
2361 zone->bzone->bzone = NULL;
2362 zone->bzone = NULL;
2366 * Unmapping the chunk data zone: the zone must
2369 if (WARN_ON(zone->bzone)) {
2370 zone->bzone->bzone = NULL;
2371 zone->bzone = NULL;
2378 zone->chunk = DMZ_MAP_UNMAPPED;
2379 list_del_init(&zone->link);
2415 * Get the bitmap block storing the bit for chunk_block in zone.
2418 struct dm_zone *zone,
2422 (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
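
The index computation is truncated above; a plausible completion, assuming the on-disk layout puts the per-zone bitmap blocks right after the superblock and mapping table (nr_map_blocks, DMZ_BLOCK_SHIFT_BITS and dmz_get_mblock() are assumed names):

static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
					 struct dm_zone *zone,
					 sector_t chunk_block)
{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	/* Read (or fetch from cache) the metadata block holding the bit. */
	return dmz_get_mblock(zmd, bitmap_block);
}
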
2474 /* Get a valid region from the source zone */
2493 int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2501 dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
2502 zone->id, (unsigned long long)chunk_block,
2509 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2528 if (likely(zone->weight + n <= zone_nr_blocks))
2529 zone->weight += n;
2532 zone->id, zone->weight,
2534 zone->weight = zone_nr_blocks;
2574 int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2581 dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
2582 zone->id, (u64)chunk_block, nr_blocks);
2588 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2608 if (zone->weight >= n)
2609 zone->weight -= n;
2612 zone->id, zone->weight, n);
2613 zone->weight = 0;
2622 static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2631 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2648 static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2662 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2691 int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
2696 valid = dmz_test_block(zmd, zone, chunk_block);
2701 return dmz_to_next_set_block(zmd, zone, chunk_block,
2706 * Find the first valid block from @chunk_block in @zone.
2711 int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2717 ret = dmz_to_next_set_block(zmd, zone, start_block,
2725 return dmz_to_next_set_block(zmd, zone, start_block,
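
The two truncated calls above suggest the usual two-step scan: advance to the first set bit, then measure the length of the valid run. A hedged reconstruction (the zone_nr_blocks - 1 bound and the final set/clear argument are assumptions):

int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

	/* Skip invalid blocks up to the first valid one. */
	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->zone_nr_blocks - 1, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	/* Return the number of consecutive valid blocks from there. */
	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->zone_nr_blocks - 1, 0);
}
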
2758 * Get a zone weight.
2760 static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
2771 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2789 zone->weight = n;
2801 /* Release zone mapping resources */
2845 /* Free the zone descriptors */
2888 struct dm_zone *zone;
2916 /* Initialize zone descriptors */
2928 zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
2929 if (!zone) {
2931 "metadata zone %u not present", i);
2935 if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
2937 "metadata zone %d is not random", i);
2941 set_bit(DMZ_META, &zone->flags);
2992 dmz_zmd_debug(zmd, " %u data zone mapping blocks",
3019 * Check zone information on resume.
3023 struct dm_zone *zone;
3030 zone = dmz_get(zmd, i);
3031 if (!zone) {
3032 dmz_zmd_err(zmd, "Unable to get zone %u", i);
3035 wp_block = zone->wp_block;
3037 ret = dmz_update_zone(zmd, zone);
3039 dmz_zmd_err(zmd, "Broken zone %u", i);
3043 if (dmz_is_offline(zone)) {
3049 if (!dmz_is_seq(zone))
3050 zone->wp_block = 0;
3051 else if (zone->wp_block != wp_block) {
3053 i, (u64)zone->wp_block, (u64)wp_block);
3054 zone->wp_block = wp_block;
3055 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
3056 zmd->zone_nr_blocks - zone->wp_block);