Lines Matching refs: cblock
743 * This assumes the cblock hasn't already been allocated.
1193 work.cblock = infer_cblock(mq, e);
1224 work.cblock = infer_cblock(mq, e);
1256 * We allocate the entry now to reserve the cblock. If the
1264 work.cblock = infer_cblock(mq, e);
1371 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
1385 *cblock = infer_cblock(mq, e);
1406 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
1415 r = __lookup(mq, oblock, cblock,
1424 dm_oblock_t oblock, dm_cblock_t *cblock,
1434 r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
1462 * case of promotion free the entry for the destination cblock.
1469 from_cblock(work->cblock));
1523 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
1525 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1536 static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1542 __smq_set_clear_dirty(mq, cblock, true);
1546 static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1552 __smq_set_clear_dirty(mq, cblock, false);
1556 static unsigned random_level(dm_cblock_t cblock)
1558 return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
1562 dm_oblock_t oblock, dm_cblock_t cblock,
1568 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
1571 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
1583 static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
1586 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1598 static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
1601 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
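The only hit above where a cblock feeds directly into queue placement is the random_level() line at 1558. Below is a minimal standalone sketch of that mapping; it assumes the kernel's hash_32() multiplicative-hash behaviour and NR_CACHE_LEVELS == 64 (values taken from the surrounding smq source, not from this listing), and it flattens dm_cblock_t to a plain uint32_t for illustration only.

/*
 * Userspace sketch (not kernel code): approximates how random_level()
 * maps a cache block index to one of the NR_CACHE_LEVELS queue levels.
 * GOLDEN_RATIO_32 and the shift mirror the kernel's hash_32().
 */
#include <stdint.h>
#include <stdio.h>

#define NR_CACHE_LEVELS 64u          /* assumed from dm-cache-policy-smq.c */
#define GOLDEN_RATIO_32 0x61C88647u  /* kernel's 32-bit golden-ratio constant */

typedef uint32_t dm_cblock_t;        /* kernel wraps this in a __bitwise type */

static inline uint32_t from_cblock(dm_cblock_t b)
{
	return b;
}

/* hash_32(val, 9): multiplicative hash, keep the top 9 bits */
static inline uint32_t hash_32(uint32_t val, unsigned bits)
{
	return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

static unsigned random_level(dm_cblock_t cblock)
{
	return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
}

int main(void)
{
	/* A few sample cblocks and the level (0..63) each one lands in. */
	for (dm_cblock_t c = 0; c < 8; c++)
		printf("cblock %u -> level %u\n", c, random_level(c));
	return 0;
}

The masking works because 9 hash bits give a value in 0..511 and NR_CACHE_LEVELS - 1 (63) keeps the low 6 bits, so unhinted mappings loaded via smq_load_mapping() (line 1571) scatter evenly across the levels instead of piling up at level 0.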