Lines matching refs: mg — drivers/md/dm-cache-target.c

All of the matches reference struct dm_cache_migration, the per-migration state the dm-cache target uses to move a block between the origin and cache devices (promote, demote, writeback) and to invalidate mappings. After each group of matches, a sketch of the surrounding function is given; anything not shown in a matched line is inferred from the upstream pattern rather than quoted.

546 struct dm_cache_migration *mg;
548 mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
550 memset(mg, 0, sizeof(*mg));
552 mg->cache = cache;
555 return mg;
558 static void free_migration(struct dm_cache_migration *mg)
560 struct cache *cache = mg->cache;
565 mempool_free(mg, &cache->migration_pool);
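The matches at 546-565 are the migration allocator and destructor. A plausible reconstruction of the pair, assuming the usual mempool pattern; the allocation counter and wait queue names are inferred from upstream, not shown in the matches:

static struct dm_cache_migration *alloc_migration(struct cache *cache)
{
	struct dm_cache_migration *mg;

	/*
	 * GFP_NOIO: this runs on the I/O path, so the allocation must not
	 * recurse into I/O reclaim.  With a sleeping gfp mask mempool_alloc()
	 * never returns NULL, hence no error check.
	 */
	mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);

	memset(mg, 0, sizeof(*mg));

	mg->cache = cache;
	atomic_inc(&cache->nr_allocated_migrations);	/* assumed counter */

	return mg;
}

static void free_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	/* wake anyone throttling on the number of in-flight migrations */
	if (atomic_dec_and_test(&cache->nr_allocated_migrations))
		wake_up(&cache->migration_wait);

	mempool_free(mg, &cache->migration_pool);
}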
1137 static void quiesce(struct dm_cache_migration *mg,
1140 init_continuation(&mg->k, continuation);
1141 dm_cell_quiesce_v2(mg->cache->prison, mg->cell, &mg->k.ws);
1152 struct dm_cache_migration *mg = container_of(context, struct dm_cache_migration, k);
1155 mg->k.input = BLK_STS_IOERR;
1157 queue_continuation(mg->cache->wq, &mg->k);
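quiesce() and copy_complete() illustrate the continuation idiom this file is built on: mg->k bundles a work_struct with an error field, so any asynchronous completion (cell quiescing, kcopyd, bio endio) can record a status and queue the next step of the state machine on the cache's workqueue. A minimal sketch of the helpers, matching the upstream pattern:

#include <linux/workqueue.h>
#include <linux/blk_types.h>

struct continuation {
	struct work_struct ws;
	blk_status_t input;	/* error carried into the next step */
};

static inline void init_continuation(struct continuation *k,
				     void (*fn)(struct work_struct *))
{
	INIT_WORK(&k->ws, fn);
	k->input = 0;
}

static inline void queue_continuation(struct workqueue_struct *wq,
				      struct continuation *k)
{
	queue_work(wq, &k->ws);
}

dm_cell_quiesce_v2() takes the embedded work_struct directly, so the continuation fires once every holder of the cell has dropped its reference.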
1160 static void copy(struct dm_cache_migration *mg, bool promote)
1163 struct cache *cache = mg->cache;
1166 o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
1170 c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
1174 dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, &mg->k);
1176 dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, &mg->k);
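copy() at 1160-1176 describes both migration directions with one helper: it builds a dm_io_region over the origin block and one over the cache block, then lets the promote flag pick source and destination for kcopyd. Reconstructed roughly, assuming the device fields used upstream:

static void copy(struct dm_cache_migration *mg, bool promote)
{
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;
	o_region.sector = from_oblock(mg->op->oblock) * cache->sectors_per_block;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = from_cblock(mg->op->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (promote)
		/* promote: origin -> cache */
		dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0,
			       copy_complete, &mg->k);
	else
		/* demote / writeback: cache -> origin */
		dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0,
			       copy_complete, &mg->k);
}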
1190 struct dm_cache_migration *mg = bio->bi_private;
1191 struct cache *cache = mg->cache;
1197 mg->k.input = bio->bi_status;
1199 queue_continuation(cache->wq, &mg->k);
1202 static void overwrite(struct dm_cache_migration *mg,
1205 struct bio *bio = mg->overwrite_bio;
1208 dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
1214 if (mg->op->op == POLICY_PROMOTE)
1215 remap_to_cache(mg->cache, bio, mg->op->cblock);
1217 remap_to_origin(mg->cache, bio);
1219 init_continuation(&mg->k, continuation);
1220 accounted_request(mg->cache, bio);
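overwrite() (1202-1220) is the optimisation that skips the copy when the triggering bio rewrites the whole block anyway: the bio's endio is hooked so its completion re-enters the state machine through overwrite_endio(), the bio is remapped at whichever device the policy op targets, and it is issued through the cache's accounting. A sketch, with the per-bio-data lookup assumed from the upstream helpers:

static void overwrite(struct dm_cache_migration *mg,
		      void (*continuation)(struct work_struct *))
{
	struct bio *bio = mg->overwrite_bio;
	struct per_bio_data *pb = get_per_bio_data(bio);	/* assumed helper */

	/* steal the endio so the write's completion resumes the migration */
	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);

	/*
	 * The cache block only becomes valid once the metadata commits,
	 * so it is safe to write the new data to either location here.
	 */
	if (mg->op->op == POLICY_PROMOTE)
		remap_to_cache(mg->cache, bio, mg->op->cblock);
	else
		remap_to_origin(mg->cache, bio);

	init_continuation(&mg->k, continuation);
	accounted_request(mg->cache, bio);
}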
1234 static void mg_complete(struct dm_cache_migration *mg, bool success)
1237 struct cache *cache = mg->cache;
1238 struct policy_work *op = mg->op;
1249 if (mg->overwrite_bio) {
1252 else if (mg->k.input)
1253 mg->overwrite_bio->bi_status = mg->k.input;
1255 mg->overwrite_bio->bi_status = BLK_STS_IOERR;
1256 bio_endio(mg->overwrite_bio);
1283 if (mg->cell) {
1284 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1285 free_prison_cell(cache, mg->cell);
1288 free_migration(mg);
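mg_complete() (1234-1288) is the single exit point for a migration. The matches show its three duties: settle the overwrite bio (propagating mg->k.input on failure, falling back to BLK_STS_IOERR), drop the cell lock, and return the migration to the pool. A condensed sketch; the stats update, policy notification and deferred-bio helpers are assumed from upstream and elided or named speculatively:

static void mg_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;

	bio_list_init(&bios);

	/* stats update and policy_complete_background_work() elided */

	if (mg->overwrite_bio) {
		if (!success) {
			/* prefer the error captured by the continuation */
			if (mg->k.input)
				mg->overwrite_bio->bi_status = mg->k.input;
			else
				mg->overwrite_bio->bi_status = BLK_STS_IOERR;
		}
		bio_endio(mg->overwrite_bio);
	}

	if (mg->cell) {
		/* returns true once no holders remain and the cell is dead */
		if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
			free_prison_cell(cache, mg->cell);
	}

	free_migration(mg);
	defer_bios(cache, &bios);	/* reissue bios that waited on the lock */
}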
1297 struct dm_cache_migration *mg = ws_to_mg(ws);
1298 mg_complete(mg, mg->k.input == 0);
1304 struct dm_cache_migration *mg = ws_to_mg(ws);
1305 struct cache *cache = mg->cache;
1306 struct policy_work *op = mg->op;
1316 mg_complete(mg, false);
1319 mg_complete(mg, true);
1329 mg_complete(mg, false);
1352 init_continuation(&mg->k, mg_success);
1353 continue_after_commit(&cache->committer, &mg->k);
1358 mg_complete(mg, true);
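mg_success() (1297-1298) just feeds the continuation's error back into mg_complete(); mg_update_metadata() (1304-1358) then records the policy decision in the metadata. The matches outline three arms: a promote inserts the new mapping and completes immediately, a demote removes the mapping but must not complete until the removal is committed, and a writeback simply completes. A condensed reconstruction, assuming the dm-cache metadata API and the batcher helpers used upstream:

static void mg_update_metadata(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;

	switch (op->op) {
	case POLICY_PROMOTE:
		r = dm_cache_insert_mapping(cache->cmd, op->cblock, op->oblock);
		if (r) {
			mg_complete(mg, false);
			return;
		}
		mg_complete(mg, true);
		break;

	case POLICY_DEMOTE:
		r = dm_cache_remove_mapping(cache->cmd, op->cblock);
		if (r) {
			mg_complete(mg, false);
			return;
		}

		/*
		 * A demote must wait for the commit: until the removal is
		 * on disk, a crash would replay the old mapping and read
		 * stale data from a cache block that may be reused.
		 */
		init_continuation(&mg->k, mg_success);
		continue_after_commit(&cache->committer, &mg->k);
		schedule_commit(&cache->committer);
		break;

	case POLICY_WRITEBACK:
		mg_complete(mg, true);
		break;
	}
}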
1365 struct dm_cache_migration *mg = ws_to_mg(ws);
1370 if (mg->k.input)
1371 mg_complete(mg, false);
1379 struct dm_cache_migration *mg = ws_to_mg(ws);
1384 if (mg->k.input)
1385 mg_complete(mg, false);
1391 r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
1394 mg_complete(mg, false);
1397 quiesce(mg, mg_update_metadata);
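mg_update_metadata_after_copy() and mg_upgrade_lock() (1365-1397) are the two continuations that can run once data movement finishes. The upgrade path promotes the WRITE-level cell lock to READ_WRITE so in-flight readers are excluded before the mapping changes; if readers still hold the cell it quiesces into mg_update_metadata() rather than calling it directly. Plausibly:

static void mg_update_metadata_after_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/* did the overwrite succeed? */
	if (mg->k.input)
		mg_complete(mg, false);
	else
		mg_update_metadata(ws);
}

static void mg_upgrade_lock(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);

	/* did the copy itself fail? */
	if (mg->k.input)
		mg_complete(mg, false);

	else {
		/* now exclude reads as well as writes */
		r = dm_cell_lock_promote_v2(mg->cache->prison, mg->cell,
					    READ_WRITE_LOCK_LEVEL);
		if (r < 0)
			mg_complete(mg, false);

		else if (r)
			/* holders remain: wait for them to drain */
			quiesce(mg, mg_update_metadata);

		else
			mg_update_metadata(ws);
	}
}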
1406 struct dm_cache_migration *mg = ws_to_mg(ws);
1407 struct cache *cache = mg->cache;
1408 struct policy_work *op = mg->op;
1417 init_continuation(&mg->k, mg_upgrade_lock);
1418 copy(mg, is_policy_promote);
1423 struct dm_cache_migration *mg = ws_to_mg(ws);
1425 if (mg->overwrite_bio) {
1431 if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
1435 bool rb = bio_detain_shared(mg->cache, mg->op->oblock, mg->overwrite_bio);
1437 mg->overwrite_bio = NULL;
1438 inc_io_migrations(mg->cache);
1450 overwrite(mg, mg_update_metadata_after_copy);
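mg_full_copy() (1406-1418) and mg_copy() (1423-1450) choose between the two data paths. mg_copy() re-tests optimisable_bio() now that the lock is held: if the overwrite shortcut no longer applies (e.g. the block is no longer discarded), it detains the bio as a shared holder, drops it from the migration and falls back to a full copy; otherwise it issues the overwrite with mg_update_metadata_after_copy() as the continuation, since mg_lock_writes() already took READ_WRITE level for overwrites and no upgrade is needed. A sketch along upstream lines, with the dirty/discard predicates assumed:

static void mg_full_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;
	struct policy_work *op = mg->op;
	bool is_policy_promote = (op->op == POLICY_PROMOTE);

	/* nothing to move for a clean demote or a discarded origin block */
	if ((!is_policy_promote && !is_dirty(cache, op->cblock)) ||
	    is_discarded_oblock(cache, op->oblock)) {
		mg_upgrade_lock(ws);
		return;
	}

	init_continuation(&mg->k, mg_upgrade_lock);
	copy(mg, is_policy_promote);
}

static void mg_copy(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	if (mg->overwrite_bio) {
		/* conditions may have changed since the unlocked check */
		if (!optimisable_bio(mg->cache, mg->overwrite_bio, mg->op->oblock)) {
			bool rb = bio_detain_shared(mg->cache, mg->op->oblock,
						    mg->overwrite_bio);
			BUG_ON(rb);	/* the shared detain must succeed here */
			mg->overwrite_bio = NULL;
			inc_io_migrations(mg->cache);
			mg_full_copy(ws);
			return;
		}

		overwrite(mg, mg_update_metadata_after_copy);
	} else
		mg_full_copy(ws);
}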
1456 static int mg_lock_writes(struct dm_cache_migration *mg)
1460 struct cache *cache = mg->cache;
1470 build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
1472 mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
1473 prealloc, &mg->cell);
1476 mg_complete(mg, false);
1480 if (mg->cell != prealloc)
1484 mg_copy(&mg->k.ws);
1486 quiesce(mg, mg_copy);
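mg_lock_writes() (1456-1486) takes the cell lock that protects the whole migration. It keys the lock over the single origin block (oblock to its successor) and chooses READ_WRITE level when an overwrite bio is present (the overwrite writes new data, so readers must be excluded from the start), WRITE level otherwise. r == 0 means the lock was granted with no holders to wait for, so mg_copy() runs synchronously; r > 0 means quiesce first. Reconstructed:

static int mg_lock_writes(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);

	build_key(mg->op->oblock, oblock_succ(mg->op->oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
			    mg->overwrite_bio ? READ_WRITE_LOCK_LEVEL : WRITE_LOCK_LEVEL,
			    prealloc, &mg->cell);
	if (r < 0) {
		free_prison_cell(cache, prealloc);
		mg_complete(mg, false);
		return r;
	}

	/* the prealloc cell is only consumed if no cell existed yet */
	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);

	if (r == 0)
		mg_copy(&mg->k.ws);
	else
		quiesce(mg, mg_copy);

	return 0;
}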
1493 struct dm_cache_migration *mg;
1500 mg = alloc_migration(cache);
1502 mg->op = op;
1503 mg->overwrite_bio = bio;
1508 return mg_lock_writes(mg);
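mg_start() (1493-1508) is the entry point the worker calls for each policy_work item: it allocates the migration, records the op and any optimisable bio, then kicks off locking. A sketch, with background_work_begin() (the suspend/resume gate) assumed from the upstream pattern:

static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio)
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache)) {
		/* target is suspending: hand the work straight back */
		policy_complete_background_work(cache->policy, op, false);
		return -EPERM;
	}

	mg = alloc_migration(cache);

	mg->op = op;
	mg->overwrite_bio = bio;

	if (!bio)
		inc_io_migrations(cache);

	return mg_lock_writes(mg);
}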
1515 static void invalidate_complete(struct dm_cache_migration *mg, bool success)
1518 struct cache *cache = mg->cache;
1521 if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
1522 free_prison_cell(cache, mg->cell);
1524 if (!success && mg->overwrite_bio)
1525 bio_io_error(mg->overwrite_bio);
1527 free_migration(mg);
1535 struct dm_cache_migration *mg = ws_to_mg(ws);
1536 invalidate_complete(mg, !mg->k.input);
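The invalidation path (used for cache shrink and user-requested invalidation) mirrors mg_complete(): unlock the cell, error the bio only on failure, free the migration. invalidate_completed() adapts it to the continuation signature. Plausibly:

static void invalidate_complete(struct dm_cache_migration *mg, bool success)
{
	struct bio_list bios;
	struct cache *cache = mg->cache;

	bio_list_init(&bios);
	if (dm_cell_unlock_v2(cache->prison, mg->cell, &bios))
		free_prison_cell(cache, mg->cell);

	/* on success the bio was already handed on; only fail it here */
	if (!success && mg->overwrite_bio)
		bio_io_error(mg->overwrite_bio);

	free_migration(mg);
	defer_bios(cache, &bios);	/* assumed, as in mg_complete() */
}

static void invalidate_completed(struct work_struct *ws)
{
	struct dm_cache_migration *mg = ws_to_mg(ws);

	invalidate_complete(mg, !mg->k.input);
}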
1565 struct dm_cache_migration *mg = ws_to_mg(ws);
1566 struct cache *cache = mg->cache;
1568 r = invalidate_cblock(cache, mg->invalidate_cblock);
1570 invalidate_complete(mg, false);
1574 init_continuation(&mg->k, invalidate_completed);
1575 continue_after_commit(&cache->committer, &mg->k);
1576 remap_to_origin_clear_discard(cache, mg->overwrite_bio, mg->invalidate_oblock);
1577 mg->overwrite_bio = NULL;
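invalidate_remove() (1565-1577) drops the mapping via invalidate_cblock(), then arranges completion only after the metadata commit, for the same crash-consistency reason as a demote; the triggering bio is remapped to the origin (clearing any discard state for the block) and ownership leaves the migration before the commit is kicked. Roughly:

static void invalidate_remove(struct work_struct *ws)
{
	int r;
	struct dm_cache_migration *mg = ws_to_mg(ws);
	struct cache *cache = mg->cache;

	r = invalidate_cblock(cache, mg->invalidate_cblock);
	if (r) {
		invalidate_complete(mg, false);
		return;
	}

	init_continuation(&mg->k, invalidate_completed);
	continue_after_commit(&cache->committer, &mg->k);
	remap_to_origin_clear_discard(cache, mg->overwrite_bio,
				      mg->invalidate_oblock);
	mg->overwrite_bio = NULL;
	schedule_commit(&cache->committer);	/* assumed: kick the batcher */
}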
1581 static int invalidate_lock(struct dm_cache_migration *mg)
1585 struct cache *cache = mg->cache;
1590 build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
1592 READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
1595 invalidate_complete(mg, false);
1599 if (mg->cell != prealloc)
1603 quiesce(mg, invalidate_remove);
1610 init_continuation(&mg->k, invalidate_remove);
1611 queue_work(cache->wq, &mg->k.ws);
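invalidate_lock() (1581-1611) takes the cell at READ_WRITE level over the origin block being invalidated. One detail differs from mg_lock_writes(): even when the lock is granted immediately (r == 0), invalidate_remove() is not called inline but bounced through the workqueue, because invalidate_lock() can run in request context and the removal path may block. Reconstructed:

static int invalidate_lock(struct dm_cache_migration *mg)
{
	int r;
	struct dm_cell_key_v2 key;
	struct cache *cache = mg->cache;
	struct dm_bio_prison_cell_v2 *prealloc;

	prealloc = alloc_prison_cell(cache);

	build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
	r = dm_cell_lock_v2(cache->prison, &key,
			    READ_WRITE_LOCK_LEVEL, prealloc, &mg->cell);
	if (r < 0) {
		free_prison_cell(cache, prealloc);
		invalidate_complete(mg, false);
		return r;
	}

	if (mg->cell != prealloc)
		free_prison_cell(cache, prealloc);

	if (r)
		quiesce(mg, invalidate_remove);
	else {
		/* we may still be in request context, so don't recurse */
		init_continuation(&mg->k, invalidate_remove);
		queue_work(cache->wq, &mg->k.ws);
	}

	return 0;
}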
1620 struct dm_cache_migration *mg;
1625 mg = alloc_migration(cache);
1627 mg->overwrite_bio = bio;
1628 mg->invalidate_cblock = cblock;
1629 mg->invalidate_oblock = oblock;
1631 return invalidate_lock(mg);
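invalidate_start() (1620-1631) parallels mg_start(): gate on suspension, allocate a migration, stash the bio and the cblock/oblock pair to invalidate, then lock. Plausibly:

static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
			    dm_oblock_t oblock, struct bio *bio)
{
	struct dm_cache_migration *mg;

	if (!background_work_begin(cache))	/* assumed suspend gate */
		return -EPERM;

	mg = alloc_migration(cache);

	mg->overwrite_bio = bio;
	mg->invalidate_cblock = cblock;
	mg->invalidate_oblock = oblock;

	return invalidate_lock(mg);
}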