Lines matching refs: ctx (drivers/md/raid5-cache.c)

1633 struct r5l_recovery_ctx *ctx)
1637 ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, &log->bs);
1638 if (!ctx->ra_bio)
1641 ctx->valid_pages = 0;
1642 ctx->total_pages = 0;
1643 while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1648 ctx->ra_pool[ctx->total_pages] = page;
1649 ctx->total_pages += 1;
1652 if (ctx->total_pages == 0) {
1653 bio_put(ctx->ra_bio);
1657 ctx->pool_offset = 0;
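
Read together, the matches above are the body of r5l_recovery_allocate_ra_pool(): it allocates the read-ahead bio, then grabs up to R5L_RECOVERY_PAGE_POOL_SIZE pages best-effort, keeps however many it got, and only fails (dropping the bio again) if it got none. A stand-alone user-space sketch of that best-effort pattern, with made-up names (POOL_SIZE, PAGE_SZ, struct ra_pool) rather than the kernel API:

#include <stdlib.h>

#define POOL_SIZE 256     /* stand-in for R5L_RECOVERY_PAGE_POOL_SIZE */
#define PAGE_SZ   4096

struct ra_pool {
	void *pages[POOL_SIZE];
	int total_pages;    /* buffers we actually managed to allocate */
	int valid_pages;    /* buffers currently holding on-disk data */
	long pool_offset;   /* sector at which the cached window starts */
};

/* Best effort: keep whatever allocations succeed, fail only if we got none. */
static int ra_pool_alloc(struct ra_pool *ctx)
{
	ctx->total_pages = 0;
	ctx->valid_pages = 0;
	while (ctx->total_pages < POOL_SIZE) {
		void *p = malloc(PAGE_SZ);

		if (!p)
			break;
		ctx->pages[ctx->total_pages++] = p;
	}
	if (ctx->total_pages == 0)
		return -1;          /* analogous to returning -ENOMEM */
	ctx->pool_offset = 0;
	return 0;
}
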
1662 struct r5l_recovery_ctx *ctx)
1666 for (i = 0; i < ctx->total_pages; ++i)
1667 put_page(ctx->ra_pool[i]);
1668 bio_put(ctx->ra_bio);
1672 * fetch ctx->valid_pages pages from offset
1673 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1675 * ctx->valid_pages could be smaller than ctx->total_pages
1678 struct r5l_recovery_ctx *ctx,
1681 bio_reset(ctx->ra_bio);
1682 bio_set_dev(ctx->ra_bio, log->rdev->bdev);
1683 bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
1684 ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
1686 ctx->valid_pages = 0;
1687 ctx->pool_offset = offset;
1689 while (ctx->valid_pages < ctx->total_pages) {
1690 bio_add_page(ctx->ra_bio,
1691 ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
1692 ctx->valid_pages += 1;
1700 return submit_bio_wait(ctx->ra_bio);
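
These matches are r5l_recovery_fetch_ra_pool(): it resets the read-ahead bio, points it at offset on the log device, adds every pool page, and issues one submit_bio_wait(); valid_pages records how much of the window is actually backed by data, which per the comment above can be less than total_pages, for instance when the window would run off the end of the log. A hedged user-space sketch of the refill, continuing the struct ra_pool sketch above, with a hypothetical read_block() helper in place of the bio:

#define BLOCK_SECTORS 8   /* one 4 KiB page = 8 * 512-byte sectors */

/* Hypothetical synchronous reader: fills one PAGE_SZ buffer from 'sector';
 * stands in for adding pages to the bio and calling submit_bio_wait(). */
extern int read_block(long sector, void *buf);

/* Refill the cached window so that it starts at 'offset'. */
static int ra_pool_fetch(struct ra_pool *ctx, long offset)
{
	ctx->valid_pages = 0;
	ctx->pool_offset = offset;

	while (ctx->valid_pages < ctx->total_pages) {
		long sector = offset + (long)ctx->valid_pages * BLOCK_SECTORS;

		if (read_block(sector, ctx->pages[ctx->valid_pages]) != 0)
			break;   /* keep however much of the window we got */
		ctx->valid_pages += 1;
	}
	return ctx->valid_pages ? 0 : -1;
}
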
1708 struct r5l_recovery_ctx *ctx,
1714 if (offset < ctx->pool_offset ||
1715 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1716 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1721 BUG_ON(offset < ctx->pool_offset ||
1722 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1725 page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
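
The window check above (an offset is served from the pool only if it lies in [pool_offset, pool_offset + valid_pages * BLOCK_SECTORS), otherwise the window is refetched first, and the BUG_ON repeats the same condition afterwards) plus the index computation are the heart of r5l_recovery_read_page(). A small sketch of the same lookup, continuing the earlier user-space structs:

#include <string.h>

/* Copy the cached page holding 'offset' into 'dest', refetching the window
 * first if 'offset' falls outside the currently valid part of it. */
static int ra_pool_read(struct ra_pool *ctx, void *dest, long offset)
{
	if (offset < ctx->pool_offset ||
	    offset >= ctx->pool_offset +
		      (long)ctx->valid_pages * BLOCK_SECTORS) {
		int ret = ra_pool_fetch(ctx, offset);

		if (ret)
			return ret;
	}

	/* BLOCK_SECTORS sectors per page, so the page index is the sector
	 * distance from the start of the window divided by BLOCK_SECTORS
	 * (the kernel expresses the same as ">> (PAGE_SHIFT - 9)"). */
	memcpy(dest,
	       ctx->pages[(offset - ctx->pool_offset) / BLOCK_SECTORS],
	       PAGE_SZ);
	return 0;
}
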
1732 struct r5l_recovery_ctx *ctx)
1734 struct page *page = ctx->meta_page;
1739 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1748 le64_to_cpu(mb->seq) != ctx->seq ||
1750 le64_to_cpu(mb->position) != ctx->pos)
1760 ctx->meta_total_blocks = BLOCK_SECTORS;
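
These lines are from r5l_recovery_read_meta_block(): the candidate meta block at ctx->pos is read through the read-ahead pool and rejected unless its sequence number and position match what the scan expects (the surrounding, unmatched lines also check the block's magic value and checksum); meta_total_blocks then starts out as one block and grows as payloads are parsed. A minimal sketch of that gatekeeping, with an illustrative header struct that is not the real struct r5l_meta_block layout:

#include <stdint.h>

/* Illustrative header only, not the on-disk struct r5l_meta_block. */
struct meta_hdr {
	uint32_t magic;
	uint32_t checksum;
	uint64_t seq;
	uint64_t position;
};

/* Accept the block only if every identity check passes; the real code
 * computes a crc32c over the whole page with the checksum field handled
 * separately, here the caller supplies the computed value. */
static int meta_block_valid(const struct meta_hdr *mb, uint32_t expect_magic,
			    uint64_t expect_seq, uint64_t expect_pos,
			    uint32_t computed_crc)
{
	if (mb->magic != expect_magic ||
	    mb->seq != expect_seq ||
	    mb->position != expect_pos)
		return 0;
	return computed_crc == mb->checksum;
}
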
1812 struct r5l_recovery_ctx *ctx,
1823 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1826 ctx->meta_total_blocks += BLOCK_SECTORS;
1834 struct r5l_recovery_ctx *ctx,
1841 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1842 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1849 log, ctx, sh->dev[sh->qd_idx].page,
1871 struct r5l_recovery_ctx *ctx)
1921 ctx->data_parity_stripes++;
1956 struct r5l_recovery_ctx *ctx)
1969 struct r5l_recovery_ctx *ctx)
1975 r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1984 struct r5l_recovery_ctx *ctx,
1991 r5l_recovery_read_page(log, ctx, page, log_offset);
2004 struct r5l_recovery_ctx *ctx)
2008 struct r5l_meta_block *mb = page_address(ctx->meta_page);
2010 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2025 log, ctx, page, log_offset,
2030 log, ctx, page, log_offset,
2035 log, ctx, page,
2077 struct r5l_recovery_ctx *ctx,
2096 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2102 mb = page_address(ctx->meta_page);
2104 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2151 cached_stripe_list, ctx);
2184 r5l_recovery_replay_one_stripe(conf, sh, ctx);
2187 r5l_recovery_load_data(log, sh, ctx, payload,
2190 r5l_recovery_load_parity(log, sh, ctx, payload,
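
The matches from r5c_recovery_analyze_meta_block() show the dispatch it performs per payload: stripes are looked up in, or added to, the cached stripe list; data payloads are loaded into the matching stripe page and parity payloads into the parity (and Q) pages; and a stripe whose parity was already journaled is replayed to the array before new data for it is accepted. A rough sketch of that dispatch with invented types (the real checks use stripe state bits and cover more corner cases than shown here):

enum payload_type { PAYLOAD_DATA, PAYLOAD_PARITY };

struct payload { enum payload_type type; long log_offset; };
struct stripe  { int parity_loaded; };

extern void load_data(struct stripe *sh, const struct payload *p);   /* ~ r5l_recovery_load_data() */
extern void load_parity(struct stripe *sh, const struct payload *p); /* ~ r5l_recovery_load_parity() */
extern void replay_stripe(struct stripe *sh);                        /* ~ r5l_recovery_replay_one_stripe() */

static void analyze_payloads(struct stripe *sh, const struct payload *p, int n)
{
	for (int i = 0; i < n; i++) {
		if (p[i].type == PAYLOAD_DATA) {
			/* Data for a stripe whose parity is already journaled
			 * means a newer write: replay the finished stripe to
			 * the array before reusing it. */
			if (sh->parity_loaded) {
				replay_stripe(sh);
				sh->parity_loaded = 0;
			}
			load_data(sh, &p[i]);
		} else {
			load_parity(sh, &p[i]);
			sh->parity_loaded = 1;
		}
	}
}
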
2242 struct r5l_recovery_ctx *ctx)
2249 if (r5l_recovery_read_meta_block(log, ctx))
2252 ret = r5c_recovery_analyze_meta_block(log, ctx,
2253 &ctx->cached_list);
2260 ctx->seq++;
2261 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2265 r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2270 r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2273 list_for_each_entry(sh, &ctx->cached_list, lru) {
2276 ctx->data_only_stripes++;
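
r5c_recovery_flush_log() is the main scan loop visible in these matches: read the meta block at ctx->pos, analyze it, then advance pos by meta_total_blocks (counted in sectors, despite the name) with wrap-around and bump seq, until a block fails validation; the cached data-only stripes are then counted. The wrap-around is what r5l_ring_add() provides. A stand-alone sketch of the loop and the ring arithmetic, with hypothetical read_meta()/analyze() helpers standing in for r5l_recovery_read_meta_block() and r5c_recovery_analyze_meta_block():

#include <stdint.h>

/* Advance 'pos' by 'inc' sectors on a log treated as a ring of 'log_size'
 * sectors starting at 0; r5l_ring_add() does the equivalent wrap-around. */
static long ring_add(long pos, long inc, long log_size)
{
	pos += inc;
	if (pos >= log_size)
		pos -= log_size;
	return pos;
}

struct scan_ctx { long pos; uint64_t seq; long meta_total_blocks; };

extern int read_meta(struct scan_ctx *c);  /* 0 iff the block at c->pos is valid     */
extern int analyze(struct scan_ctx *c);    /* parse payloads, grow meta_total_blocks */

static int flush_log(struct scan_ctx *c, long log_size)
{
	for (;;) {
		if (read_meta(c))
			break;              /* first invalid block ends the scan */
		if (analyze(c))
			return -1;
		c->seq++;
		c->pos = ring_add(c->pos, c->meta_total_blocks, log_size);
	}
	return 0;
}
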
2283 * we did a recovery. Now ctx.pos points to an invalid meta block. New
2312 * |- log->last_checkpoint |- ctx->pos
2313 * |- log->last_cp_seq |- ctx->seq
2322 * |- log->last_checkpoint |- ctx->pos+1
2323 * |- log->last_cp_seq |- ctx->seq+10001
2333 * |- log->last_checkpoint |- ctx->pos+n
2334 * |- log->last_cp_seq |- ctx->seq+10000+n
2345 * |- log->last_checkpoint |- ctx->pos+n
2346 * |- log->last_cp_seq |- ctx->seq+10000+n
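
The comment fragments above describe the sequence-number discipline after the scan: ctx->pos is left at the first invalid meta block and ctx->seq at the matching sequence number; recovery then adds 10000 to ctx->seq so that everything it writes from here on is unmistakably newer than stale pre-crash entries, and each block written during recovery advances pos and seq, ending at ctx->pos+n / ctx->seq+10000+n once the n data-only stripes are rewritten. Concretely (a made-up example): if the scan stops with seq = 57, recovery continues writing with seq 10057, 10058, ..., so a later recovery pass can never mistake an old block with seq <= 57 for one written during this recovery.
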
2353 struct r5l_recovery_ctx *ctx)
2367 WARN_ON(list_empty(&ctx->cached_list));
2369 list_for_each_entry(sh, &ctx->cached_list, lru) {
2377 ctx->pos, ctx->seq);
2380 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2411 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2413 sh->log_start = ctx->pos;
2416 ctx->pos = write_pos;
2417 ctx->seq += 1;
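
In r5c_recovery_rewrite_data_only_stripes() (matched above), each cached data-only stripe gets a fresh meta block written at the current ctx->pos with sync_page_io(); the stripe's log_start is pointed at that block, then ctx->pos advances to write_pos and ctx->seq is incremented, so a crash during this phase can be recovered again from the log. A sketch of that per-stripe loop, reusing ring_add() and BLOCK_SECTORS from the scan-loop sketch above; write_meta() is a stand-in for building and writing the block (the real code also writes the stripe's cached data pages, so the actual advance per stripe is larger than one block):

struct rstripe { long log_start; };

extern int write_meta(long pos, unsigned long long seq, struct rstripe *sh);

static void rewrite_data_only(struct rstripe *stripes, int n,
			      long *pos, unsigned long long *seq,
			      long log_size)
{
	for (int i = 0; i < n; i++) {
		/* first sector after the new meta block */
		long write_pos = ring_add(*pos, BLOCK_SECTORS, log_size);

		write_meta(*pos, *seq, &stripes[i]);
		stripes[i].log_start = *pos;  /* the stripe now lives here   */
		*pos = write_pos;             /* real code: past data pages too */
		*seq += 1;
	}
}
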
2426 struct r5l_recovery_ctx *ctx)
2433 if (ctx->data_only_stripes == 0)
2442 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2461 struct r5l_recovery_ctx *ctx;
2465 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2466 if (!ctx)
2469 ctx->pos = log->last_checkpoint;
2470 ctx->seq = log->last_cp_seq;
2471 INIT_LIST_HEAD(&ctx->cached_list);
2472 ctx->meta_page = alloc_page(GFP_KERNEL);
2474 if (!ctx->meta_page) {
2479 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2484 ret = r5c_recovery_flush_log(log, ctx);
2489 pos = ctx->pos;
2490 ctx->seq += 10000;
2492 if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2497 mdname(mddev), ctx->data_only_stripes,
2498 ctx->data_parity_stripes);
2500 if (ctx->data_only_stripes == 0) {
2501 log->next_checkpoint = ctx->pos;
2502 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2503 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2504 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2511 log->log_start = ctx->pos;
2512 log->seq = ctx->seq;
2516 r5c_recovery_flush_data_only_stripes(log, ctx);
2519 r5l_recovery_free_ra_pool(log, ctx);
2521 __free_page(ctx->meta_page);
2523 kfree(ctx);
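
The final group of matches is the top-level entry point (r5l_recovery_log() in raid5-cache.c): allocate ctx, the meta page and the read-ahead pool; scan the log from the last checkpoint with r5c_recovery_flush_log(); bump ctx->seq by 10000; then either write a single empty meta block (when there are no data-only stripes) or rewrite the data-only stripes; publish ctx->pos/ctx->seq as log->log_start/log->seq; and free everything in reverse order. A user-space sketch of just that setup/teardown ordering, with recover_scan() as a placeholder and the stripe-rewrite step elided:

#include <stdlib.h>

struct recovery { long pos; unsigned long long seq; void *meta_page; };

extern int recover_scan(struct recovery *r);  /* ~ r5c_recovery_flush_log() */

static int recovery_run(long last_checkpoint, unsigned long long last_cp_seq)
{
	int ret;
	struct recovery *r = calloc(1, sizeof(*r));

	if (!r)
		return -1;
	r->pos = last_checkpoint;       /* resume from the last checkpoint */
	r->seq = last_cp_seq;
	r->meta_page = malloc(4096);
	if (!r->meta_page) {
		free(r);
		return -1;
	}

	ret = recover_scan(r);          /* walk the log, replay/cache stripes */
	if (ret == 0)
		r->seq += 10000;        /* keep post-recovery seq numbers unambiguous */

	/* ...write an empty meta block or rewrite data-only stripes here... */

	free(r->meta_page);
	free(r);
	return ret;
}
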