Lines Matching refs:ctx

1629 struct r5l_recovery_ctx *ctx)
1633 ctx->valid_pages = 0;
1634 ctx->total_pages = 0;
1635 while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
1640 ctx->ra_pool[ctx->total_pages] = page;
1641 ctx->total_pages += 1;
1644 if (ctx->total_pages == 0)
1647 ctx->pool_offset = 0;
1652 struct r5l_recovery_ctx *ctx)
1656 for (i = 0; i < ctx->total_pages; ++i)
1657 put_page(ctx->ra_pool[i]);
1661 * fetch ctx->valid_pages pages from offset
1662 * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
1664 * ctx->valid_pages could be smaller than ctx->total_pages
1667 struct r5l_recovery_ctx *ctx,
1673 bio_init(&bio, log->rdev->bdev, ctx->ra_bvec,
1677 ctx->valid_pages = 0;
1678 ctx->pool_offset = offset;
1680 while (ctx->valid_pages < ctx->total_pages) {
1681 __bio_add_page(&bio, ctx->ra_pool[ctx->valid_pages], PAGE_SIZE,
1683 ctx->valid_pages += 1;
1701 struct r5l_recovery_ctx *ctx,
1707 if (offset < ctx->pool_offset ||
1708 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
1709 ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
1714 BUG_ON(offset < ctx->pool_offset ||
1715 offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
1718 page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
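
The comment at 1661-1664 and the window check at 1707-1718 describe a small read-ahead cache: a pool of total_pages pages is filled starting at pool_offset, and a block read is served straight from the pool whenever its sector offset falls inside [pool_offset, pool_offset + valid_pages * BLOCK_SECTORS); otherwise the pool is refetched at the requested offset first. A minimal user-space model of that windowing and index arithmetic follows; POOL_PAGES and read_blocks() are invented for the sketch (read_blocks() stands in for the bio-based fetch at 1673-1683), while the shift math mirrors line 1718.

/* User-space model of the recovery read-ahead pool. Assumes 4 KiB
 * pages and 512-byte sectors, i.e. BLOCK_SECTORS = 8 sectors/page. */
#include <stdint.h>
#include <string.h>

#define PAGE_SHIFT    12
#define SECTOR_SHIFT  9
#define BLOCK_SECTORS (1 << (PAGE_SHIFT - SECTOR_SHIFT))
#define POOL_PAGES    256            /* cf. R5L_RECOVERY_PAGE_POOL_SIZE */

struct ra_pool {
	uint8_t  pages[POOL_PAGES][1 << PAGE_SHIFT];
	uint64_t pool_offset;        /* first sector cached in the pool */
	int      valid_pages;        /* pages filled by the last fetch */
};

/* Hypothetical device read, stubbed out: pretend every fetch fills the
 * whole pool. The real fetch may come up short near the end of the
 * journal, which is why valid_pages can be < total_pages (1664). */
static int read_blocks(struct ra_pool *p, uint64_t offset)
{
	(void)offset;
	memset(p->pages, 0, sizeof(p->pages));
	return POOL_PAGES;
}

static int ra_fetch(struct ra_pool *p, uint64_t offset)
{
	p->pool_offset = offset;
	p->valid_pages = read_blocks(p, offset);
	return p->valid_pages > 0 ? 0 : -1;
}

/* Mirrors r5l_recovery_read_page(): refetch when the requested offset
 * falls outside the cached window, then copy the pooled page out. */
static int ra_read_page(struct ra_pool *p, void *dst, uint64_t offset)
{
	if (offset < p->pool_offset ||
	    offset >= p->pool_offset + (uint64_t)p->valid_pages * BLOCK_SECTORS)
		if (ra_fetch(p, offset))
			return -1;
	memcpy(dst,
	       p->pages[(offset - p->pool_offset) >> (PAGE_SHIFT - SECTOR_SHIFT)],
	       1 << PAGE_SHIFT);
	return 0;
}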
1725 struct r5l_recovery_ctx *ctx)
1727 struct page *page = ctx->meta_page;
1732 ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
1741 le64_to_cpu(mb->seq) != ctx->seq ||
1743 le64_to_cpu(mb->position) != ctx->pos)
1753 ctx->meta_total_blocks = BLOCK_SECTORS;
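
The comparisons at 1741-1743 are what make the log scan self-terminating: a candidate block only counts as the next meta block if it stores exactly the sequence number the scan expects (ctx->seq) and claims the very position it was read from (ctx->pos). Blocks left over from an older pass around the ring fail one of these tests (right position but stale seq, or vice versa), so the scan stops at them instead of replaying garbage; a passing block then contributes at least BLOCK_SECTORS to the cursor advance via meta_total_blocks (1753).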
1805 struct r5l_recovery_ctx *ctx,
1816 r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
1819 ctx->meta_total_blocks += BLOCK_SECTORS;
1827 struct r5l_recovery_ctx *ctx,
1834 ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
1835 r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
1842 log, ctx, sh->dev[sh->qd_idx].page,
1864 struct r5l_recovery_ctx *ctx)
1914 ctx->data_parity_stripes++;
1950 struct r5l_recovery_ctx *ctx)
1963 struct r5l_recovery_ctx *ctx)
1969 r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx);
1978 struct r5l_recovery_ctx *ctx,
1985 r5l_recovery_read_page(log, ctx, page, log_offset);
1998 struct r5l_recovery_ctx *ctx)
2002 struct r5l_meta_block *mb = page_address(ctx->meta_page);
2004 sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2019 log, ctx, page, log_offset,
2024 log, ctx, page, log_offset,
2029 log, ctx, page,
2071 struct r5l_recovery_ctx *ctx,
2090 ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx);
2096 mb = page_address(ctx->meta_page);
2098 log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2145 cached_stripe_list, ctx);
2178 r5l_recovery_replay_one_stripe(conf, sh, ctx);
2181 r5l_recovery_load_data(log, sh, ctx, payload,
2184 r5l_recovery_load_parity(log, sh, ctx, payload,
2236 struct r5l_recovery_ctx *ctx)
2243 if (r5l_recovery_read_meta_block(log, ctx))
2246 ret = r5c_recovery_analyze_meta_block(log, ctx,
2247 &ctx->cached_list);
2254 ctx->seq++;
2255 ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
2259 r5c_recovery_drop_stripes(&ctx->cached_list, ctx);
2264 r5c_recovery_replay_stripes(&ctx->cached_list, ctx);
2267 list_for_each_entry(sh, &ctx->cached_list, lru) {
2270 ctx->data_only_stripes++;
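
Lines 2243-2255 are the heart of the recovery scan: read the meta block at ctx->pos, analyze its payloads into cached_list, then bump ctx->seq by one and advance ctx->pos by the meta_total_blocks that block covered; the first invalid block ends the loop, after which 2259-2270 drop or replay what is still cached and count the data-only stripes. A self-contained toy of that loop, with an invented block layout (the 'valid' flag and 'span' field stand in for the real checksum/seq/position test and for meta_total_blocks):

#include <stdint.h>
#include <stdio.h>

#define RING 16

struct block { uint64_t seq; int valid; uint64_t span; };

int main(void)
{
	/* tiny fake log: three valid meta blocks, then stale junk */
	struct block log[RING] = {
		{ 100, 1, 2 }, { 0, 0, 0 }, { 101, 1, 3 },
		[5] = { 102, 1, 1 }, [6] = { 7, 0, 0 },
	};
	uint64_t pos = 0, seq = 100;

	for (;;) {
		struct block *b = &log[pos];
		if (!b->valid || b->seq != seq)   /* invalid meta ends the scan */
			break;
		seq++;                            /* ctx->seq++          (2254) */
		pos = (pos + b->span) % RING;     /* cf. r5l_ring_add()  (2255) */
	}
	printf("scan stops at pos=%llu, next expected seq=%llu\n",
	       (unsigned long long)pos, (unsigned long long)seq);
	return 0;
}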
2277 * we did a recovery. Now ctx.pos points to an invalid meta block. New
2306 * |- log->last_checkpoint |- ctx->pos
2307 * |- log->last_cp_seq |- ctx->seq
2316 * |- log->last_checkpoint |- ctx->pos+1
2317 * |- log->last_cp_seq |- ctx->seq+10001
2327 * |- log->last_checkpoint |- ctx->pos+n
2328 * |- log->last_cp_seq |- ctx->seq+10000+n
2339 * |- log->last_checkpoint |- ctx->pos+n
2340 * |- log->last_cp_seq |- ctx->seq+10000+n
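
A worked instance of the sequence jump the diagrams above describe, assuming the scan ends with ctx->seq == 500: the invalid region may still hold blocks from an older log generation carrying seq values just above 500 (501, 502, ...). If recovery wrote its new meta block with seq 501 and the machine crashed again, the next scan could accept those stale blocks as a valid continuation. Writing the new block with seq 10500 (ctx->seq + 10000) instead places it far above anything the old generation could have produced, so a rescan stops exactly where this recovery left off; each data-only stripe rewritten afterwards then takes 10501, 10502, ..., which is the "+n" in the last two diagrams.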
2347 struct r5l_recovery_ctx *ctx)
2361 WARN_ON(list_empty(&ctx->cached_list));
2363 list_for_each_entry(sh, &ctx->cached_list, lru) {
2371 ctx->pos, ctx->seq);
2374 write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2405 sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
2407 sh->log_start = ctx->pos;
2410 ctx->pos = write_pos;
2411 ctx->seq += 1;
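
The write order in 2374-2411 is what makes the rewrite crash-safe: write_pos starts one block past ctx->pos (2374), the slot at ctx->pos being reserved for the stripe's new meta block; the data pages go out at write_pos first, and only then is the meta page written synchronously at ctx->pos (2405). Until that final write lands, nothing reachable from log->last_checkpoint points at the new unit, which is why a crash mid-rewrite can simply recover again from the old checkpoint, as the comment above notes. Once the meta block is down, sh->log_start records the stripe's new home (2407) and the cursor jumps past the unit with ctx->seq += 1 (2410-2411).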
2420 struct r5l_recovery_ctx *ctx)
2427 if (ctx->data_only_stripes == 0)
2436 list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
2455 struct r5l_recovery_ctx *ctx;
2459 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
2460 if (!ctx)
2463 ctx->pos = log->last_checkpoint;
2464 ctx->seq = log->last_cp_seq;
2465 INIT_LIST_HEAD(&ctx->cached_list);
2466 ctx->meta_page = alloc_page(GFP_KERNEL);
2468 if (!ctx->meta_page) {
2473 if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
2478 ret = r5c_recovery_flush_log(log, ctx);
2483 pos = ctx->pos;
2484 ctx->seq += 10000;
2486 if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
2491 mdname(mddev), ctx->data_only_stripes,
2492 ctx->data_parity_stripes);
2494 if (ctx->data_only_stripes == 0) {
2495 log->next_checkpoint = ctx->pos;
2496 r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
2497 ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
2498 } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
2505 log->log_start = ctx->pos;
2506 log->seq = ctx->seq;
2510 r5c_recovery_flush_data_only_stripes(log, ctx);
2513 r5l_recovery_free_ra_pool(log, ctx);
2515 __free_page(ctx->meta_page);
2517 kfree(ctx);
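
Read in order, the r5l_recovery_log() matches (2455-2517) already outline the whole recovery path. A skeleton reassembled from only those lines, with the error unwinding compressed into labels (the -EIO on a failed rewrite is an assumption; the listing does not show which errno the real code picks):

static int r5l_recovery_log_outline(struct r5l_log *log)
{
	struct r5l_recovery_ctx *ctx;
	int ret = -ENOMEM;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ret;

	ctx->pos = log->last_checkpoint;     /* resume scan at the checkpoint */
	ctx->seq = log->last_cp_seq;
	INIT_LIST_HEAD(&ctx->cached_list);

	ctx->meta_page = alloc_page(GFP_KERNEL);
	if (!ctx->meta_page)
		goto out_free_ctx;
	if (r5l_recovery_allocate_ra_pool(log, ctx) != 0)
		goto out_free_page;

	ret = r5c_recovery_flush_log(log, ctx);  /* scan to first invalid block */
	if (ret)
		goto out_free_pool;

	ctx->seq += 10000;                   /* jump past any stale sequence */

	if (ctx->data_only_stripes == 0) {
		log->next_checkpoint = ctx->pos;
		r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
		ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
	} else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
		ret = -EIO;                  /* assumed errno */
		goto out_free_pool;
	}

	log->log_start = ctx->pos;           /* publish the new log head */
	log->seq = ctx->seq;

	r5c_recovery_flush_data_only_stripes(log, ctx);
	ret = 0;
out_free_pool:
	r5l_recovery_free_ra_pool(log, ctx);
out_free_page:
	__free_page(ctx->meta_page);
out_free_ctx:
	kfree(ctx);
	return ret;
}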