Lines Matching defs:sctx

86 struct scrub_ctx *sctx;
107 struct scrub_ctx *sctx;
124 struct scrub_ctx *sctx;
210 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
211 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
236 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
238 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
250 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
252 static void scrub_wr_submit(struct scrub_ctx *sctx);
257 static void scrub_put_ctx(struct scrub_ctx *sctx);
265 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
267 refcount_inc(&sctx->refs);
268 atomic_inc(&sctx->bios_in_flight);
271 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
273 atomic_dec(&sctx->bios_in_flight);
274 wake_up(&sctx->list_wait);
275 scrub_put_ctx(sctx);
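
The pair at lines 265-275 is the in-flight accounting that the later wait_event() calls in this listing (lines 3074-3075, 3631-3632, 3965) rely on: submitting a bio pins the context with a reference and bumps bios_in_flight, and completion reverses both and wakes list_wait. Below is a minimal userspace model of that pattern, assuming nothing beyond the lines shown here; the names (ctx_get_bio, ctx_put_bio, ctx_wait_idle) and the pthread primitives standing in for refcount_t and the wait queue are illustrative, not the kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative stand-in for the accounting fields of struct scrub_ctx. */
struct ctx {
	atomic_int refs;             /* models refcount_t refs              */
	atomic_int bios_in_flight;   /* models atomic_t bios_in_flight      */
	pthread_mutex_t wait_lock;   /* the wait queue's internal lock      */
	pthread_cond_t list_wait;    /* models wait_queue_head_t list_wait  */
};

/* scrub_pending_bio_inc(): pin the ctx and count the bio (lines 267-268). */
static void ctx_get_bio(struct ctx *c)
{
	atomic_fetch_add(&c->refs, 1);
	atomic_fetch_add(&c->bios_in_flight, 1);
}

/* scrub_pending_bio_dec(): uncount, wake waiters, then drop the reference
 * taken in ctx_get_bio() (lines 273-275). */
static void ctx_put_bio(struct ctx *c)
{
	atomic_fetch_sub(&c->bios_in_flight, 1);
	pthread_mutex_lock(&c->wait_lock);
	pthread_cond_broadcast(&c->list_wait);
	pthread_mutex_unlock(&c->wait_lock);
	if (atomic_fetch_sub(&c->refs, 1) == 1)
		free(c);             /* scrub_put_ctx() -> scrub_free_ctx() */
}

/* wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0).
 * Waiters hold their own reference, as btrfs_scrub_dev() does via the
 * reference set up in scrub_setup_ctx(), so the ctx cannot vanish here. */
static void ctx_wait_idle(struct ctx *c)
{
	pthread_mutex_lock(&c->wait_lock);
	while (atomic_load(&c->bios_in_flight) != 0)
		pthread_cond_wait(&c->list_wait, &c->wait_lock);
	pthread_mutex_unlock(&c->wait_lock);
}

The same wait/wake mechanism is reused for workers_pending (lines 3641-3642 and 3969), only with a different counter.
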
527 static void scrub_free_csums(struct scrub_ctx *sctx)
529 while (!list_empty(&sctx->csum_list)) {
531 sum = list_first_entry(&sctx->csum_list,
538 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
542 if (!sctx)
546 if (sctx->curr != -1) {
547 struct scrub_bio *sbio = sctx->bios[sctx->curr];
557 struct scrub_bio *sbio = sctx->bios[i];
564 kfree(sctx->wr_curr_bio);
565 scrub_free_csums(sctx);
566 kfree(sctx);
569 static void scrub_put_ctx(struct scrub_ctx *sctx)
571 if (refcount_dec_and_test(&sctx->refs))
572 scrub_free_ctx(sctx);
578 struct scrub_ctx *sctx;
581 sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
582 if (!sctx)
584 refcount_set(&sctx->refs, 1);
585 sctx->is_dev_replace = is_dev_replace;
586 sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
587 sctx->curr = -1;
588 sctx->fs_info = fs_info;
589 INIT_LIST_HEAD(&sctx->csum_list);
596 sctx->bios[i] = sbio;
599 sbio->sctx = sctx;
605 sctx->bios[i]->next_free = i + 1;
607 sctx->bios[i]->next_free = -1;
609 sctx->first_free = 0;
610 atomic_set(&sctx->bios_in_flight, 0);
611 atomic_set(&sctx->workers_pending, 0);
612 atomic_set(&sctx->cancel_req, 0);
613 sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
615 spin_lock_init(&sctx->list_lock);
616 spin_lock_init(&sctx->stat_lock);
617 init_waitqueue_head(&sctx->list_wait);
619 WARN_ON(sctx->wr_curr_bio != NULL);
620 mutex_init(&sctx->wr_lock);
621 sctx->wr_curr_bio = NULL;
624 sctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO;
625 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
626 sctx->flush_all_writes = false;
629 return sctx;
632 scrub_free_ctx(sctx);
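
scrub_setup_ctx() (lines 578-632) wires the preallocated scrub_bio slots into an index-based free list: bios[i]->next_free points at i + 1, the last slot ends the chain with -1, first_free starts at 0 and curr at -1 ("no bio being filled"). The consumer side appears at lines 1976-1986 (take the head of the list or wait) and lines 2283-2284 (push a completed bio back). A small self-contained sketch of that free-list discipline, with hypothetical names (slot_pool, pool_take, pool_put) and the locking and wait path omitted:

#include <stdio.h>

#define SLOT_COUNT 4           /* stands in for the fixed number of bios */

struct slot {
	int index;
	int next_free;          /* next free slot, or -1 */
};

struct slot_pool {
	struct slot slots[SLOT_COUNT];
	int first_free;         /* head of the free list, or -1 */
	int curr;               /* slot currently being filled, or -1 */
};

/* Mirror the wiring in scrub_setup_ctx() (lines 605-610). */
static void pool_init(struct slot_pool *p)
{
	for (int i = 0; i < SLOT_COUNT; i++) {
		p->slots[i].index = i;
		p->slots[i].next_free = (i + 1 < SLOT_COUNT) ? i + 1 : -1;
	}
	p->first_free = 0;
	p->curr = -1;
}

/* Take a slot, as scrub_add_page_to_rd_bio() does when sctx->curr == -1
 * (lines 1978-1982); a real caller would wait on list_wait when empty. */
static int pool_take(struct slot_pool *p)
{
	if (p->first_free == -1)
		return -1;
	p->curr = p->first_free;
	p->first_free = p->slots[p->curr].next_free;
	p->slots[p->curr].next_free = -1;
	return p->curr;
}

/* Return a slot, as the read-bio completion does (lines 2283-2284). */
static void pool_put(struct slot_pool *p, int i)
{
	p->slots[i].next_free = p->first_free;
	p->first_free = i;
}

int main(void)
{
	struct slot_pool p;

	pool_init(&p);
	int a = pool_take(&p);
	int b = pool_take(&p);
	pool_put(&p, a);
	printf("took %d and %d, free list head is now %d\n", a, b, p.first_free);
	return 0;
}
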
747 fs_info = sblock->sctx->fs_info;
823 struct scrub_ctx *sctx = sblock_to_check->sctx;
842 fs_info = sctx->fs_info;
849 spin_lock(&sctx->stat_lock);
850 ++sctx->stat.super_errors;
851 spin_unlock(&sctx->stat_lock);
882 spin_lock(&sctx->stat_lock);
884 sctx->stat.malloc_errors++;
885 sctx->stat.read_errors++;
886 sctx->stat.uncorrectable_errors++;
887 spin_unlock(&sctx->stat_lock);
923 spin_lock(&sctx->stat_lock);
924 sctx->stat.malloc_errors++;
925 sctx->stat.read_errors++;
926 sctx->stat.uncorrectable_errors++;
927 spin_unlock(&sctx->stat_lock);
935 spin_lock(&sctx->stat_lock);
936 sctx->stat.read_errors++;
937 sctx->stat.uncorrectable_errors++;
938 spin_unlock(&sctx->stat_lock);
958 spin_lock(&sctx->stat_lock);
959 sctx->stat.unverified_errors++;
961 spin_unlock(&sctx->stat_lock);
963 if (sctx->is_dev_replace)
969 spin_lock(&sctx->stat_lock);
970 sctx->stat.read_errors++;
971 spin_unlock(&sctx->stat_lock);
976 spin_lock(&sctx->stat_lock);
977 sctx->stat.csum_errors++;
978 spin_unlock(&sctx->stat_lock);
984 spin_lock(&sctx->stat_lock);
985 sctx->stat.verify_errors++;
986 spin_unlock(&sctx->stat_lock);
998 if (sctx->readonly) {
999 ASSERT(!sctx->is_dev_replace);
1053 if (sctx->is_dev_replace) {
1065 if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
1099 if (!page_bad->io_error && !sctx->is_dev_replace)
1128 if (sctx->is_dev_replace) {
1156 if (success && !sctx->is_dev_replace) {
1176 spin_lock(&sctx->stat_lock);
1177 sctx->stat.corrected_errors++;
1179 spin_unlock(&sctx->stat_lock);
1186 spin_lock(&sctx->stat_lock);
1187 sctx->stat.uncorrectable_errors++;
1188 spin_unlock(&sctx->stat_lock);
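
The lines from 823 through 1188 belong to the handler for a block that failed its first read or checksum: statistics are only touched under stat_lock, a read-only scrub (lines 998-999) never attempts a rewrite, and the final outcome lands in unverified, corrected or uncorrectable counters. A much-simplified model of that bookkeeping, assuming only what the listed lines show (the real function also deals with mirrors, dev-replace and page-by-page repair), with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

struct scrub_stats {
	unsigned long read_errors;
	unsigned long csum_errors;
	unsigned long verify_errors;
	unsigned long unverified_errors;
	unsigned long corrected_errors;
	unsigned long uncorrectable_errors;
};

struct ctx {
	pthread_mutex_t stat_lock;   /* models spinlock_t stat_lock */
	struct scrub_stats stat;
	bool readonly;
};

/* Hypothetical summary of one checked block. */
struct block_state {
	bool io_error;      /* read failed                      */
	bool csum_error;    /* data checksum mismatch           */
	bool verify_error;  /* metadata/generation check failed */
	bool repaired;      /* a good copy was written back     */
};

/* Classify the result the way the listed stat updates suggest. */
static void account_errored_block(struct ctx *c, const struct block_state *b)
{
	pthread_mutex_lock(&c->stat_lock);

	if (!b->io_error && !b->csum_error && !b->verify_error) {
		/* the error went away on re-read (lines 958-961) */
		c->stat.unverified_errors++;
		goto out;
	}
	if (b->io_error)
		c->stat.read_errors++;           /* line 970  */
	if (b->csum_error)
		c->stat.csum_errors++;           /* line 977  */
	if (b->verify_error)
		c->stat.verify_errors++;         /* line 985  */

	if (c->readonly)
		goto out;                        /* lines 998-999: never rewrite */

	if (b->repaired)
		c->stat.corrected_errors++;      /* line 1177 */
	else
		c->stat.uncorrectable_errors++;  /* line 1187 */
out:
	pthread_mutex_unlock(&c->stat_lock);
}

Holding one lock section here is a simplification; the listed code takes and releases stat_lock around each individual update.
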
1268 struct scrub_ctx *sctx = original_sblock->sctx;
1269 struct btrfs_fs_info *fs_info = sctx->fs_info;
1331 sblock->sctx = sctx;
1336 spin_lock(&sctx->stat_lock);
1337 sctx->stat.malloc_errors++;
1338 spin_unlock(&sctx->stat_lock);
1352 sctx->csum_size);
1553 struct btrfs_fs_info *fs_info = sblock_bad->sctx->fs_info;
1594 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
1622 return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1625 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1631 mutex_lock(&sctx->wr_lock);
1633 if (!sctx->wr_curr_bio) {
1634 sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
1636 if (!sctx->wr_curr_bio) {
1637 mutex_unlock(&sctx->wr_lock);
1640 sctx->wr_curr_bio->sctx = sctx;
1641 sctx->wr_curr_bio->page_count = 0;
1643 sbio = sctx->wr_curr_bio;
1649 sbio->dev = sctx->wr_tgtdev;
1652 bio = btrfs_io_bio_alloc(sctx->pages_per_wr_bio);
1666 scrub_wr_submit(sctx);
1675 mutex_unlock(&sctx->wr_lock);
1678 scrub_wr_submit(sctx);
1685 if (sbio->page_count == sctx->pages_per_wr_bio)
1686 scrub_wr_submit(sctx);
1687 mutex_unlock(&sctx->wr_lock);
1692 static void scrub_wr_submit(struct scrub_ctx *sctx)
1696 if (!sctx->wr_curr_bio)
1699 sbio = sctx->wr_curr_bio;
1700 sctx->wr_curr_bio = NULL;
1702 scrub_pending_bio_inc(sctx);
1725 struct scrub_ctx *sctx = sbio->sctx;
1731 &sbio->sctx->fs_info->dev_replace;
1746 scrub_pending_bio_dec(sctx);
1785 struct scrub_ctx *sctx = sblock->sctx;
1786 struct btrfs_fs_info *fs_info = sctx->fs_info;
1803 if (memcmp(csum, spage->csum, sctx->csum_size))
1811 struct scrub_ctx *sctx = sblock->sctx;
1813 struct btrfs_fs_info *fs_info = sctx->fs_info;
1817 const int num_pages = sctx->fs_info->nodesize >> PAGE_SHIFT;
1826 memcpy(on_disk_csum, h->csum, sctx->csum_size);
1859 if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1868 struct scrub_ctx *sctx = sblock->sctx;
1869 struct btrfs_fs_info *fs_info = sctx->fs_info;
1896 if (memcmp(calculated_csum, s->csum, sctx->csum_size))
1905 spin_lock(&sctx->stat_lock);
1906 ++sctx->stat.super_errors;
1907 spin_unlock(&sctx->stat_lock);
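
Lines 1785-1907 are the three verification helpers (data, tree block, super block) and share one shape: compute a checksum over the block, memcmp it against the expected sctx->csum_size bytes, and record a failure in the appropriate counter (super block mismatches go to stat.super_errors, lines 1905-1907). A compact sketch of that shape with a deliberately trivial stand-in checksum; the real code computes crc32c through the crypto layer, which is not shown in this listing:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CSUM_SIZE 4   /* stands in for sctx->csum_size */

/* Deliberately trivial stand-in for the real checksum: XOR-fold the block. */
static void toy_csum(const uint8_t *data, size_t len, uint8_t out[CSUM_SIZE])
{
	memset(out, 0, CSUM_SIZE);
	for (size_t i = 0; i < len; i++)
		out[i % CSUM_SIZE] ^= data[i];
}

/* The shape shared by scrub_checksum_data/_tree_block/_super:
 * recompute, compare csum_size bytes, report a mismatch.
 * Returns 1 on checksum failure, 0 if the block verifies. */
static int verify_block(const uint8_t *data, size_t len,
			const uint8_t on_disk_csum[CSUM_SIZE])
{
	uint8_t calculated_csum[CSUM_SIZE];

	toy_csum(data, len, calculated_csum);
	return memcmp(calculated_csum, on_disk_csum, CSUM_SIZE) ? 1 : 0;
}

int main(void)
{
	uint8_t block[4096] = { 0 };
	uint8_t csum[CSUM_SIZE];

	toy_csum(block, sizeof(block), csum);
	block[17] ^= 0xff;                    /* corrupt one byte */
	printf("verify after corruption: %d\n",
	       verify_block(block, sizeof(block), csum));
	return 0;
}
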
1952 static void scrub_submit(struct scrub_ctx *sctx)
1956 if (sctx->curr == -1)
1959 sbio = sctx->bios[sctx->curr];
1960 sctx->curr = -1;
1961 scrub_pending_bio_inc(sctx);
1965 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
1976 while (sctx->curr == -1) {
1977 spin_lock(&sctx->list_lock);
1978 sctx->curr = sctx->first_free;
1979 if (sctx->curr != -1) {
1980 sctx->first_free = sctx->bios[sctx->curr]->next_free;
1981 sctx->bios[sctx->curr]->next_free = -1;
1982 sctx->bios[sctx->curr]->page_count = 0;
1983 spin_unlock(&sctx->list_lock);
1985 spin_unlock(&sctx->list_lock);
1986 wait_event(sctx->list_wait, sctx->first_free != -1);
1989 sbio = sctx->bios[sctx->curr];
1998 bio = btrfs_io_bio_alloc(sctx->pages_per_rd_bio);
2013 scrub_submit(sctx);
2025 scrub_submit(sctx);
2032 if (sbio->page_count == sctx->pages_per_rd_bio)
2033 scrub_submit(sctx);
2041 struct btrfs_fs_info *fs_info = sblock->sctx->fs_info;
2054 struct scrub_ctx *sctx = sblock->sctx;
2055 struct btrfs_fs_info *fs_info = sctx->fs_info;
2066 spin_lock(&sctx->stat_lock);
2067 sctx->stat.read_errors++;
2068 spin_unlock(&sctx->stat_lock);
2073 spin_lock(&sctx->stat_lock);
2074 sctx->stat.uncorrectable_errors++;
2075 spin_unlock(&sctx->stat_lock);
2083 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2084 mutex_lock(&sctx->wr_lock);
2085 scrub_wr_submit(sctx);
2086 mutex_unlock(&sctx->wr_lock);
2090 scrub_pending_bio_dec(sctx);
2095 struct scrub_ctx *sctx = sblock->sctx;
2096 struct btrfs_fs_info *fs_info = sctx->fs_info;
2111 if (WARN_ON(!sctx->is_dev_replace ||
2139 scrub_pending_bio_inc(sctx);
2148 spin_lock(&sctx->stat_lock);
2149 sctx->stat.malloc_errors++;
2150 spin_unlock(&sctx->stat_lock);
2153 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2163 spin_lock(&sctx->stat_lock);
2164 sctx->stat.malloc_errors++;
2165 spin_unlock(&sctx->stat_lock);
2172 sblock->sctx = sctx;
2182 spin_lock(&sctx->stat_lock);
2183 sctx->stat.malloc_errors++;
2184 spin_unlock(&sctx->stat_lock);
2201 memcpy(spage->csum, csum, sctx->csum_size);
2227 ret = scrub_add_page_to_rd_bio(sctx, spage);
2235 scrub_submit(sctx);
2257 struct scrub_ctx *sctx = sbio->sctx;
2282 spin_lock(&sctx->list_lock);
2283 sbio->next_free = sctx->first_free;
2284 sctx->first_free = sbio->index;
2285 spin_unlock(&sctx->list_lock);
2287 if (sctx->is_dev_replace && sctx->flush_all_writes) {
2288 mutex_lock(&sctx->wr_lock);
2289 scrub_wr_submit(sctx);
2290 mutex_unlock(&sctx->wr_lock);
2293 scrub_pending_bio_dec(sctx);
2303 int sectorsize = sparity->sctx->fs_info->sectorsize;
2353 if (!corrupted && sblock->sctx->is_dev_replace)
2367 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum)
2373 while (!list_empty(&sctx->csum_list)) {
2374 sum = list_first_entry(&sctx->csum_list,
2381 ++sctx->stat.csum_discards;
2389 index = div_u64(logical - sum->bytenr, sctx->fs_info->sectorsize);
2392 num_sectors = sum->len / sctx->fs_info->sectorsize;
2393 memcpy(csum, sum->sums + index * sctx->csum_size, sctx->csum_size);
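
scrub_find_csum() (lines 2367-2393) turns a logical address into an index into an ordered-sum entry: entries that end before the requested logical are dropped from csum_list (counted as csum_discards), then index = (logical - sum->bytenr) / sectorsize and the csum_size bytes at index * csum_size are copied out. A worked example of just that arithmetic, with made-up numbers and a simplified stand-in for struct btrfs_ordered_sum:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTORSIZE 4096u   /* fs_info->sectorsize        */
#define CSUM_SIZE  4u      /* sctx->csum_size (crc32c)   */

/* Simplified stand-in for struct btrfs_ordered_sum. */
struct ordered_sum {
	uint64_t bytenr;            /* logical address the csum run starts at */
	uint32_t len;               /* bytes covered by this entry            */
	uint8_t  sums[8 * CSUM_SIZE]; /* one CSUM_SIZE csum per sector        */
};

/* Mirror of the indexing at lines 2389-2393. */
static int find_csum(const struct ordered_sum *sum, uint64_t logical,
		     uint8_t csum[CSUM_SIZE])
{
	if (logical < sum->bytenr || logical >= sum->bytenr + sum->len)
		return 0;                         /* no csum for this sector */
	uint64_t index = (logical - sum->bytenr) / SECTORSIZE;
	memcpy(csum, sum->sums + index * CSUM_SIZE, CSUM_SIZE);
	return 1;
}

int main(void)
{
	/* Entry covering 8 sectors starting at logical 1 MiB. */
	struct ordered_sum sum = { .bytenr = 1048576, .len = 8 * SECTORSIZE };
	uint8_t csum[CSUM_SIZE];

	sum.sums[3 * CSUM_SIZE] = 0xab;           /* csum of sector 3 */

	/* logical = bytenr + 3 sectors -> index 3, bytes [12, 16) copied */
	if (find_csum(&sum, 1048576 + 3 * SECTORSIZE, csum))
		printf("csum[0] for sector 3 = 0x%02x\n", (unsigned)csum[0]);
	return 0;
}
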
2402 static int scrub_extent(struct scrub_ctx *sctx, struct map_lookup *map,
2415 blocksize = sctx->fs_info->sectorsize;
2416 spin_lock(&sctx->stat_lock);
2417 sctx->stat.data_extents_scrubbed++;
2418 sctx->stat.data_bytes_scrubbed += len;
2419 spin_unlock(&sctx->stat_lock);
2424 blocksize = sctx->fs_info->nodesize;
2425 spin_lock(&sctx->stat_lock);
2426 sctx->stat.tree_extents_scrubbed++;
2427 sctx->stat.tree_bytes_scrubbed += len;
2428 spin_unlock(&sctx->stat_lock);
2430 blocksize = sctx->fs_info->sectorsize;
2440 have_csum = scrub_find_csum(sctx, logical, csum);
2442 ++sctx->stat.no_csum;
2444 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2462 struct scrub_ctx *sctx = sparity->sctx;
2468 spin_lock(&sctx->stat_lock);
2469 sctx->stat.malloc_errors++;
2470 spin_unlock(&sctx->stat_lock);
2477 sblock->sctx = sctx;
2489 spin_lock(&sctx->stat_lock);
2490 sctx->stat.malloc_errors++;
2491 spin_unlock(&sctx->stat_lock);
2511 memcpy(spage->csum, csum, sctx->csum_size);
2529 ret = scrub_add_page_to_rd_bio(sctx, spage);
2546 struct scrub_ctx *sctx = sparity->sctx;
2561 blocksize = sctx->fs_info->sectorsize;
2571 have_csum = scrub_find_csum(sctx, logical, csum);
2634 struct scrub_ctx *sctx = sparity->sctx;
2640 spin_lock(&sctx->stat_lock);
2641 sctx->stat.read_errors += nbits;
2642 sctx->stat.uncorrectable_errors += nbits;
2643 spin_unlock(&sctx->stat_lock);
2658 struct scrub_ctx *sctx = sparity->sctx;
2661 scrub_pending_bio_dec(sctx);
2667 struct btrfs_fs_info *fs_info = sparity->sctx->fs_info;
2682 struct scrub_ctx *sctx = sparity->sctx;
2683 struct btrfs_fs_info *fs_info = sctx->fs_info;
2714 scrub_pending_bio_inc(sctx);
2725 spin_lock(&sctx->stat_lock);
2726 sctx->stat.malloc_errors++;
2727 spin_unlock(&sctx->stat_lock);
2750 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2757 struct btrfs_fs_info *fs_info = sctx->fs_info;
2784 spin_lock(&sctx->stat_lock);
2785 sctx->stat.malloc_errors++;
2786 spin_unlock(&sctx->stat_lock);
2792 sparity->sctx = sctx;
2877 spin_lock(&sctx->stat_lock);
2878 sctx->stat.uncorrectable_errors++;
2879 spin_unlock(&sctx->stat_lock);
2920 &sctx->csum_list, 1);
2931 scrub_free_csums(sctx);
2966 scrub_submit(sctx);
2967 mutex_lock(&sctx->wr_lock);
2968 scrub_wr_submit(sctx);
2969 mutex_unlock(&sctx->wr_lock);
2975 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2982 struct btrfs_fs_info *fs_info = sctx->fs_info;
3074 wait_event(sctx->list_wait,
3075 atomic_read(&sctx->bios_in_flight) == 0);
3116 atomic_read(&sctx->cancel_req)) {
3125 sctx->flush_all_writes = true;
3126 scrub_submit(sctx);
3127 mutex_lock(&sctx->wr_lock);
3128 scrub_wr_submit(sctx);
3129 mutex_unlock(&sctx->wr_lock);
3130 wait_event(sctx->list_wait,
3131 atomic_read(&sctx->bios_in_flight) == 0);
3132 sctx->flush_all_writes = false;
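
The sequence at lines 3125-3132 (repeated, with an extra workers_pending wait, at lines 3625-3643) is a drain point: partially filled read and write bios are forced out and the context waits until nothing is in flight before the flag is dropped again. A thin sketch of that sequence, with stub functions standing in for the submit/wait primitives listed elsewhere:

#include <stdbool.h>

struct ctx {
	bool flush_all_writes;
	/* counters, locks and bio slots as modelled earlier */
};

/* Stubs for the primitives shown elsewhere in this listing. */
static void submit_read_bio(struct ctx *c)              { (void)c; } /* scrub_submit()    */
static void submit_write_bio(struct ctx *c)             { (void)c; } /* scrub_wr_submit() */
static void wait_until_no_bios_in_flight(struct ctx *c) { (void)c; } /* wait_event(...)   */

/* Mirrors lines 3125-3132: flush everything, wait, drop the flag. */
static void quiesce(struct ctx *c)
{
	c->flush_all_writes = true;       /* completions now also flush writes */
	submit_read_bio(c);
	submit_write_bio(c);              /* done under wr_lock in the real code */
	wait_until_no_bios_in_flight(c);
	c->flush_all_writes = false;
}

The flush_all_writes flag is what makes the completion paths at lines 2083-2086 and 2287-2290 push pending write bios as well.
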
3145 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3243 spin_lock(&sctx->stat_lock);
3244 sctx->stat.uncorrectable_errors++;
3245 spin_unlock(&sctx->stat_lock);
3269 if (sctx->is_dev_replace)
3279 &sctx->csum_list, 1);
3284 ret = scrub_extent(sctx, map, extent_logical, extent_len,
3289 scrub_free_csums(sctx);
3312 ret = scrub_raid56_parity(sctx,
3341 spin_lock(&sctx->stat_lock);
3343 sctx->stat.last_physical = map->stripes[num].physical +
3346 sctx->stat.last_physical = physical;
3347 spin_unlock(&sctx->stat_lock);
3353 scrub_submit(sctx);
3354 mutex_lock(&sctx->wr_lock);
3355 scrub_wr_submit(sctx);
3356 mutex_unlock(&sctx->wr_lock);
3364 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3370 struct btrfs_fs_info *fs_info = sctx->fs_info;
3404 ret = scrub_stripe(sctx, map, scrub_dev, i,
3417 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3422 struct btrfs_fs_info *fs_info = sctx->fs_info;
3559 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
3562 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
3599 if (sctx->is_dev_replace) {
3612 ret = scrub_chunk(sctx, scrub_dev, chunk_offset, length,
3625 sctx->flush_all_writes = true;
3626 scrub_submit(sctx);
3627 mutex_lock(&sctx->wr_lock);
3628 scrub_wr_submit(sctx);
3629 mutex_unlock(&sctx->wr_lock);
3631 wait_event(sctx->list_wait,
3632 atomic_read(&sctx->bios_in_flight) == 0);
3641 wait_event(sctx->list_wait,
3642 atomic_read(&sctx->workers_pending) == 0);
3643 sctx->flush_all_writes = false;
3679 if (sctx->is_dev_replace &&
3684 if (sctx->stat.malloc_errors > 0) {
3698 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3705 struct btrfs_fs_info *fs_info = sctx->fs_info;
3722 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3728 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3817 struct scrub_ctx *sctx;
3864 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
3865 if (IS_ERR(sctx))
3866 return PTR_ERR(sctx);
3912 sctx->readonly = readonly;
3913 dev->scrub_ctx = sctx;
3937 spin_lock(&sctx->stat_lock);
3938 old_super_errors = sctx->stat.super_errors;
3939 spin_unlock(&sctx->stat_lock);
3947 ret = scrub_supers(sctx, dev);
3950 spin_lock(&sctx->stat_lock);
3956 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
3958 spin_unlock(&sctx->stat_lock);
3962 ret = scrub_enumerate_chunks(sctx, dev, start, end);
3965 wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3969 wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3972 memcpy(progress, &sctx->stat, sizeof(*progress));
3983 scrub_put_ctx(sctx);
4008 scrub_free_ctx(sctx);
4058 struct scrub_ctx *sctx;
4061 sctx = dev->scrub_ctx;
4062 if (!sctx) {
4066 atomic_inc(&sctx->cancel_req);
4082 struct scrub_ctx *sctx = NULL;
4087 sctx = dev->scrub_ctx;
4088 if (sctx)
4089 memcpy(progress, &sctx->stat, sizeof(*progress));
4092 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;