Lines matching refs: fs_info

188 	struct btrfs_fs_info	*fs_info;
250 static int init_scrub_stripe(struct btrfs_fs_info *fs_info,
257 stripe->nr_sectors = BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits;
275 stripe->csums = kcalloc(BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits,
276 fs_info->csum_size, GFP_KERNEL);
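
The init_scrub_stripe() lines above (250-276) size the per-stripe checksum buffer from the sector count. A minimal userspace sketch of that arithmetic, assuming 4K sectors and a 4-byte crc32c checksum (example values; BTRFS_STRIPE_LEN really is 64K in the kernel):

    #include <stdio.h>

    #define BTRFS_STRIPE_LEN (64 * 1024)    /* real kernel value */

    int main(void)
    {
        unsigned int sectorsize_bits = 12;  /* assumed: 4K sectors */
        unsigned int csum_size = 4;         /* assumed: crc32c */
        unsigned int nr_sectors = BTRFS_STRIPE_LEN >> sectorsize_bits;

        /* mirrors kcalloc(nr_sectors, csum_size, GFP_KERNEL) at line 275 */
        printf("nr_sectors=%u, csum buffer=%u bytes\n",
               nr_sectors, nr_sectors * csum_size);
        return 0;
    }

With those values a stripe holds 16 sectors and the csum buffer is 64 bytes.
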
292 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
294 while (atomic_read(&fs_info->scrub_pause_req)) {
295 mutex_unlock(&fs_info->scrub_lock);
296 wait_event(fs_info->scrub_pause_wait,
297 atomic_read(&fs_info->scrub_pause_req) == 0);
298 mutex_lock(&fs_info->scrub_lock);
302 static void scrub_pause_on(struct btrfs_fs_info *fs_info)
304 atomic_inc(&fs_info->scrubs_paused);
305 wake_up(&fs_info->scrub_pause_wait);
308 static void scrub_pause_off(struct btrfs_fs_info *fs_info)
310 mutex_lock(&fs_info->scrub_lock);
311 __scrub_blocked_if_needed(fs_info);
312 atomic_dec(&fs_info->scrubs_paused);
313 mutex_unlock(&fs_info->scrub_lock);
315 wake_up(&fs_info->scrub_pause_wait);
318 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
320 scrub_pause_on(fs_info);
321 scrub_pause_off(fs_info);
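
Lines 292-321 are the scrub pause handshake: a scrub announces it has parked by bumping scrubs_paused, and a pauser raises scrub_pause_req and waits until the running scrubs have parked. A userspace model of that handshake, assuming a pthread mutex and condition variable in place of the kernel's scrub_lock and scrub_pause_wait waitqueue; note the kernel's wait_event tolerates lockless wakeups, so scrub_pause_on() can skip the lock where this model cannot:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t scrub_pause_wait = PTHREAD_COND_INITIALIZER;
    static int scrub_pause_req;
    static int scrubs_paused;

    /* one scrub's pause point: announce, then park while a pause is requested */
    static void scrub_blocked_if_needed(void)
    {
        pthread_mutex_lock(&scrub_lock);
        scrubs_paused++;                        /* scrub_pause_on() */
        pthread_cond_broadcast(&scrub_pause_wait);
        while (scrub_pause_req)                 /* __scrub_blocked_if_needed() */
            pthread_cond_wait(&scrub_pause_wait, &scrub_lock);
        scrubs_paused--;                        /* scrub_pause_off() */
        pthread_cond_broadcast(&scrub_pause_wait);
        pthread_mutex_unlock(&scrub_lock);
    }

    static void *scrub_thread(void *arg)
    {
        (void)arg;
        for (int i = 0; i < 3; i++) {
            scrub_blocked_if_needed();
            printf("scrub: chunk %d\n", i);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&scrub_lock);        /* btrfs_scrub_pause() */
        scrub_pause_req++;
        pthread_mutex_unlock(&scrub_lock);

        pthread_create(&t, NULL, scrub_thread, NULL);

        pthread_mutex_lock(&scrub_lock);
        while (scrubs_paused == 0)              /* wait for the scrub to park */
            pthread_cond_wait(&scrub_pause_wait, &scrub_lock);
        printf("pause: scrub parked\n");
        scrub_pause_req--;                      /* btrfs_scrub_continue() */
        pthread_cond_broadcast(&scrub_pause_wait);
        pthread_mutex_unlock(&scrub_lock);

        pthread_join(t, NULL);
        return 0;
    }
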
344 struct btrfs_fs_info *fs_info, int is_dev_replace)
357 sctx->fs_info = fs_info;
365 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
378 WARN_ON(!fs_info->dev_replace.tgtdev);
379 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
399 struct btrfs_fs_info *fs_info = swarn->dev->fs_info;
404 local_root = btrfs_get_fs_root(fs_info, root, true);
454 btrfs_warn_in_rcu(fs_info,
460 fs_info->sectorsize, nlink,
468 btrfs_warn_in_rcu(fs_info,
482 struct btrfs_fs_info *fs_info = dev->fs_info;
494 btrfs_warn_in_rcu(fs_info, "%s on device %s, physical %llu",
507 ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
528 btrfs_warn(fs_info,
535 btrfs_warn_in_rcu(fs_info,
549 ctx.fs_info = fs_info;
566 if (!btrfs_is_zoned(sctx->fs_info))
585 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
586 int page_index = (sector_nr << fs_info->sectorsize_bits) >> PAGE_SHIFT;
594 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
596 return offset_in_page(sector_nr << fs_info->sectorsize_bits);
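
The page-lookup helpers at lines 585-596 map a sector number inside a stripe onto a backing page index and an offset within that page. A sketch of the index math, assuming 4K sectors and 4K pages (offset_in_page(x) is x masked by PAGE_SIZE - 1):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int sectorsize_bits = 12;  /* assumed: 4K sectors */
        const unsigned int page_shift = 12;       /* assumed: 4K pages */
        int sector_nr = 5;

        /* line 586: which backing page holds this sector */
        int page_index = (sector_nr << sectorsize_bits) >> page_shift;
        /* line 596: offset_in_page() masks by PAGE_SIZE - 1 */
        unsigned int pgoff =
            (sector_nr << sectorsize_bits) & ((1u << page_shift) - 1);

        printf("sector %d -> page %d, offset %u\n",
               sector_nr, page_index, pgoff);
        return 0;
    }
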
601 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
602 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
603 const u64 logical = stripe->logical + (sector_nr << fs_info->sectorsize_bits);
606 SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
617 memcpy(on_disk_csum, header->csum, fs_info->csum_size);
622 btrfs_warn_rl(fs_info,
628 if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
632 btrfs_warn_rl(fs_info,
635 header->fsid, fs_info->fs_devices->fsid);
638 if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
642 btrfs_warn_rl(fs_info,
645 header->chunk_tree_uuid, fs_info->chunk_tree_uuid);
650 shash->tfm = fs_info->csum_shash;
653 BTRFS_CSUM_SIZE, fs_info->sectorsize - BTRFS_CSUM_SIZE);
660 fs_info->sectorsize);
664 if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
667 btrfs_warn_rl(fs_info,
670 CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
671 CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum));
678 btrfs_warn_rl(fs_info,
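
The metadata verifier at lines 601-680 rejects a tree block in a fixed order: wrong bytenr, wrong fsid, wrong chunk tree UUID, and only then a checksum mismatch over everything past the csum field. A condensed userspace sketch of that order; fake_csum() is an invented stand-in for the kernel's crypto shash, and the struct layout is illustrative, not the real on-disk btrfs_header:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CSUM_SIZE 32
    #define UUID_SIZE 16
    #define NODESIZE  (16 * 1024)    /* assumed: 16K metadata nodes */

    /* only the fields the checks touch; offsets are illustrative */
    struct header {
        uint8_t  csum[CSUM_SIZE];
        uint8_t  fsid[UUID_SIZE];
        uint64_t bytenr;
        uint8_t  chunk_tree_uuid[UUID_SIZE];
    };

    /* stand-in for the shash over [CSUM_SIZE, nodesize), lines 650-661 */
    static void fake_csum(const uint8_t *buf, size_t len, uint8_t *out)
    {
        memset(out, 0, CSUM_SIZE);
        for (size_t i = 0; i < len; i++)
            out[i % CSUM_SIZE] ^= buf[i];
    }

    static int verify_metadata(const uint8_t *node, uint64_t expect_bytenr,
                               const uint8_t *fsid, const uint8_t *ct_uuid)
    {
        const struct header *h = (const struct header *)node;
        uint8_t calc[CSUM_SIZE];

        if (h->bytenr != expect_bytenr)         /* check 1: right location */
            return -1;
        if (memcmp(h->fsid, fsid, UUID_SIZE))   /* check 2: right filesystem */
            return -1;
        if (memcmp(h->chunk_tree_uuid, ct_uuid, UUID_SIZE))  /* check 3 */
            return -1;
        /* check 4: csum covers everything after the csum field itself */
        fake_csum(node + CSUM_SIZE, NODESIZE - CSUM_SIZE, calc);
        return memcmp(calc, h->csum, CSUM_SIZE) ? -1 : 0;
    }

    int main(void)
    {
        static uint8_t node[NODESIZE];
        struct header *h = (struct header *)node;
        uint8_t fsid[UUID_SIZE] = { 1 }, ct_uuid[UUID_SIZE] = { 2 };

        h->bytenr = 30408704;    /* example logical address */
        memcpy(h->fsid, fsid, UUID_SIZE);
        memcpy(h->chunk_tree_uuid, ct_uuid, UUID_SIZE);
        fake_csum(node + CSUM_SIZE, NODESIZE - CSUM_SIZE, h->csum);

        printf("verify: %d\n", verify_metadata(node, 30408704, fsid, ct_uuid));
        return 0;
    }
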
692 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
694 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
721 btrfs_warn_rl(fs_info,
724 (sector_nr << fs_info->sectorsize_bits),
741 ret = btrfs_check_sector_csum(fs_info, page, pgoff, csum_buf, sector->csum);
754 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
755 const u32 sectors_per_tree = fs_info->nodesize >> fs_info->sectorsize_bits;
787 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
800 bio_size >> fs_info->sectorsize_bits);
802 bio_size >> fs_info->sectorsize_bits);
805 bio_size >> fs_info->sectorsize_bits);
821 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
850 fs_info, scrub_repair_read_endio, stripe);
852 (i << fs_info->sectorsize_bits)) >> SECTOR_SHIFT;
855 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
856 ASSERT(ret == fs_info->sectorsize);
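
Line 852 converts a byte address on the device into the block layer's 512-byte sector unit (SECTOR_SHIFT is 9 in the kernel) before the bio is filled. The conversion, with assumed example values:

    #include <stdio.h>

    #define SECTOR_SHIFT 9    /* block layer sectors are 512 bytes */

    int main(void)
    {
        unsigned long long physical = 1048576; /* assumed stripe start, bytes */
        unsigned int sectorsize_bits = 12;     /* assumed: 4K fs sectors */
        int i = 3;                             /* sector index in the stripe */

        unsigned long long bi_sector =
            (physical + ((unsigned long long)i << sectorsize_bits))
            >> SECTOR_SHIFT;
        printf("bi_sector = %llu\n", bi_sector);  /* (1M + 12K) / 512 = 2072 */
        return 0;
    }
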
872 struct btrfs_fs_info *fs_info = sctx->fs_info;
891 u64 mapped_len = fs_info->sectorsize;
898 ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
940 btrfs_err_rl_in_rcu(fs_info,
945 btrfs_err_rl_in_rcu(fs_info,
954 btrfs_err_rl_in_rcu(fs_info,
959 btrfs_err_rl_in_rcu(fs_info,
981 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
982 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
1013 struct btrfs_fs_info *fs_info = sctx->fs_info;
1014 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1071 fs_info->sectorsize, true);
1082 if (btrfs_is_zoned(fs_info)) {
1084 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1111 num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
1123 queue_work(stripe->bg->fs_info->scrub_workers, &stripe->work);
1130 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1144 bio_size >> fs_info->sectorsize_bits);
1157 struct btrfs_fs_info *fs_info = sctx->fs_info;
1165 if (!btrfs_is_zoned(fs_info))
1177 if (!test_bit(bio_off >> fs_info->sectorsize_bits,
1198 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1217 fs_info, scrub_write_endio, stripe);
1219 (sector_nr << fs_info->sectorsize_bits)) >>
1222 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1223 ASSERT(ret == fs_info->sectorsize);
1337 struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
1345 len = fs_info->nodesize;
1376 struct btrfs_fs_info *fs_info = extent_root->fs_info;
1384 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1447 *size_ret = path->nodes[0]->fs_info->nodesize;
1458 struct btrfs_fs_info *fs_info = sctx->fs_info;
1461 if (!btrfs_is_zoned(fs_info))
1470 btrfs_err(fs_info,
1479 static void fill_one_extent_info(struct btrfs_fs_info *fs_info,
1487 cur_logical += fs_info->sectorsize) {
1489 fs_info->sectorsize_bits;
1529 struct btrfs_fs_info *fs_info = bg->fs_info;
1530 struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
1531 struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
1576 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1596 fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
1613 ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1625 sector_nr * fs_info->csum_size;
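
The ASSERT at line 1613 is what lets each stripe track its sectors in plain unsigned long bitmaps, and line 1625 shows where sector N's checksum lives in the buffer allocated by init_scrub_stripe(). Both facts in a compilable sketch, assuming 64-bit longs, 4K sectors, and a 4-byte crc32c csum:

    #include <assert.h>
    #include <stdio.h>

    #define BTRFS_STRIPE_LEN (64 * 1024)    /* real kernel value */

    int main(void)
    {
        unsigned int sectorsize_bits = 12;  /* assumed: 4K sectors */
        unsigned int csum_size = 4;         /* assumed: crc32c */
        unsigned int nr_sectors = BTRFS_STRIPE_LEN >> sectorsize_bits;

        /* the line 1613 ASSERT: one bit per sector fits in a single long */
        assert(8 * sizeof(unsigned long) >= nr_sectors);   /* 64 >= 16 */

        /* line 1625: sector N's csum sits at csums + N * csum_size */
        int sector_nr = 7;
        printf("sector %d csum at byte offset %u of %u\n",
               sector_nr, sector_nr * csum_size, nr_sectors * csum_size);
        return 0;
    }
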
1651 struct btrfs_fs_info *fs_info = sctx->fs_info;
1655 fs_info->sectorsize_bits;
1662 bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
1672 ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
1674 ASSERT(ret == fs_info->sectorsize);
1683 (fs_info->dev_replace.cont_reading_from_srcdev_mode ==
1686 int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
1700 struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
1702 btrfs_err(fs_info,
1705 stripe->logical + (i << fs_info->sectorsize_bits));
1736 struct btrfs_fs_info *fs_info = sctx->fs_info;
1777 ASSERT(stripe->dev == fs_info->dev_replace.srcdev);
1850 struct btrfs_fs_info *fs_info = sctx->fs_info;
1932 ASSERT(!btrfs_is_zoned(sctx->fs_info));
1953 btrfs_err(fs_info,
1970 btrfs_bio_counter_inc_blocked(fs_info);
1971 ret = btrfs_map_block(fs_info, BTRFS_MAP_WRITE, full_stripe_start,
1975 btrfs_bio_counter_dec(fs_info);
1979 BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);
1983 btrfs_bio_counter_dec(fs_info);
1997 btrfs_bio_counter_dec(fs_info);
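
Lines 1970-1997 pair btrfs_bio_counter_inc_blocked() with a btrfs_bio_counter_dec() on every exit of the RAID56 parity path (error returns at 1975 and 1983, success at 1997). A generic userspace sketch of that acquire/release shape with stub functions; all names here are illustrative:

    #include <stdio.h>

    static int bio_counter;                     /* stands in for the fs counter */
    static void counter_inc(void) { bio_counter++; }
    static void counter_dec(void) { bio_counter--; }
    static int map_block(void) { return 0; }    /* stub: 0 = success */

    static int scrub_parity_stripe(void)        /* illustrative name */
    {
        int ret;

        counter_inc();          /* btrfs_bio_counter_inc_blocked(), line 1970 */
        ret = map_block();      /* btrfs_map_block(), line 1971 */
        if (ret < 0)
            goto out;           /* error exits must still drop the counter */

        /* ... build and submit the parity scrub, wait for completion ... */
        ret = 0;
    out:
        counter_dec();          /* btrfs_bio_counter_dec() */
        return ret;
    }

    int main(void)
    {
        scrub_parity_stripe();
        printf("counter balanced: %d\n", bio_counter);
        return 0;
    }
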
2020 struct btrfs_fs_info *fs_info = sctx->fs_info;
2034 if (atomic_read(&fs_info->scrub_cancel_req) ||
2040 if (atomic_read(&fs_info->scrub_pause_req)) {
2042 scrub_blocked_if_needed(fs_info);
2151 struct btrfs_fs_info *fs_info = sctx->fs_info;
2172 scrub_blocked_if_needed(fs_info);
2193 ret = init_scrub_stripe(fs_info,
2320 struct btrfs_fs_info *fs_info = sctx->fs_info;
2321 struct extent_map_tree *map_tree = &fs_info->mapping_tree;
2366 struct btrfs_fs_info *fs_info = cache->fs_info;
2369 if (!btrfs_is_zoned(fs_info))
2374 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start, cache->length);
2388 struct btrfs_fs_info *fs_info = sctx->fs_info;
2389 struct btrfs_root *root = fs_info->dev_root;
2398 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
2462 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
2494 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2526 scrub_pause_on(fs_info);
2563 scrub_pause_off(fs_info);
2588 btrfs_warn(fs_info,
2591 scrub_pause_off(fs_info);
2595 btrfs_warn(fs_info,
2599 scrub_pause_off(fs_info);
2610 btrfs_wait_ordered_roots(fs_info, U64_MAX, cache->start,
2614 scrub_pause_off(fs_info);
2647 if (btrfs_test_opt(fs_info, DISCARD_ASYNC))
2648 btrfs_discard_queue_work(&fs_info->discard_ctl,
2682 struct btrfs_fs_info *fs_info = sctx->fs_info;
2696 ret = btrfs_check_super_csum(fs_info, sb);
2698 btrfs_err_rl(fs_info,
2704 btrfs_err_rl(fs_info,
2711 return btrfs_validate_super(fs_info, sb, -1);
2722 struct btrfs_fs_info *fs_info = sctx->fs_info;
2724 if (BTRFS_FS_ERROR(fs_info))
2736 if (scrub_dev->fs_devices != fs_info->fs_devices)
2739 gen = fs_info->last_trans_committed;
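
The super block scrub runs the checks at lines 2696-2711 against every copy of the super block on the device. To my reading those copies sit at the offsets computed by btrfs_sb_offset(): the primary at 64K, mirror N at 16K << (12 * N). A sketch of that formula:

    #include <stdio.h>

    #define BTRFS_SUPER_INFO_OFFSET  (64 * 1024ULL)
    #define BTRFS_SUPER_MIRROR_MAX   3
    #define BTRFS_SUPER_MIRROR_SHIFT 12

    static unsigned long long sb_offset(int mirror)
    {
        if (mirror)
            return 16384ULL << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
        return BTRFS_SUPER_INFO_OFFSET;
    }

    int main(void)
    {
        /* expected: 65536 (64K), 67108864 (64M), 274877906944 (256G) */
        for (int i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++)
            printf("super mirror %d at byte %llu\n", i, sb_offset(i));
        return 0;
    }
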
2760 static void scrub_workers_put(struct btrfs_fs_info *fs_info)
2762 if (refcount_dec_and_mutex_lock(&fs_info->scrub_workers_refcnt,
2763 &fs_info->scrub_lock)) {
2764 struct workqueue_struct *scrub_workers = fs_info->scrub_workers;
2766 fs_info->scrub_workers = NULL;
2767 mutex_unlock(&fs_info->scrub_lock);
2775 * get a reference count on fs_info->scrub_workers. start worker if necessary
2777 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info)
2781 int max_active = fs_info->thread_pool_size;
2784 if (refcount_inc_not_zero(&fs_info->scrub_workers_refcnt))
2791 mutex_lock(&fs_info->scrub_lock);
2792 if (refcount_read(&fs_info->scrub_workers_refcnt) == 0) {
2793 ASSERT(fs_info->scrub_workers == NULL);
2794 fs_info->scrub_workers = scrub_workers;
2795 refcount_set(&fs_info->scrub_workers_refcnt, 1);
2796 mutex_unlock(&fs_info->scrub_lock);
2800 refcount_inc(&fs_info->scrub_workers_refcnt);
2801 mutex_unlock(&fs_info->scrub_lock);
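
scrub_workers_get()/scrub_workers_put() (lines 2760-2801) implement lazy workqueue creation: a refcount_inc_not_zero() fast path, then allocation outside scrub_lock, installed under the lock only if the refcount is still zero, with the losing copy freed. A userspace model of the same pattern; a mutex-guarded counter stands in for the kernel refcount, malloc/free for alloc_workqueue()/destroy_workqueue():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
    static int workers_refcnt;
    static void *scrub_workers;            /* stands in for the workqueue */

    static int workers_get(void)
    {
        void *new_workers;

        pthread_mutex_lock(&scrub_lock);   /* refcount_inc_not_zero() fast path */
        if (workers_refcnt > 0) {
            workers_refcnt++;
            pthread_mutex_unlock(&scrub_lock);
            return 0;
        }
        pthread_mutex_unlock(&scrub_lock);

        new_workers = malloc(1);           /* allocate outside the lock */
        if (!new_workers)
            return -1;

        pthread_mutex_lock(&scrub_lock);
        if (workers_refcnt == 0) {         /* we won the race: install ours */
            scrub_workers = new_workers;
            workers_refcnt = 1;
            pthread_mutex_unlock(&scrub_lock);
            return 0;
        }
        workers_refcnt++;                  /* someone beat us; drop our copy */
        pthread_mutex_unlock(&scrub_lock);
        free(new_workers);
        return 0;
    }

    static void workers_put(void)
    {
        void *to_free = NULL;

        pthread_mutex_lock(&scrub_lock);   /* refcount_dec_and_mutex_lock() */
        if (--workers_refcnt == 0) {
            to_free = scrub_workers;
            scrub_workers = NULL;
        }
        pthread_mutex_unlock(&scrub_lock);
        free(to_free);                     /* destroy_workqueue() */
    }

    int main(void)
    {
        workers_get();
        workers_get();
        workers_put();
        workers_put();
        printf("refcnt=%d\n", workers_refcnt);
        return 0;
    }
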
2809 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
2820 if (btrfs_fs_closing(fs_info))
2824 ASSERT(fs_info->nodesize <= BTRFS_STRIPE_LEN);
2831 ASSERT(fs_info->nodesize <=
2832 SCRUB_MAX_SECTORS_PER_BLOCK << fs_info->sectorsize_bits);
2835 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2839 ret = scrub_workers_get(fs_info);
2843 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2844 dev = btrfs_find_device(fs_info->fs_devices, &args);
2847 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2854 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2855 btrfs_err_in_rcu(fs_info,
2862 mutex_lock(&fs_info->scrub_lock);
2865 mutex_unlock(&fs_info->scrub_lock);
2866 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2871 down_read(&fs_info->dev_replace.rwsem);
2874 btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
2875 up_read(&fs_info->dev_replace.rwsem);
2876 mutex_unlock(&fs_info->scrub_lock);
2877 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2881 up_read(&fs_info->dev_replace.rwsem);
2885 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2891 __scrub_blocked_if_needed(fs_info);
2892 atomic_inc(&fs_info->scrubs_running);
2893 mutex_unlock(&fs_info->scrub_lock);
2902 * before incrementing fs_info->scrubs_running).
2912 btrfs_info(fs_info, "scrub: started on devid %llu", devid);
2917 mutex_lock(&fs_info->fs_devices->device_list_mutex);
2919 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2936 atomic_dec(&fs_info->scrubs_running);
2937 wake_up(&fs_info->scrub_pause_wait);
2943 btrfs_info(fs_info, "scrub: %s on devid %llu with status: %d",
2946 mutex_lock(&fs_info->scrub_lock);
2948 mutex_unlock(&fs_info->scrub_lock);
2950 scrub_workers_put(fs_info);
2960 trans = btrfs_start_transaction(fs_info->tree_root, 0);
2963 btrfs_err(fs_info,
2969 btrfs_err(fs_info,
2974 scrub_workers_put(fs_info);
2981 void btrfs_scrub_pause(struct btrfs_fs_info *fs_info)
2983 mutex_lock(&fs_info->scrub_lock);
2984 atomic_inc(&fs_info->scrub_pause_req);
2985 while (atomic_read(&fs_info->scrubs_paused) !=
2986 atomic_read(&fs_info->scrubs_running)) {
2987 mutex_unlock(&fs_info->scrub_lock);
2988 wait_event(fs_info->scrub_pause_wait,
2989 atomic_read(&fs_info->scrubs_paused) ==
2990 atomic_read(&fs_info->scrubs_running));
2991 mutex_lock(&fs_info->scrub_lock);
2993 mutex_unlock(&fs_info->scrub_lock);
2996 void btrfs_scrub_continue(struct btrfs_fs_info *fs_info)
2998 atomic_dec(&fs_info->scrub_pause_req);
2999 wake_up(&fs_info->scrub_pause_wait);
3002 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3004 mutex_lock(&fs_info->scrub_lock);
3005 if (!atomic_read(&fs_info->scrubs_running)) {
3006 mutex_unlock(&fs_info->scrub_lock);
3010 atomic_inc(&fs_info->scrub_cancel_req);
3011 while (atomic_read(&fs_info->scrubs_running)) {
3012 mutex_unlock(&fs_info->scrub_lock);
3013 wait_event(fs_info->scrub_pause_wait,
3014 atomic_read(&fs_info->scrubs_running) == 0);
3015 mutex_lock(&fs_info->scrub_lock);
3017 atomic_dec(&fs_info->scrub_cancel_req);
3018 mutex_unlock(&fs_info->scrub_lock);
3025 struct btrfs_fs_info *fs_info = dev->fs_info;
3028 mutex_lock(&fs_info->scrub_lock);
3031 mutex_unlock(&fs_info->scrub_lock);
3036 mutex_unlock(&fs_info->scrub_lock);
3037 wait_event(fs_info->scrub_pause_wait,
3039 mutex_lock(&fs_info->scrub_lock);
3041 mutex_unlock(&fs_info->scrub_lock);
3046 int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
3053 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3054 dev = btrfs_find_device(fs_info->fs_devices, &args);
3059 mutex_unlock(&fs_info->fs_devices->device_list_mutex);