Lines Matching defs:sctx (fs/btrfs/scrub.c)

52  * How many groups we have for each sctx.
109 struct scrub_ctx *sctx;
246 stripe->sctx = NULL;
290 static void scrub_put_ctx(struct scrub_ctx *sctx);
324 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
328 if (!sctx)
332 release_scrub_stripe(&sctx->stripes[i]);
334 kvfree(sctx);
337 static void scrub_put_ctx(struct scrub_ctx *sctx)
339 if (refcount_dec_and_test(&sctx->refs))
340 scrub_free_ctx(sctx);
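
The block above (lines 290-340) is the context's refcount-based teardown: scrub_put_ctx() drops a reference, and only the final put runs scrub_free_ctx(), which releases each inline stripe and kvfree()s the context. A minimal userspace sketch of the same put/free shape, using C11 atomics in place of the kernel's refcount_t (all demo_* names are illustrative, not kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct demo_ctx {
            atomic_int refs;
            /* per-stripe resources would live here */
    };

    /* Final teardown; runs exactly once, after the last reference is dropped. */
    static void demo_free_ctx(struct demo_ctx *ctx)
    {
            if (!ctx)
                    return;
            /* release inline stripes here, then the context itself */
            free(ctx);
    }

    /* Drop one reference; whoever drops the last one frees the context. */
    static void demo_put_ctx(struct demo_ctx *ctx)
    {
            if (atomic_fetch_sub(&ctx->refs, 1) == 1)
                    demo_free_ctx(ctx);
    }
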
346 struct scrub_ctx *sctx;
349 /* Since sctx has inline 128 stripes, it can go beyond 64K easily. Use
352 sctx = kvzalloc(sizeof(*sctx), GFP_KERNEL);
353 if (!sctx)
355 refcount_set(&sctx->refs, 1);
356 sctx->is_dev_replace = is_dev_replace;
357 sctx->fs_info = fs_info;
358 sctx->extent_path.search_commit_root = 1;
359 sctx->extent_path.skip_locking = 1;
360 sctx->csum_path.search_commit_root = 1;
361 sctx->csum_path.skip_locking = 1;
365 ret = init_scrub_stripe(fs_info, &sctx->stripes[i]);
368 sctx->stripes[i].sctx = sctx;
370 sctx->first_free = 0;
371 atomic_set(&sctx->cancel_req, 0);
373 spin_lock_init(&sctx->stat_lock);
374 sctx->throttle_deadline = 0;
376 mutex_init(&sctx->wr_lock);
379 sctx->wr_tgtdev = fs_info->dev_replace.tgtdev;
382 return sctx;
385 scrub_free_ctx(sctx);
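
scrub_setup_ctx() (lines 346-385) allocates the context with kvzalloc() because the inline stripe array pushes it well past 64K, seeds the refcount, initializes every stripe plus its back-pointer (line 368), and unwinds on any stripe-init failure. A hedged sketch of that allocate/init/unwind shape, with calloc() standing in for kvzalloc() and illustrative demo_* names:

    #include <stdlib.h>

    #define DEMO_TOTAL_STRIPES 128    /* matches the "inline 128 stripes" comment */

    struct demo_ctx;
    struct demo_stripe {
            struct demo_ctx *ctx;     /* back-pointer, like stripe->sctx */
            void *pages;
    };
    struct demo_ctx {
            int refs;
            struct demo_stripe stripes[DEMO_TOTAL_STRIPES];
    };

    static int demo_init_stripe(struct demo_stripe *s)
    {
            s->pages = calloc(1, 4096);
            return s->pages ? 0 : -1;
    }

    /* calloc() stands in for kvzalloc(): the inline stripe array makes the
     * context too large to allocate comfortably as one kmalloc() chunk. */
    static struct demo_ctx *demo_setup_ctx(void)
    {
            struct demo_ctx *ctx = calloc(1, sizeof(*ctx));

            if (!ctx)
                    return NULL;
            ctx->refs = 1;
            for (int i = 0; i < DEMO_TOTAL_STRIPES; i++) {
                    if (demo_init_stripe(&ctx->stripes[i]) < 0) {
                            /* unwind; the kernel version jumps to scrub_free_ctx() */
                            while (--i >= 0)
                                    free(ctx->stripes[i].pages);
                            free(ctx);
                            return NULL;
                    }
                    ctx->stripes[i].ctx = ctx;
            }
            return ctx;
    }
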
561 static int fill_writer_pointer_gap(struct scrub_ctx *sctx, u64 physical)
566 if (!btrfs_is_zoned(sctx->fs_info))
569 if (!btrfs_dev_is_sequential(sctx->wr_tgtdev, physical))
572 if (sctx->write_pointer < physical) {
573 length = physical - sctx->write_pointer;
575 ret = btrfs_zoned_issue_zeroout(sctx->wr_tgtdev,
576 sctx->write_pointer, length);
578 sctx->write_pointer = physical;
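
fill_writer_pointer_gap() (lines 561-578) only acts on zoned replace targets: if the cached write pointer trails the physical offset about to be written, the gap is zero-filled so the sequential zone's write pointer lines up, and the cached pointer is advanced only on success. A simplified sketch of the gap handling (the zeroout helper here is a stub standing in for btrfs_zoned_issue_zeroout()):

    #include <stdint.h>

    typedef uint64_t u64;

    /* Stub for zero-filling a range of a sequential zone; always succeeds here. */
    static int demo_issue_zeroout(u64 start, u64 len)
    {
            (void)start;
            (void)len;
            return 0;
    }

    /*
     * If the cached write pointer lags behind 'physical', zero-fill the gap;
     * advance the cached pointer only when the zero-fill succeeded.
     */
    static int demo_fill_writer_pointer_gap(u64 *write_pointer, u64 physical)
    {
            int ret = 0;

            if (*write_pointer < physical) {
                    u64 length = physical - *write_pointer;

                    ret = demo_issue_zeroout(*write_pointer, length);
                    if (!ret)
                            *write_pointer = physical;
            }
            return ret;
    }
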
867 static void scrub_stripe_report_errors(struct scrub_ctx *sctx,
872 struct btrfs_fs_info *fs_info = sctx->fs_info;
978 spin_lock(&sctx->stat_lock);
979 sctx->stat.data_extents_scrubbed += stripe->nr_data_extents;
980 sctx->stat.tree_extents_scrubbed += stripe->nr_meta_extents;
981 sctx->stat.data_bytes_scrubbed += nr_data_sectors << fs_info->sectorsize_bits;
982 sctx->stat.tree_bytes_scrubbed += nr_meta_sectors << fs_info->sectorsize_bits;
983 sctx->stat.no_csum += nr_nodatacsum_sectors;
984 sctx->stat.read_errors += stripe->init_nr_io_errors;
985 sctx->stat.csum_errors += stripe->init_nr_csum_errors;
986 sctx->stat.verify_errors += stripe->init_nr_meta_errors;
987 sctx->stat.uncorrectable_errors +=
989 sctx->stat.corrected_errors += nr_repaired_sectors;
990 spin_unlock(&sctx->stat_lock);
993 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1012 struct scrub_ctx *sctx = stripe->sctx;
1013 struct btrfs_fs_info *fs_info = sctx->fs_info;
1084 btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
1085 } else if (!sctx->readonly) {
1090 scrub_write_sectors(sctx, stripe, repaired, false);
1094 scrub_stripe_report_errors(sctx, stripe);
1153 static void scrub_submit_write_bio(struct scrub_ctx *sctx,
1157 struct btrfs_fs_info *fs_info = sctx->fs_info;
1162 fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
1179 sctx->write_pointer += bio_len;
1195 static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
1212 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1226 scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
1233 static void scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device,
1255 if (sctx->throttle_deadline == 0) {
1256 sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
1257 sctx->throttle_sent = 0;
1261 if (ktime_before(now, sctx->throttle_deadline)) {
1263 sctx->throttle_sent += bio_size;
1264 if (sctx->throttle_sent <= div_u64(bwlimit, div))
1268 delta = ktime_ms_delta(sctx->throttle_deadline, now);
1282 sctx->throttle_deadline = 0;
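
scrub_throttle_dev_io() (lines 1233-1282) splits each second into div time slices: at the start of a slice it arms throttle_deadline and clears throttle_sent, lets IO through until the per-slice share of the bandwidth limit is spent, then sleeps out the rest of the slice. A self-contained userspace sketch of that deadline/byte-budget throttle (CLOCK_MONOTONIC replaces ktime; names and the millisecond granularity are illustrative):

    #include <stdint.h>
    #include <time.h>

    /* Current time in milliseconds (CLOCK_MONOTONIC). */
    static int64_t now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    struct demo_throttle {
            int64_t deadline_ms;    /* end of the current time slice, 0 = unset */
            uint64_t sent;          /* bytes submitted within the slice */
    };

    /*
     * Account bio_size bytes against a bwlimit (bytes/sec) budget.  Each second
     * is split into 'div' slices; once a slice's byte budget is spent, sleep
     * until its deadline so the next call starts a fresh slice.
     */
    static void demo_throttle(struct demo_throttle *t, uint64_t bio_size,
                              uint64_t bwlimit, unsigned int div)
    {
            int64_t now = now_ms();

            /* No active slice (or it already expired): arm a new one. */
            if (t->deadline_ms == 0 || now >= t->deadline_ms) {
                    t->deadline_ms = now + 1000 / div;
                    t->sent = 0;
            }

            t->sent += bio_size;
            if (t->sent <= bwlimit / div)
                    return;                 /* still inside this slice's budget */

            /* Budget spent: sleep out the remainder of the slice. */
            int64_t delta = t->deadline_ms - now;
            struct timespec delay = {
                    .tv_sec  = delta / 1000,
                    .tv_nsec = (delta % 1000) * 1000000L,
            };
            nanosleep(&delay, NULL);
            t->deadline_ms = 0;             /* next call arms a new slice */
    }
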
1455 static int sync_write_pointer_for_zoned(struct scrub_ctx *sctx, u64 logical,
1458 struct btrfs_fs_info *fs_info = sctx->fs_info;
1464 mutex_lock(&sctx->wr_lock);
1465 if (sctx->write_pointer < physical_end) {
1466 ret = btrfs_sync_zone_write_pointer(sctx->wr_tgtdev, logical,
1468 sctx->write_pointer);
1473 mutex_unlock(&sctx->wr_lock);
1474 btrfs_dev_clear_zone_empty(sctx->wr_tgtdev, physical);
1648 static void scrub_submit_initial_read(struct scrub_ctx *sctx,
1651 struct btrfs_fs_info *fs_info = sctx->fs_info;
1682 if (sctx->is_dev_replace &&
1712 static void submit_initial_group_read(struct scrub_ctx *sctx,
1721 scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
1725 struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
1729 scrub_submit_initial_read(sctx, stripe);
1734 static int flush_scrub_stripes(struct scrub_ctx *sctx)
1736 struct btrfs_fs_info *fs_info = sctx->fs_info;
1738 const int nr_stripes = sctx->cur_stripe;
1744 ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &sctx->stripes[0].state));
1750 submit_initial_group_read(sctx, first_slot, nr_stripes - first_slot);
1754 stripe = &sctx->stripes[i];
1761 if (sctx->is_dev_replace) {
1767 if (stripe_has_metadata_error(&sctx->stripes[i])) {
1775 stripe = &sctx->stripes[i];
1781 scrub_write_sectors(sctx, stripe, good, true);
1787 stripe = &sctx->stripes[i];
1793 sctx->cur_stripe = 0;
1802 static int queue_scrub_stripe(struct scrub_ctx *sctx, struct btrfs_block_group *bg,
1814 ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
1819 stripe = &sctx->stripes[sctx->cur_stripe];
1821 ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
1822 &sctx->csum_path, dev, physical,
1828 sctx->cur_stripe++;
1831 if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) {
1832 const int first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
1834 submit_initial_group_read(sctx, first_slot, SCRUB_STRIPES_PER_GROUP);
1838 if (sctx->cur_stripe == SCRUB_TOTAL_STRIPES)
1839 return flush_scrub_stripes(sctx);
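
queue_scrub_stripe() (lines 1802-1839) fills the next free slot in the inline stripe array, kicks off a read for every completed group of SCRUB_STRIPES_PER_GROUP stripes, and flushes once all SCRUB_TOTAL_STRIPES slots are used; scrub_stripe() flushes whatever partial batch remains at the end (line 2287). A small sketch of just that batching arithmetic (the group/total constants below are illustrative, not necessarily the kernel's values):

    #include <stdio.h>

    #define DEMO_STRIPES_PER_GROUP 8
    #define DEMO_GROUPS_PER_CTX    16
    #define DEMO_TOTAL_STRIPES     (DEMO_STRIPES_PER_GROUP * DEMO_GROUPS_PER_CTX)

    static void demo_submit_group(int first_slot, int nr)
    {
            printf("submit read for slots %d..%d\n", first_slot, first_slot + nr - 1);
    }

    static void demo_flush_all(int *cur_stripe)
    {
            printf("flush: wait for %d queued stripes, then reset\n", *cur_stripe);
            *cur_stripe = 0;
    }

    /* Queue one stripe: submit each full group eagerly, flush when the array is full. */
    static void demo_queue_stripe(int *cur_stripe)
    {
            (*cur_stripe)++;

            if (*cur_stripe % DEMO_STRIPES_PER_GROUP == 0)
                    demo_submit_group(*cur_stripe - DEMO_STRIPES_PER_GROUP,
                                      DEMO_STRIPES_PER_GROUP);

            if (*cur_stripe == DEMO_TOTAL_STRIPES)
                    demo_flush_all(cur_stripe);
    }

    int main(void)
    {
            int cur = 0;

            for (int i = 0; i < 300; i++)
                    demo_queue_stripe(&cur);
            if (cur)
                    demo_flush_all(&cur);   /* final partial batch */
            return 0;
    }
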
1843 static int scrub_raid56_parity_stripe(struct scrub_ctx *sctx,
1850 struct btrfs_fs_info *fs_info = sctx->fs_info;
1863 ASSERT(sctx->raid56_data_stripes);
1880 stripe = &sctx->raid56_data_stripes[i];
1910 stripe = &sctx->raid56_data_stripes[i];
1922 stripe = &sctx->raid56_data_stripes[i];
1923 scrub_submit_initial_read(sctx, stripe);
1926 stripe = &sctx->raid56_data_stripes[i];
1932 ASSERT(!btrfs_is_zoned(sctx->fs_info));
1944 stripe = &sctx->raid56_data_stripes[i];
1988 stripe = &sctx->raid56_data_stripes[i];
2013 static int scrub_simple_mirror(struct scrub_ctx *sctx,
2020 struct btrfs_fs_info *fs_info = sctx->fs_info;
2035 atomic_read(&sctx->cancel_req)) {
2053 ret = queue_scrub_stripe(sctx, bg, device, mirror_num,
2058 sctx->stat.last_physical = physical + logical_length;
2112 static int scrub_simple_stripe(struct scrub_ctx *sctx,
2132 ret = scrub_simple_mirror(sctx, bg, map, cur_logical,
2145 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2151 struct btrfs_fs_info *fs_info = sctx->fs_info;
2170 ASSERT(sctx->extent_path.nodes[0] == NULL);
2174 if (sctx->is_dev_replace &&
2175 btrfs_dev_is_sequential(sctx->wr_tgtdev, physical)) {
2176 mutex_lock(&sctx->wr_lock);
2177 sctx->write_pointer = physical;
2178 mutex_unlock(&sctx->wr_lock);
2183 ASSERT(sctx->raid56_data_stripes == NULL);
2185 sctx->raid56_data_stripes = kcalloc(nr_data_stripes(map),
2188 if (!sctx->raid56_data_stripes) {
2194 &sctx->raid56_data_stripes[i]);
2197 sctx->raid56_data_stripes[i].bg = bg;
2198 sctx->raid56_data_stripes[i].sctx = sctx;
2218 ret = scrub_simple_mirror(sctx, bg, map, bg->start, bg->length,
2225 ret = scrub_simple_stripe(sctx, bg, map, scrub_dev, stripe_index);
2254 ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
2269 ret = scrub_simple_mirror(sctx, bg, map, logical, BTRFS_STRIPE_LEN,
2276 spin_lock(&sctx->stat_lock);
2278 sctx->stat.last_physical =
2281 sctx->stat.last_physical = physical;
2282 spin_unlock(&sctx->stat_lock);
2287 ret2 = flush_scrub_stripes(sctx);
2290 btrfs_release_path(&sctx->extent_path);
2291 btrfs_release_path(&sctx->csum_path);
2293 if (sctx->raid56_data_stripes) {
2295 release_scrub_stripe(&sctx->raid56_data_stripes[i]);
2296 kfree(sctx->raid56_data_stripes);
2297 sctx->raid56_data_stripes = NULL;
2300 if (sctx->is_dev_replace && ret >= 0) {
2303 ret2 = sync_write_pointer_for_zoned(sctx,
2314 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
2320 struct btrfs_fs_info *fs_info = sctx->fs_info;
2352 ret = scrub_stripe(sctx, bg, em, scrub_dev, i);
2383 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
2388 struct btrfs_fs_info *fs_info = sctx->fs_info;
2494 if (sctx->is_dev_replace && btrfs_is_zoned(fs_info)) {
2558 ret = btrfs_inc_block_group_ro(cache, sctx->is_dev_replace);
2559 if (!ret && sctx->is_dev_replace) {
2571 } else if (ret == -ENOSPC && !sctx->is_dev_replace &&
2608 if (sctx->is_dev_replace) {
2621 ret = scrub_chunk(sctx, cache, scrub_dev, found_key.offset,
2623 if (sctx->is_dev_replace &&
2660 if (sctx->is_dev_replace &&
2665 if (sctx->stat.malloc_errors > 0) {
2679 static int scrub_one_super(struct scrub_ctx *sctx, struct btrfs_device *dev,
2682 struct btrfs_fs_info *fs_info = sctx->fs_info;
2714 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
2722 struct btrfs_fs_info *fs_info = sctx->fs_info;
2729 spin_lock(&sctx->stat_lock);
2730 sctx->stat.malloc_errors++;
2731 spin_unlock(&sctx->stat_lock);
2749 ret = scrub_one_super(sctx, scrub_dev, page, bytenr, gen);
2751 spin_lock(&sctx->stat_lock);
2752 sctx->stat.super_errors++;
2753 spin_unlock(&sctx->stat_lock);
2814 struct scrub_ctx *sctx;
2835 sctx = scrub_setup_ctx(fs_info, is_dev_replace);
2836 if (IS_ERR(sctx))
2837 return PTR_ERR(sctx);
2883 sctx->readonly = readonly;
2884 dev->scrub_ctx = sctx;
2908 spin_lock(&sctx->stat_lock);
2909 old_super_errors = sctx->stat.super_errors;
2910 spin_unlock(&sctx->stat_lock);
2918 ret = scrub_supers(sctx, dev);
2921 spin_lock(&sctx->stat_lock);
2927 if (sctx->stat.super_errors > old_super_errors && !sctx->readonly)
2929 spin_unlock(&sctx->stat_lock);
2933 ret = scrub_enumerate_chunks(sctx, dev, start, end);
2940 memcpy(progress, &sctx->stat, sizeof(*progress));
2951 scrub_put_ctx(sctx);
2976 scrub_free_ctx(sctx);
3026 struct scrub_ctx *sctx;
3029 sctx = dev->scrub_ctx;
3030 if (!sctx) {
3034 atomic_inc(&sctx->cancel_req);
3051 struct scrub_ctx *sctx = NULL;
3056 sctx = dev->scrub_ctx;
3057 if (sctx)
3058 memcpy(progress, &sctx->stat, sizeof(*progress));
3061 return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
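
The final group shows cancellation and progress reporting: a canceller bumps cancel_req (line 3034), the per-extent loop in scrub_simple_mirror() polls it (line 2035) and bails out early, and progress queries simply copy sctx->stat (lines 2940 and 3058). A minimal sketch of that cooperative-cancel pattern with C11 atomics (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct demo_scrub {
            atomic_int cancel_req;
    };

    /* Called from another thread to ask the scrub worker to stop. */
    static void demo_request_cancel(struct demo_scrub *s)
    {
            atomic_fetch_add(&s->cancel_req, 1);
    }

    /* Polled at the top of each unit of work, mirroring the loop's check. */
    static bool demo_should_stop(struct demo_scrub *s)
    {
            return atomic_load(&s->cancel_req) != 0;
    }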