Lines Matching refs:rbio
26 /* set when additional merges to this rbio are not allowed */
30 * set when this rbio is sitting in the hash, but it is just a cache
71 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
72 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
74 static int finish_parity_scrub(struct btrfs_raid_bio *rbio);
77 static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio)
79 bitmap_free(rbio->error_bitmap);
80 kfree(rbio->stripe_pages);
81 kfree(rbio->bio_sectors);
82 kfree(rbio->stripe_sectors);
83 kfree(rbio->finish_pointers);
86 static void free_raid_bio(struct btrfs_raid_bio *rbio)
90 if (!refcount_dec_and_test(&rbio->refs))
93 WARN_ON(!list_empty(&rbio->stripe_cache));
94 WARN_ON(!list_empty(&rbio->hash_list));
95 WARN_ON(!bio_list_empty(&rbio->bio_list));
97 for (i = 0; i < rbio->nr_pages; i++) {
98 if (rbio->stripe_pages[i]) {
99 __free_page(rbio->stripe_pages[i]);
100 rbio->stripe_pages[i] = NULL;
104 btrfs_put_bioc(rbio->bioc);
105 free_raid_bio_pointers(rbio);
106 kfree(rbio);
109 static void start_async_work(struct btrfs_raid_bio *rbio, work_func_t work_func)
111 INIT_WORK(&rbio->work, work_func);
112 queue_work(rbio->bioc->fs_info->rmw_workers, &rbio->work);
159 * caching an rbio means to copy anything from the
167 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
172 ret = alloc_rbio_pages(rbio);
176 for (i = 0; i < rbio->nr_sectors; i++) {
178 if (!rbio->bio_sectors[i].page) {
184 if (i < rbio->nr_data * rbio->stripe_nsectors)
185 ASSERT(rbio->stripe_sectors[i].uptodate);
189 ASSERT(rbio->stripe_sectors[i].page);
190 memcpy_page(rbio->stripe_sectors[i].page,
191 rbio->stripe_sectors[i].pgoff,
192 rbio->bio_sectors[i].page,
193 rbio->bio_sectors[i].pgoff,
194 rbio->bioc->fs_info->sectorsize);
195 rbio->stripe_sectors[i].uptodate = 1;
197 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
203 static int rbio_bucket(struct btrfs_raid_bio *rbio)
205 u64 num = rbio->bioc->full_stripe_logical;
218 static bool full_page_sectors_uptodate(struct btrfs_raid_bio *rbio,
221 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
225 ASSERT(page_nr < rbio->nr_pages);
230 if (!rbio->stripe_sectors[i].uptodate)
241 static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
243 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
247 for (i = 0, offset = 0; i < rbio->nr_sectors; i++, offset += sectorsize) {
250 ASSERT(page_index < rbio->nr_pages);
251 rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
252 rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
274 static bool is_data_stripe_page(struct btrfs_raid_bio *rbio, int page_nr)
277 rbio->bioc->fs_info->sectorsize_bits;
286 return (sector_nr < rbio->nr_data * rbio->stripe_nsectors);
290 * Stealing an rbio means taking all the uptodate pages from the stripe array
291 * in the source rbio and putting them into the destination rbio.
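For illustration only, a minimal userspace sketch of the "stealing" idea described above: move every already-uptodate page from the source's sector array into the destination so the destination does not have to re-read those sectors. The demo_sector struct and helper names are hypothetical stand-ins, not the kernel's btrfs_raid_bio / sector_ptr layout.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's per-sector bookkeeping. */
struct demo_sector {
	void *page;      /* backing page, NULL if none allocated          */
	bool  uptodate;  /* contents already read (or computed) correctly */
};

/*
 * "Steal" every uptodate page from src into dest, so dest can skip
 * re-reading those sectors; src gives up ownership of the pages.
 */
static void steal_uptodate_pages(struct demo_sector *src,
				 struct demo_sector *dest, int nr)
{
	for (int i = 0; i < nr; i++) {
		if (!src[i].uptodate || !src[i].page)
			continue;
		free(dest[i].page);          /* the replaced page is dropped */
		dest[i].page = src[i].page;
		dest[i].uptodate = true;
		src[i].page = NULL;
		src[i].uptodate = false;
	}
}

int main(void)
{
	struct demo_sector src[2] = { { malloc(16), true }, { NULL, false } };
	struct demo_sector dest[2] = { { NULL, false }, { NULL, false } };

	steal_uptodate_pages(src, dest, 2);
	printf("dest[0] uptodate: %d, src[0] page: %p\n",
	       dest[0].uptodate, src[0].page);
	free(dest[0].page);
	return 0;
}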
347 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
349 int bucket = rbio_bucket(rbio);
357 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
360 table = rbio->bioc->fs_info->stripe_hash_table;
372 spin_lock(&rbio->bio_list_lock);
374 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
375 list_del_init(&rbio->stripe_cache);
379 /* if the bio list isn't empty, this rbio is
385 * the rbio from the hash_table, and drop
388 if (bio_list_empty(&rbio->bio_list)) {
389 if (!list_empty(&rbio->hash_list)) {
390 list_del_init(&rbio->hash_list);
391 refcount_dec(&rbio->refs);
392 BUG_ON(!list_empty(&rbio->plug_list));
397 spin_unlock(&rbio->bio_list_lock);
401 free_raid_bio(rbio);
405 * prune a given rbio from the cache
407 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
411 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
414 table = rbio->bioc->fs_info->stripe_hash_table;
417 __remove_rbio_from_cache(rbio);
427 struct btrfs_raid_bio *rbio;
433 rbio = list_entry(table->stripe_cache.next,
436 __remove_rbio_from_cache(rbio);
455 * insert an rbio into the stripe cache. It
459 * If this rbio was already cached, it gets
462 * If the size of the rbio cache is too big, we
465 static void cache_rbio(struct btrfs_raid_bio *rbio)
469 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
472 table = rbio->bioc->fs_info->stripe_hash_table;
475 spin_lock(&rbio->bio_list_lock);
478 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
479 refcount_inc(&rbio->refs);
481 if (!list_empty(&rbio->stripe_cache)) {
482 list_move(&rbio->stripe_cache, &table->stripe_cache);
484 list_add(&rbio->stripe_cache, &table->stripe_cache);
488 spin_unlock(&rbio->bio_list_lock);
497 if (found != rbio)
525 * Returns true if the bio list inside this rbio covers an entire stripe (no
528 static int rbio_is_full(struct btrfs_raid_bio *rbio)
530 unsigned long size = rbio->bio_list_bytes;
533 spin_lock(&rbio->bio_list_lock);
534 if (size != rbio->nr_data * BTRFS_STRIPE_LEN)
536 BUG_ON(size > rbio->nr_data * BTRFS_STRIPE_LEN);
537 spin_unlock(&rbio->bio_list_lock);
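A worked example of the fullness check above (assuming the usual 64KiB BTRFS_STRIPE_LEN): with nr_data = 3, the rbio is full only when the bios on bio_list add up to exactly 3 * 64KiB = 192KiB, i.e. they cover every data sector of the full stripe, so no read-modify-write read phase is needed.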
562 * rbio is going to run our IO for us. We can
593 static unsigned int rbio_stripe_sector_index(const struct btrfs_raid_bio *rbio,
597 ASSERT(stripe_nr < rbio->real_stripes);
598 ASSERT(sector_nr < rbio->stripe_nsectors);
600 return stripe_nr * rbio->stripe_nsectors + sector_nr;
603 /* Return a sector from rbio->stripe_sectors, not from the bio list */
604 static struct sector_ptr *rbio_stripe_sector(const struct btrfs_raid_bio *rbio,
608 return &rbio->stripe_sectors[rbio_stripe_sector_index(rbio, stripe_nr,
613 static struct sector_ptr *rbio_pstripe_sector(const struct btrfs_raid_bio *rbio,
616 return rbio_stripe_sector(rbio, rbio->nr_data, sector_nr);
620 static struct sector_ptr *rbio_qstripe_sector(const struct btrfs_raid_bio *rbio,
623 if (rbio->nr_data + 1 == rbio->real_stripes)
625 return rbio_stripe_sector(rbio, rbio->nr_data + 1, sector_nr);
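A standalone sketch of the flat, stripe-major sector addressing these helpers imply: index = stripe_nr * stripe_nsectors + sector_nr, with the P stripe at row nr_data and, for RAID6, the Q stripe at row nr_data + 1. The geometry below is invented for the demo.

#include <stdio.h>

int main(void)
{
	/* Hypothetical RAID6 layout: 4 data stripes + P + Q, 16 sectors per stripe. */
	const int nr_data = 4, real_stripes = 6, stripe_nsectors = 16;

	int stripe_nr = 2, sector_nr = 5;	/* third data stripe, sixth sector */

	int data_idx = stripe_nr * stripe_nsectors + sector_nr;
	int p_idx    = nr_data * stripe_nsectors + sector_nr;	     /* rbio_pstripe_sector() */
	int q_idx    = (nr_data + 1) * stripe_nsectors + sector_nr; /* rbio_qstripe_sector() */

	printf("data index %d, P index %d, Q index %d (of %d total sectors)\n",
	       data_idx, p_idx, q_idx, real_stripes * stripe_nsectors);
	return 0;
}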
632 * 1) Nobody has the stripe locked yet. The rbio is given
637 * with the lock owner. The rbio is freed and the IO will
638 * start automatically along with the existing rbio. 1 is returned.
641 * The rbio is added to the lock owner's plug list, or merged into
642 * an rbio already on the plug list. When the lock owner unlocks,
643 * the next rbio on the list is run and the IO is started automatically.
646 * If we return 0, the caller still owns the rbio and must continue with
647 * IO submission. If we return 1, the caller must assume the rbio has
650 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
659 h = rbio->bioc->fs_info->stripe_hash_table->table + rbio_bucket(rbio);
663 if (cur->bioc->full_stripe_logical != rbio->bioc->full_stripe_logical)
668 /* Can we steal this cached rbio's pages? */
676 steal_rbio(cur, rbio);
684 if (rbio_can_merge(cur, rbio)) {
685 merge_rbio(cur, rbio);
687 freeit = rbio;
694 * We couldn't merge with the running rbio, see if we can merge
699 if (rbio_can_merge(pending, rbio)) {
700 merge_rbio(pending, rbio);
702 freeit = rbio;
709 * No merging, put us on the tail of the plug list, our rbio
710 * will be started when the currently running rbio unlocks
712 list_add_tail(&rbio->plug_list, &cur->plug_list);
718 refcount_inc(&rbio->refs);
719 list_add(&rbio->hash_list, &h->hash_list);
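The locking convention described above is easiest to read alongside its callers later in this listing, e.g. rmw_rbio_work does: if (lock_stripe_add(rbio) == 0) rmw_rbio(rbio). A zero return means this caller now owns the stripe lock and must drive the I/O itself; a non-zero return means the rbio was merged into, or plugged behind, the existing lock owner, and this caller must not touch it again.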
735 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
741 bucket = rbio_bucket(rbio);
742 h = rbio->bioc->fs_info->stripe_hash_table->table + bucket;
744 if (list_empty(&rbio->plug_list))
745 cache_rbio(rbio);
748 spin_lock(&rbio->bio_list_lock);
750 if (!list_empty(&rbio->hash_list)) {
753 * to perform, just leave this rbio here for others
756 if (list_empty(&rbio->plug_list) &&
757 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
759 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
760 BUG_ON(!bio_list_empty(&rbio->bio_list));
764 list_del_init(&rbio->hash_list);
765 refcount_dec(&rbio->refs);
772 if (!list_empty(&rbio->plug_list)) {
774 struct list_head *head = rbio->plug_list.next;
779 list_del_init(&rbio->plug_list);
783 spin_unlock(&rbio->bio_list_lock);
789 steal_rbio(rbio, next);
792 steal_rbio(rbio, next);
800 spin_unlock(&rbio->bio_list_lock);
805 remove_rbio_from_cache(rbio);
822 * this frees the rbio and runs through all the bios in the
825 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
827 struct bio *cur = bio_list_get(&rbio->bio_list);
830 kfree(rbio->csum_buf);
831 bitmap_free(rbio->csum_bitmap);
832 rbio->csum_buf = NULL;
833 rbio->csum_bitmap = NULL;
836 * Clear the data bitmap, as the rbio may be cached for later usage.
840 bitmap_clear(&rbio->dbitmap, 0, rbio->stripe_nsectors);
843 * At this moment, rbio->bio_list is empty; however, since rbio does not
844 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
845 * hash list, rbio may be merged with others so that rbio->bio_list
847 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
850 unlock_stripe(rbio);
851 extra = bio_list_get(&rbio->bio_list);
852 free_raid_bio(rbio);
862 * @rbio: The raid bio
871 static struct sector_ptr *sector_in_rbio(struct btrfs_raid_bio *rbio,
878 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->real_stripes);
879 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
881 index = stripe_nr * rbio->stripe_nsectors + sector_nr;
882 ASSERT(index >= 0 && index < rbio->nr_sectors);
884 spin_lock(&rbio->bio_list_lock);
885 sector = &rbio->bio_sectors[index];
890 spin_unlock(&rbio->bio_list_lock);
893 spin_unlock(&rbio->bio_list_lock);
895 return &rbio->stripe_sectors[index];
900 * this does not allocate any pages for rbio->stripe_pages.
911 struct btrfs_raid_bio *rbio;
921 rbio = kzalloc(sizeof(*rbio), GFP_NOFS);
922 if (!rbio)
924 rbio->stripe_pages = kcalloc(num_pages, sizeof(struct page *),
926 rbio->bio_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
928 rbio->stripe_sectors = kcalloc(num_sectors, sizeof(struct sector_ptr),
930 rbio->finish_pointers = kcalloc(real_stripes, sizeof(void *), GFP_NOFS);
931 rbio->error_bitmap = bitmap_zalloc(num_sectors, GFP_NOFS);
933 if (!rbio->stripe_pages || !rbio->bio_sectors || !rbio->stripe_sectors ||
934 !rbio->finish_pointers || !rbio->error_bitmap) {
935 free_raid_bio_pointers(rbio);
936 kfree(rbio);
940 bio_list_init(&rbio->bio_list);
941 init_waitqueue_head(&rbio->io_wait);
942 INIT_LIST_HEAD(&rbio->plug_list);
943 spin_lock_init(&rbio->bio_list_lock);
944 INIT_LIST_HEAD(&rbio->stripe_cache);
945 INIT_LIST_HEAD(&rbio->hash_list);
947 rbio->bioc = bioc;
948 rbio->nr_pages = num_pages;
949 rbio->nr_sectors = num_sectors;
950 rbio->real_stripes = real_stripes;
951 rbio->stripe_npages = stripe_npages;
952 rbio->stripe_nsectors = stripe_nsectors;
953 refcount_set(&rbio->refs, 1);
954 atomic_set(&rbio->stripes_pending, 0);
957 rbio->nr_data = real_stripes - btrfs_nr_parity_stripes(bioc->map_type);
959 return rbio;
963 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
967 ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
971 index_stripe_sectors(rbio);
976 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
978 const int data_pages = rbio->nr_data * rbio->stripe_npages;
981 ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
982 rbio->stripe_pages + data_pages);
986 index_stripe_sectors(rbio);
996 static int get_rbio_veritical_errors(struct btrfs_raid_bio *rbio, int sector_nr,
1012 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1013 int total_sector_nr = stripe_nr * rbio->stripe_nsectors + sector_nr;
1015 if (test_bit(total_sector_nr, rbio->error_bitmap)) {
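A standalone sketch of the vertical error count this helper performs: for one sector column, walk every stripe row and count how many rows have that column's bit set in a flat, stripe-major error bitmap, remembering the first two failed rows. Names and sizes below are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Flat, stripe-major error map: entry = stripe_nr * stripe_nsectors + sector_nr. */
static int count_vertical_errors(const bool *error_bitmap, int real_stripes,
				 int stripe_nsectors, int sector_nr,
				 int *faila, int *failb)
{
	int found = 0;

	*faila = -1;
	*failb = -1;
	for (int stripe_nr = 0; stripe_nr < real_stripes; stripe_nr++) {
		if (!error_bitmap[stripe_nr * stripe_nsectors + sector_nr])
			continue;
		if (found == 0)
			*faila = stripe_nr;
		else if (found == 1)
			*failb = stripe_nr;
		found++;
	}
	return found;	/* caller compares this against bioc->max_errors */
}

int main(void)
{
	bool errs[3 * 4] = { false };	/* 3 stripes x 4 sectors */
	int faila, failb;

	errs[0 * 4 + 2] = true;		/* stripe 0, sector 2 failed */
	errs[2 * 4 + 2] = true;		/* stripe 2, sector 2 failed */

	int nr = count_vertical_errors(errs, 3, 4, 2, &faila, &failb);
	printf("sector 2: %d errors (faila=%d failb=%d)\n", nr, faila, failb);
	return 0;
}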
1035 static int rbio_add_io_sector(struct btrfs_raid_bio *rbio,
1042 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1051 * thus it can be larger than rbio->real_stripes.
1052 * So here we check against bioc->num_stripes, not rbio->real_stripes.
1054 ASSERT(stripe_nr >= 0 && stripe_nr < rbio->bioc->num_stripes);
1055 ASSERT(sector_nr >= 0 && sector_nr < rbio->stripe_nsectors);
1058 stripe = &rbio->bioc->stripes[stripe_nr];
1065 set_bit(stripe_nr * rbio->stripe_nsectors + sector_nr,
1066 rbio->error_bitmap);
1069 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
1071 if (found_errors > rbio->bioc->max_errors)
1099 bio->bi_private = rbio;
1106 static void index_one_bio(struct btrfs_raid_bio *rbio, struct bio *bio)
1108 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1112 rbio->bioc->full_stripe_logical;
1120 struct sector_ptr *sector = &rbio->bio_sectors[index];
1137 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1141 spin_lock(&rbio->bio_list_lock);
1142 bio_list_for_each(bio, &rbio->bio_list)
1143 index_one_bio(rbio, bio);
1145 spin_unlock(&rbio->bio_list_lock);
1148 static void bio_get_trace_info(struct btrfs_raid_bio *rbio, struct bio *bio,
1151 const struct btrfs_io_context *bioc = rbio->bioc;
1185 static void generate_pq_vertical(struct btrfs_raid_bio *rbio, int sectornr)
1187 void **pointers = rbio->finish_pointers;
1188 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1191 const bool has_qstripe = rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6;
1194 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1195 sector = sector_in_rbio(rbio, stripe, sectornr, 0);
1201 sector = rbio_pstripe_sector(rbio, sectornr);
1210 sector = rbio_qstripe_sector(rbio, sectornr);
1215 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
1219 memcpy(pointers[rbio->nr_data], pointers[0], sectorsize);
1220 run_xor(pointers + 1, rbio->nr_data - 1, sectorsize);
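In the RAID5 case above (no Q stripe), the P sector is simply the XOR of the data sectors in the same vertical position, which is what the memcpy() plus run_xor() pair computes; the RAID6 case hands the same pointer array to raid6_call.gen_syndrome() instead. A minimal userspace illustration of the XOR step, with made-up buffer sizes and data:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define SECTORSIZE 16	/* tiny "sector" just for the demo */

/* P = D0 ^ D1 ^ ... ^ Dn-1, mirroring the memcpy() + run_xor() pair above. */
static void gen_p(uint8_t *p, uint8_t data[][SECTORSIZE], int nr_data)
{
	memcpy(p, data[0], SECTORSIZE);
	for (int d = 1; d < nr_data; d++)
		for (int i = 0; i < SECTORSIZE; i++)
			p[i] ^= data[d][i];
}

int main(void)
{
	uint8_t data[3][SECTORSIZE], p[SECTORSIZE];

	for (int d = 0; d < 3; d++)
		memset(data[d], 0x11 << d, SECTORSIZE);
	gen_p(p, data, 3);

	/* Losing any one data sector can now be undone by XOR-ing the rest with P. */
	printf("P[0] = 0x%02x\n", p[0]);
	return 0;
}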
1226 static int rmw_assemble_write_bios(struct btrfs_raid_bio *rbio,
1238 ASSERT(bitmap_weight(&rbio->dbitmap, rbio->stripe_nsectors));
1244 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
1248 * bio_list in our rbio) and our P/Q. Ignore everything else.
1250 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1254 stripe = total_sector_nr / rbio->stripe_nsectors;
1255 sectornr = total_sector_nr % rbio->stripe_nsectors;
1258 if (!test_bit(sectornr, &rbio->dbitmap))
1261 if (stripe < rbio->nr_data) {
1262 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1266 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1269 ret = rbio_add_io_sector(rbio, bio_list, sector, stripe,
1275 if (likely(!rbio->bioc->replace_nr_stripes))
1283 ASSERT(rbio->bioc->replace_stripe_src >= 0);
1285 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1289 stripe = total_sector_nr / rbio->stripe_nsectors;
1290 sectornr = total_sector_nr % rbio->stripe_nsectors;
1297 if (stripe != rbio->bioc->replace_stripe_src) {
1303 total_sector_nr += rbio->stripe_nsectors - 1;
1308 if (!test_bit(sectornr, &rbio->dbitmap))
1311 if (stripe < rbio->nr_data) {
1312 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
1316 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1319 ret = rbio_add_io_sector(rbio, bio_list, sector,
1320 rbio->real_stripes,
1332 static void set_rbio_range_error(struct btrfs_raid_bio *rbio, struct bio *bio)
1334 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1336 rbio->bioc->full_stripe_logical;
1339 ASSERT(total_nr_sector < rbio->nr_data * rbio->stripe_nsectors);
1341 bitmap_set(rbio->error_bitmap, total_nr_sector,
1354 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1355 if (!rbio->bioc->stripes[stripe_nr].dev->bdev) {
1357 bitmap_set(rbio->error_bitmap,
1358 stripe_nr * rbio->stripe_nsectors,
1359 rbio->stripe_nsectors);
1370 static struct sector_ptr *find_stripe_sector(struct btrfs_raid_bio *rbio,
1376 for (i = 0; i < rbio->nr_sectors; i++) {
1377 struct sector_ptr *sector = &rbio->stripe_sectors[i];
1387 * rbio pages, nothing that comes in from the higher layers
1389 static void set_bio_pages_uptodate(struct btrfs_raid_bio *rbio, struct bio *bio)
1391 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
1403 sector = find_stripe_sector(rbio, bvec->bv_page, pgoff);
1411 static int get_bio_sector_nr(struct btrfs_raid_bio *rbio, struct bio *bio)
1416 for (i = 0; i < rbio->nr_sectors; i++) {
1419 sector = &rbio->stripe_sectors[i];
1422 sector = &rbio->bio_sectors[i];
1426 ASSERT(i < rbio->nr_sectors);
1430 static void rbio_update_error_bitmap(struct btrfs_raid_bio *rbio, struct bio *bio)
1432 int total_sector_nr = get_bio_sector_nr(rbio, bio);
1447 (bio_size >> rbio->bioc->fs_info->sectorsize_bits); i++)
1448 set_bit(i, rbio->error_bitmap);
1452 static void verify_bio_data_sectors(struct btrfs_raid_bio *rbio,
1455 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1456 int total_sector_nr = get_bio_sector_nr(rbio, bio);
1461 if (!rbio->csum_bitmap || !rbio->csum_buf)
1465 if (total_sector_nr >= rbio->nr_data * rbio->stripe_nsectors)
1475 u8 *expected_csum = rbio->csum_buf +
1480 if (!test_bit(total_sector_nr, rbio->csum_bitmap))
1486 set_bit(total_sector_nr, rbio->error_bitmap);
1493 struct btrfs_raid_bio *rbio = bio->bi_private;
1496 rbio_update_error_bitmap(rbio, bio);
1498 set_bio_pages_uptodate(rbio, bio);
1499 verify_bio_data_sectors(rbio, bio);
1503 if (atomic_dec_and_test(&rbio->stripes_pending))
1504 wake_up(&rbio->io_wait);
1507 static void submit_read_wait_bio_list(struct btrfs_raid_bio *rbio,
1512 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
1519 bio_get_trace_info(rbio, bio, &trace_info);
1520 trace_raid56_read(rbio, bio, &trace_info);
1525 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
1528 static int alloc_rbio_data_pages(struct btrfs_raid_bio *rbio)
1530 const int data_pages = rbio->nr_data * rbio->stripe_npages;
1533 ret = btrfs_alloc_page_array(data_pages, rbio->stripe_pages);
1537 index_stripe_sectors(rbio);
1608 /* Add the original bio into rbio->bio_list, and update rbio::dbitmap. */
1609 static void rbio_add_bio(struct btrfs_raid_bio *rbio, struct bio *orig_bio)
1611 const struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1613 const u64 full_stripe_start = rbio->bioc->full_stripe_logical;
1620 rbio->nr_data * BTRFS_STRIPE_LEN);
1622 bio_list_add(&rbio->bio_list, orig_bio);
1623 rbio->bio_list_bytes += orig_bio->bi_iter.bi_size;
1629 fs_info->sectorsize_bits) % rbio->stripe_nsectors;
1631 set_bit(bit, &rbio->dbitmap);
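A standalone sketch of the dbitmap update above: for each sector an incoming bio covers, the vertical position inside the stripe is ((offset - full_stripe_start) >> sectorsize_bits) % stripe_nsectors, because logical addresses run through data stripe 0 end to end, then data stripe 1, and so on. The geometry below is made up for the demo.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t sectorsize_bits = 12;	    /* 4 KiB sectors      */
	const uint32_t stripe_nsectors = 16;	    /* 64 KiB / 4 KiB     */
	const uint64_t full_stripe_start = 1 << 20; /* hypothetical start */

	/* A bio covering 8 KiB, starting 68 KiB into the full stripe. */
	uint64_t bio_start = full_stripe_start + 68 * 1024;
	uint32_t bio_len   = 8 * 1024;
	unsigned long dbitmap = 0;

	for (uint64_t off = bio_start - full_stripe_start;
	     off < bio_start - full_stripe_start + bio_len;
	     off += 1 << sectorsize_bits) {
		unsigned int bit = (off >> sectorsize_bits) % stripe_nsectors;
		dbitmap |= 1UL << bit;
	}
	printf("dbitmap = 0x%lx\n", dbitmap);	/* bits 1 and 2 set here */
	return 0;
}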
1641 struct btrfs_raid_bio *rbio;
1645 rbio = alloc_rbio(fs_info, bioc);
1646 if (IS_ERR(rbio)) {
1647 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
1651 rbio->operation = BTRFS_RBIO_WRITE;
1652 rbio_add_bio(rbio, bio);
1658 if (!rbio_is_full(rbio)) {
1666 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1675 start_async_work(rbio, rmw_rbio_work);
1678 static int verify_one_sector(struct btrfs_raid_bio *rbio,
1681 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1687 if (!rbio->csum_bitmap || !rbio->csum_buf)
1691 if (stripe_nr >= rbio->nr_data)
1697 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1698 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1700 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1705 csum_expected = rbio->csum_buf +
1706 (stripe_nr * rbio->stripe_nsectors + sector_nr) *
1718 static int recover_vertical(struct btrfs_raid_bio *rbio, int sector_nr,
1721 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
1734 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1735 !test_bit(sector_nr, &rbio->dbitmap))
1738 found_errors = get_rbio_veritical_errors(rbio, sector_nr, &faila,
1747 if (found_errors > rbio->bioc->max_errors)
1756 for (stripe_nr = 0; stripe_nr < rbio->real_stripes; stripe_nr++) {
1761 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1762 sector = sector_in_rbio(rbio, stripe_nr, sector_nr, 0);
1764 sector = rbio_stripe_sector(rbio, stripe_nr, sector_nr);
1773 if (rbio->bioc->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1776 if (faila == rbio->nr_data)
1798 if (failb == rbio->real_stripes - 1) {
1799 if (faila == rbio->real_stripes - 2)
1813 if (failb == rbio->real_stripes - 2) {
1814 raid6_datap_recov(rbio->real_stripes, sectorsize,
1817 raid6_2data_recov(rbio->real_stripes, sectorsize,
1827 memcpy(pointers[faila], pointers[rbio->nr_data], sectorsize);
1831 for (stripe_nr = faila; stripe_nr < rbio->nr_data - 1;
1834 pointers[rbio->nr_data - 1] = p;
1837 run_xor(pointers, rbio->nr_data - 1, sectorsize);
1845 * Especially if we decide to cache the rbio, we need to
1852 ret = verify_one_sector(rbio, faila, sector_nr);
1856 sector = rbio_stripe_sector(rbio, faila, sector_nr);
1860 ret = verify_one_sector(rbio, failb, sector_nr);
1864 sector = rbio_stripe_sector(rbio, failb, sector_nr);
1869 for (stripe_nr = rbio->real_stripes - 1; stripe_nr >= 0; stripe_nr--)
1874 static int recover_sectors(struct btrfs_raid_bio *rbio)
1887 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1888 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1894 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1895 spin_lock(&rbio->bio_list_lock);
1896 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1897 spin_unlock(&rbio->bio_list_lock);
1900 index_rbio_pages(rbio);
1902 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
1903 ret = recover_vertical(rbio, sectornr, pointers, unmap_array);
1914 static void recover_rbio(struct btrfs_raid_bio *rbio)
1924 ASSERT(bitmap_weight(rbio->error_bitmap, rbio->nr_sectors));
1927 ret = alloc_rbio_pages(rbio);
1931 index_rbio_pages(rbio);
1941 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
1943 int stripe = total_sector_nr / rbio->stripe_nsectors;
1944 int sectornr = total_sector_nr % rbio->stripe_nsectors;
1952 if (!rbio->bioc->stripes[stripe].dev->bdev ||
1953 test_bit(total_sector_nr, rbio->error_bitmap)) {
1958 set_bit(total_sector_nr, rbio->error_bitmap);
1962 sector = rbio_stripe_sector(rbio, stripe, sectornr);
1963 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
1971 submit_read_wait_bio_list(rbio, &bio_list);
1972 ret = recover_sectors(rbio);
1974 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
1979 struct btrfs_raid_bio *rbio;
1981 rbio = container_of(work, struct btrfs_raid_bio, work);
1982 if (!lock_stripe_add(rbio))
1983 recover_rbio(rbio);
1991 static void set_rbio_raid6_extra_error(struct btrfs_raid_bio *rbio, int mirror_num)
2003 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2008 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2022 failb = rbio->real_stripes - (mirror_num - 1);
2028 set_bit(failb * rbio->stripe_nsectors + sector_nr,
2029 rbio->error_bitmap);
2046 struct btrfs_raid_bio *rbio;
2048 rbio = alloc_rbio(fs_info, bioc);
2049 if (IS_ERR(rbio)) {
2050 bio->bi_status = errno_to_blk_status(PTR_ERR(rbio));
2055 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2056 rbio_add_bio(rbio, bio);
2058 set_rbio_range_error(rbio, bio);
2066 set_rbio_raid6_extra_error(rbio, mirror_num);
2068 start_async_work(rbio, recover_rbio_work);
2071 static void fill_data_csums(struct btrfs_raid_bio *rbio)
2073 struct btrfs_fs_info *fs_info = rbio->bioc->fs_info;
2075 rbio->bioc->full_stripe_logical);
2076 const u64 start = rbio->bioc->full_stripe_logical;
2077 const u32 len = (rbio->nr_data * rbio->stripe_nsectors) <<
2081 /* The rbio should not have its csum buffer initialized. */
2082 ASSERT(!rbio->csum_buf && !rbio->csum_bitmap);
2087 * - The rbio doesn't belong to data block groups
2090 * - The rbio belongs to mixed block groups
2095 if (!(rbio->bioc->map_type & BTRFS_BLOCK_GROUP_DATA) ||
2096 rbio->bioc->map_type & BTRFS_BLOCK_GROUP_METADATA)
2099 rbio->csum_buf = kzalloc(rbio->nr_data * rbio->stripe_nsectors *
2101 rbio->csum_bitmap = bitmap_zalloc(rbio->nr_data * rbio->stripe_nsectors,
2103 if (!rbio->csum_buf || !rbio->csum_bitmap) {
2109 rbio->csum_buf, rbio->csum_bitmap);
2112 if (bitmap_empty(rbio->csum_bitmap, len >> fs_info->sectorsize_bits))
2124 rbio->bioc->full_stripe_logical, ret);
2126 kfree(rbio->csum_buf);
2127 bitmap_free(rbio->csum_bitmap);
2128 rbio->csum_buf = NULL;
2129 rbio->csum_bitmap = NULL;
2132 static int rmw_read_wait_recover(struct btrfs_raid_bio *rbio)
2143 fill_data_csums(rbio);
2150 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2153 int stripe = total_sector_nr / rbio->stripe_nsectors;
2154 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2156 sector = rbio_stripe_sector(rbio, stripe, sectornr);
2157 ret = rbio_add_io_sector(rbio, &bio_list, sector,
2169 submit_read_wait_bio_list(rbio, &bio_list);
2170 return recover_sectors(rbio);
2175 struct btrfs_raid_bio *rbio = bio->bi_private;
2179 rbio_update_error_bitmap(rbio, bio);
2181 if (atomic_dec_and_test(&rbio->stripes_pending))
2182 wake_up(&rbio->io_wait);
2185 static void submit_write_bios(struct btrfs_raid_bio *rbio,
2190 atomic_set(&rbio->stripes_pending, bio_list_size(bio_list));
2197 bio_get_trace_info(rbio, bio, &trace_info);
2198 trace_raid56_write(rbio, bio, &trace_info);
2206 * Should only be used in the RMW path, to skip a cached rbio.
2208 static bool need_read_stripe_sectors(struct btrfs_raid_bio *rbio)
2212 for (i = 0; i < rbio->nr_data * rbio->stripe_nsectors; i++) {
2213 struct sector_ptr *sector = &rbio->stripe_sectors[i];
2217 * thus this rbio cannot be a cached one, as a cached one must
2226 static void rmw_rbio(struct btrfs_raid_bio *rbio)
2236 ret = alloc_rbio_parity_pages(rbio);
2244 if (!rbio_is_full(rbio) && need_read_stripe_sectors(rbio)) {
2249 ret = alloc_rbio_data_pages(rbio);
2253 index_rbio_pages(rbio);
2255 ret = rmw_read_wait_recover(rbio);
2265 spin_lock(&rbio->bio_list_lock);
2266 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
2267 spin_unlock(&rbio->bio_list_lock);
2269 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2271 index_rbio_pages(rbio);
2279 if (!rbio_is_full(rbio))
2280 cache_rbio_pages(rbio);
2282 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2284 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++)
2285 generate_pq_vertical(rbio, sectornr);
2288 ret = rmw_assemble_write_bios(rbio, &bio_list);
2294 submit_write_bios(rbio, &bio_list);
2295 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2298 for (sectornr = 0; sectornr < rbio->stripe_nsectors; sectornr++) {
2301 found_errors = get_rbio_veritical_errors(rbio, sectornr, NULL, NULL);
2302 if (found_errors > rbio->bioc->max_errors) {
2308 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2313 struct btrfs_raid_bio *rbio;
2315 rbio = container_of(work, struct btrfs_raid_bio, work);
2316 if (lock_stripe_add(rbio) == 0)
2317 rmw_rbio(rbio);
2341 struct btrfs_raid_bio *rbio;
2344 rbio = alloc_rbio(fs_info, bioc);
2345 if (IS_ERR(rbio))
2347 bio_list_add(&rbio->bio_list, bio);
2350 * and make the scrub rbio similar to the other types
2353 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2360 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2362 rbio->scrubp = i;
2366 ASSERT(i < rbio->real_stripes);
2368 bitmap_copy(&rbio->dbitmap, dbitmap, stripe_nsectors);
2369 return rbio;
2376 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2378 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2381 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2384 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2387 if (!test_bit(sectornr, &rbio->dbitmap))
2389 if (rbio->stripe_pages[index])
2394 rbio->stripe_pages[index] = page;
2396 index_stripe_sectors(rbio);
2400 static int finish_parity_scrub(struct btrfs_raid_bio *rbio)
2402 struct btrfs_io_context *bioc = rbio->bioc;
2404 void **pointers = rbio->finish_pointers;
2405 unsigned long *pbitmap = &rbio->finish_pbitmap;
2406 int nr_data = rbio->nr_data;
2418 if (rbio->real_stripes - rbio->nr_data == 1)
2420 else if (rbio->real_stripes - rbio->nr_data == 2)
2429 if (bioc->replace_nr_stripes && bioc->replace_stripe_src == rbio->scrubp) {
2431 bitmap_copy(pbitmap, &rbio->dbitmap, rbio->stripe_nsectors);
2439 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2457 pointers[rbio->real_stripes - 1] = kmap_local_page(q_sector.page);
2460 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2465 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2471 sector = sector_in_rbio(rbio, stripe, sectornr, 0);
2478 raid6_call.gen_syndrome(rbio->real_stripes, sectorsize,
2487 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2489 if (memcmp(parity, pointers[rbio->scrubp], sectorsize) != 0)
2490 memcpy(parity, pointers[rbio->scrubp], sectorsize);
2493 bitmap_clear(&rbio->dbitmap, sectornr, 1);
2504 kunmap_local(pointers[rbio->real_stripes - 1]);
2511 * higher layers (the bio_list in our rbio) and our p/q. Ignore
2514 for_each_set_bit(sectornr, &rbio->dbitmap, rbio->stripe_nsectors) {
2517 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2518 ret = rbio_add_io_sector(rbio, &bio_list, sector, rbio->scrubp,
2531 ASSERT(rbio->bioc->replace_stripe_src >= 0);
2532 for_each_set_bit(sectornr, pbitmap, rbio->stripe_nsectors) {
2535 sector = rbio_stripe_sector(rbio, rbio->scrubp, sectornr);
2536 ret = rbio_add_io_sector(rbio, &bio_list, sector,
2537 rbio->real_stripes,
2544 submit_write_bios(rbio, &bio_list);
2552 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2554 if (stripe >= 0 && stripe < rbio->nr_data)
2559 static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
2572 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2573 unmap_array = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
2579 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2585 found_errors = get_rbio_veritical_errors(rbio, sector_nr,
2587 if (found_errors > rbio->bioc->max_errors) {
2597 if (is_data_stripe(rbio, faila))
2602 if (is_data_stripe(rbio, failb))
2611 if (dfail > rbio->bioc->max_errors - 1) {
2628 if (failp != rbio->scrubp) {
2633 ret = recover_vertical(rbio, sector_nr, pointers, unmap_array);
2643 static int scrub_assemble_read_bios(struct btrfs_raid_bio *rbio)
2650 for (total_sector_nr = 0; total_sector_nr < rbio->nr_sectors;
2652 int sectornr = total_sector_nr % rbio->stripe_nsectors;
2653 int stripe = total_sector_nr / rbio->stripe_nsectors;
2657 if (!test_bit(sectornr, &rbio->dbitmap))
2661 * We want to find all the sectors missing from the rbio and
2665 sector = sector_in_rbio(rbio, stripe, sectornr, 1);
2669 sector = rbio_stripe_sector(rbio, stripe, sectornr);
2677 ret = rbio_add_io_sector(rbio, &bio_list, sector, stripe,
2685 submit_read_wait_bio_list(rbio, &bio_list);
2689 static void scrub_rbio(struct btrfs_raid_bio *rbio)
2694 ret = alloc_rbio_essential_pages(rbio);
2698 bitmap_clear(rbio->error_bitmap, 0, rbio->nr_sectors);
2700 ret = scrub_assemble_read_bios(rbio);
2705 ret = recover_scrub_rbio(rbio);
2713 ret = finish_parity_scrub(rbio);
2714 wait_event(rbio->io_wait, atomic_read(&rbio->stripes_pending) == 0);
2715 for (sector_nr = 0; sector_nr < rbio->stripe_nsectors; sector_nr++) {
2718 found_errors = get_rbio_veritical_errors(rbio, sector_nr, NULL, NULL);
2719 if (found_errors > rbio->bioc->max_errors) {
2725 rbio_orig_end_io(rbio, errno_to_blk_status(ret));
2733 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2735 if (!lock_stripe_add(rbio))
2736 start_async_work(rbio, scrub_rbio_work_locked);
2744 * This is because the rbio has its own page management for its cache.
2746 void raid56_parity_cache_data_pages(struct btrfs_raid_bio *rbio,
2750 rbio->bioc->full_stripe_logical;
2752 const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
2764 ret = alloc_rbio_data_pages(rbio);
2770 ASSERT(offset_in_full_stripe < (rbio->nr_data << BTRFS_STRIPE_LEN_SHIFT));
2773 struct page *dst = rbio->stripe_pages[page_nr + page_index];
2780 rbio->stripe_sectors[sector_nr].uptodate = true;