Lines Matching refs:sbio

547 struct scrub_bio *sbio = sctx->bios[sctx->curr];
549 for (i = 0; i < sbio->page_count; i++) {
550 WARN_ON(!sbio->pagev[i]->page);
551 scrub_block_put(sbio->pagev[i]->sblock);
553 bio_put(sbio->bio);
557 struct scrub_bio *sbio = sctx->bios[i];
559 if (!sbio)
561 kfree(sbio);
591 struct scrub_bio *sbio;
593 sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
594 if (!sbio)
596 sctx->bios[i] = sbio;
598 sbio->index = i;
599 sbio->sctx = sctx;
600 sbio->page_count = 0;
601 btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
1628 struct scrub_bio *sbio;
1643 sbio = sctx->wr_curr_bio;
1644 if (sbio->page_count == 0) {
1647 sbio->physical = spage->physical_for_dev_replace;
1648 sbio->logical = spage->logical;
1649 sbio->dev = sctx->wr_tgtdev;
1650 bio = sbio->bio;
1653 sbio->bio = bio;
1656 bio->bi_private = sbio;
1658 bio_set_dev(bio, sbio->dev->bdev);
1659 bio->bi_iter.bi_sector = sbio->physical >> 9;
1661 sbio->status = 0;
1662 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1664 sbio->logical + sbio->page_count * PAGE_SIZE !=
1670 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1672 if (sbio->page_count < 1) {
1673 bio_put(sbio->bio);
1674 sbio->bio = NULL;
1682 sbio->pagev[sbio->page_count] = spage;
1684 sbio->page_count++;
1685 if (sbio->page_count == sctx->pages_per_wr_bio)
1694 struct scrub_bio *sbio;
1699 sbio = sctx->wr_curr_bio;
1701 WARN_ON(!sbio->bio->bi_disk);
1707 btrfsic_submit_bio(sbio->bio);
1712 struct scrub_bio *sbio = bio->bi_private;
1713 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
1715 sbio->status = bio->bi_status;
1716 sbio->bio = bio;
1718 btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL);
1719 btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1724 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1725 struct scrub_ctx *sctx = sbio->sctx;
1728 WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1729 if (sbio->status) {
1731 &sbio->sctx->fs_info->dev_replace;
1733 for (i = 0; i < sbio->page_count; i++) {
1734 struct scrub_page *spage = sbio->pagev[i];
1741 for (i = 0; i < sbio->page_count; i++)
1742 scrub_page_put(sbio->pagev[i]);
1744 bio_put(sbio->bio);
1745 kfree(sbio);
1954 struct scrub_bio *sbio;
1959 sbio = sctx->bios[sctx->curr];
1962 btrfsic_submit_bio(sbio->bio);
1969 struct scrub_bio *sbio;
1989 sbio = sctx->bios[sctx->curr];
1990 if (sbio->page_count == 0) {
1993 sbio->physical = spage->physical;
1994 sbio->logical = spage->logical;
1995 sbio->dev = spage->dev;
1996 bio = sbio->bio;
1999 sbio->bio = bio;
2002 bio->bi_private = sbio;
2004 bio_set_dev(bio, sbio->dev->bdev);
2005 bio->bi_iter.bi_sector = sbio->physical >> 9;
2007 sbio->status = 0;
2008 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2010 sbio->logical + sbio->page_count * PAGE_SIZE !=
2012 sbio->dev != spage->dev) {
2017 sbio->pagev[sbio->page_count] = spage;
2018 ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2020 if (sbio->page_count < 1) {
2021 bio_put(sbio->bio);
2022 sbio->bio = NULL;
2031 sbio->page_count++;
2032 if (sbio->page_count == sctx->pages_per_rd_bio)
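
The else-if conditions at 1662-1664 and 2008-2012 above are cut off by the line-based match, but together they test whether the next page continues the run the current sbio has accumulated; if not, the bio is submitted and a fresh one started. A minimal sketch of the read-path check, assuming the comparison targets are spage->physical, spage->logical and spage->dev as the surrounding matches suggest (the write path compares against physical_for_dev_replace instead); the helper name is illustrative, not kernel code, and it leans on the types defined in scrub.c:

	/*
	 * Hypothetical helper: pages keep being appended to sbio only while
	 * the physical and logical offsets advance in lockstep and the
	 * device is unchanged; otherwise the caller submits sbio first and
	 * retries with an empty one.
	 */
	static bool sbio_extends_run(const struct scrub_bio *sbio,
				     const struct scrub_page *spage)
	{
		u64 off = (u64)sbio->page_count * PAGE_SIZE;

		return sbio->physical + off == spage->physical &&
		       sbio->logical + off == spage->logical &&
		       sbio->dev == spage->dev;
	}
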
2245 struct scrub_bio *sbio = bio->bi_private;
2246 struct btrfs_fs_info *fs_info = sbio->dev->fs_info;
2248 sbio->status = bio->bi_status;
2249 sbio->bio = bio;
2251 btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2256 struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2257 struct scrub_ctx *sctx = sbio->sctx;
2260 BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2261 if (sbio->status) {
2262 for (i = 0; i < sbio->page_count; i++) {
2263 struct scrub_page *spage = sbio->pagev[i];
2271 for (i = 0; i < sbio->page_count; i++) {
2272 struct scrub_page *spage = sbio->pagev[i];
2280 bio_put(sbio->bio);
2281 sbio->bio = NULL;
2283 sbio->next_free = sctx->first_free;
2284 sctx->first_free = sbio->index;
2439 /* push csums to sbio */
2570 /* push csums to sbio */
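
Taken together, the fields touched above (index, sctx, dev, bio, status, physical, logical, pagev[], page_count, next_free, work) outline the scrub_bio structure these references belong to. A hedged reconstruction, assuming conventional kernel types; field order and the exact pagev[] bound (the checks at 1728 and 2260 use SCRUB_PAGES_PER_WR_BIO and SCRUB_PAGES_PER_RD_BIO respectively) should be taken from the actual scrub source, not from this sketch:

	struct scrub_bio {
		int			index;		/* slot in sctx->bios[], see 598 */
		struct scrub_ctx	*sctx;		/* owning scrub context, see 599 */
		struct btrfs_device	*dev;		/* target device, see 1649 / 1995 */
		struct bio		*bio;		/* block-layer bio being filled */
		blk_status_t		status;		/* completion status, see 1715 / 2248 */
		u64			logical;	/* logical address of the first page */
		u64			physical;	/* physical address of the first page */
		struct scrub_page	*pagev[SCRUB_PAGES_PER_RD_BIO];	/* bound is an assumption */
		int			page_count;	/* pages currently attached */
		int			next_free;	/* free-list link, see 2283 */
		struct btrfs_work	work;		/* defers end_io handling to a worker */
	};
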