Lines matching refs: sc — each entry shows the source line number followed by the matching code fragment.

25 struct xfs_scrub *sc,
28 struct xfs_mount *mp = sc->mp;
29 xfs_agnumber_t agno = sc->sm->sm_agno;
33 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
38 error = xchk_ag_init(sc, agno, &sc->sa);
39 if (!xchk_xref_process_error(sc, agno, agbno, &error))
42 xchk_xref_is_used_space(sc, agbno, 1);
43 xchk_xref_is_not_inode_chunk(sc, agbno, 1);
44 xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
45 xchk_xref_is_not_shared(sc, agbno, 1);
47 /* scrub teardown will take care of sc->sa for us */
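The matches at source lines 25-47 all fall inside the superblock cross-reference helper. As a reading aid, here is how those fragments fit together, sketched from fs/xfs/scrub/agheader.c; the unmatched glue lines (the agbno assignment and the early returns) are filled in from context and may differ between kernel revisions.

static void
xchk_superblock_xref(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = sc->mp;
	xfs_agnumber_t		agno = sc->sm->sm_agno;
	xfs_agblock_t		agbno;
	int			error;

	/* Don't cross-reference if the primary check already failed. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	/* Attach the per-AG headers and btree cursors to sc->sa. */
	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	/* The sb must be allocated, non-inode, fs-owned, and unshared. */
	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_xref_is_not_inode_chunk(sc, agbno, 1);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
	xchk_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}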
60 struct xfs_scrub *sc)
62 struct xfs_mount *mp = sc->mp;
71 agno = sc->sm->sm_agno;
75 error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
92 if (!xchk_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
104 xchk_block_set_corrupt(sc, bp);
107 xchk_block_set_corrupt(sc, bp);
110 xchk_block_set_corrupt(sc, bp);
113 xchk_block_set_corrupt(sc, bp);
116 xchk_block_set_preen(sc, bp);
119 xchk_block_set_corrupt(sc, bp);
122 xchk_block_set_preen(sc, bp);
125 xchk_block_set_preen(sc, bp);
128 xchk_block_set_preen(sc, bp);
131 xchk_block_set_corrupt(sc, bp);
134 xchk_block_set_corrupt(sc, bp);
137 xchk_block_set_corrupt(sc, bp);
140 xchk_block_set_corrupt(sc, bp);
143 xchk_block_set_corrupt(sc, bp);
157 xchk_block_set_corrupt(sc, bp);
165 xchk_block_set_preen(sc, bp);
168 xchk_block_set_corrupt(sc, bp);
171 xchk_block_set_corrupt(sc, bp);
174 xchk_block_set_corrupt(sc, bp);
177 xchk_block_set_preen(sc, bp);
180 xchk_block_set_corrupt(sc, bp);
183 xchk_block_set_corrupt(sc, bp);
186 xchk_block_set_corrupt(sc, bp);
189 xchk_block_set_corrupt(sc, bp);
192 xchk_block_set_corrupt(sc, bp);
195 xchk_block_set_corrupt(sc, bp);
198 xchk_block_set_preen(sc, bp);
206 xchk_block_set_preen(sc, bp);
209 xchk_block_set_preen(sc, bp);
217 xchk_block_set_corrupt(sc, bp);
220 xchk_block_set_corrupt(sc, bp);
223 xchk_block_set_corrupt(sc, bp);
226 xchk_block_set_preen(sc, bp);
229 xchk_block_set_preen(sc, bp);
232 xchk_block_set_corrupt(sc, bp);
235 xchk_block_set_corrupt(sc, bp);
238 xchk_block_set_corrupt(sc, bp);
241 xchk_block_set_corrupt(sc, bp);
246 xchk_block_set_corrupt(sc, bp);
253 xchk_block_set_corrupt(sc, bp);
256 xchk_block_set_preen(sc, bp);
266 xchk_block_set_corrupt(sc, bp);
272 xchk_block_set_corrupt(sc, bp);
279 xchk_block_set_corrupt(sc, bp);
285 xchk_block_set_corrupt(sc, bp);
295 xchk_block_set_corrupt(sc, bp);
305 xchk_block_set_corrupt(sc, bp);
312 xchk_block_set_corrupt(sc, bp);
317 xchk_block_set_corrupt(sc, bp);
320 xchk_block_set_preen(sc, bp);
328 xchk_block_set_corrupt(sc, bp);
334 xchk_block_set_corrupt(sc, bp);
336 xchk_superblock_xref(sc, bp);
359 struct xfs_scrub *sc)
361 struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
365 if (!sc->sa.bno_cur)
368 error = xfs_alloc_query_all(sc->sa.bno_cur,
370 if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
373 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
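Source lines 359-373 trace the AGF free-block cross-check: walk every bnobt record, sum the extent lengths, and compare the total against agf_freeblks. A sketch paraphrased from the kernel source; the xchk_agf_record_bno_lengths callback name and the be32_to_cpu comparison come from that source rather than from the matches above.

/* Tally one freespace record's length into *priv (an xfs_extlen_t). */
static int
xchk_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Cross-reference the AGF's free block count against the bnobt. */
static inline void
xchk_agf_xref_freeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_extlen_t		blocks = 0;
	int			error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xchk_agf_record_bno_lengths, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}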
379 struct xfs_scrub *sc)
381 struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
387 if (!sc->sa.cnt_cur)
391 error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
392 if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
396 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
401 error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
402 if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
405 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
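Source lines 379-405 cover the cntbt (by-size btree) cross-check. The logic, sketched from the kernel source: a <= lookup with the maximum length (-1U) lands on the largest free extent; if no record exists at all, agf_freeblks must be zero, otherwise the record's length must equal agf_longest. The !have branch is unmatched above because those lines don't reference sc.

static inline void
xchk_agf_xref_cntbt(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		agbno;
	xfs_extlen_t		blocks;
	int			have;
	int			error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		/* No free extents: the AGF must agree. */
		if (agf->agf_freeblks != cpu_to_be32(0))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* The largest record must match agf_longest. */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}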
411 struct xfs_scrub *sc)
413 struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
414 struct xfs_mount *mp = sc->mp;
420 if (sc->sa.rmap_cur) {
421 error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
422 if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
426 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
435 if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
436 !sc->sa.bno_cur || !sc->sa.cnt_cur)
440 error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
441 if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
445 error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
446 if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
451 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
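Source lines 411-451 belong to the agf_btreeblks cross-check. A hedged sketch of the calculation (newer kernels also gate this on the lazysbcount feature, which the matches don't show): each btree root lives in an AG header and is not counted in agf_btreeblks, hence the blocks - 1 terms.

static inline void
xchk_agf_xref_btreeblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	struct xfs_mount	*mp = sc->mp;
	xfs_agblock_t		blocks;
	xfs_agblock_t		btreeblks;
	int			error;

	/* Check agf_rmap_blocks; set up for the agf_btreeblks check. */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks against the free space btrees. */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}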
457 struct xfs_scrub *sc)
459 struct xfs_agf *agf = sc->sa.agf_bp->b_addr;
463 if (!sc->sa.refc_cur)
466 error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
467 if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
470 xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
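Source lines 457-470 apply the same count-and-compare pattern to the refcount btree; the agf_refcount_blocks comparison is filled in from the kernel source.

static inline void
xchk_agf_xref_refcblks(
	struct xfs_scrub	*sc)
{
	struct xfs_agf		*agf = sc->sa.agf_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}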
476 struct xfs_scrub *sc)
478 struct xfs_mount *mp = sc->mp;
482 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
487 error = xchk_ag_btcur_init(sc, &sc->sa);
491 xchk_xref_is_used_space(sc, agbno, 1);
492 xchk_agf_xref_freeblks(sc);
493 xchk_agf_xref_cntbt(sc);
494 xchk_xref_is_not_inode_chunk(sc, agbno, 1);
495 xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
496 xchk_agf_xref_btreeblks(sc);
497 xchk_xref_is_not_shared(sc, agbno, 1);
498 xchk_agf_xref_refcblks(sc);
500 /* scrub teardown will take care of sc->sa for us */
506 struct xfs_scrub *sc)
508 struct xfs_mount *mp = sc->mp;
521 agno = sc->sa.agno = sc->sm->sm_agno;
522 error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
523 &sc->sa.agf_bp, &sc->sa.agfl_bp);
524 if (!xchk_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
526 xchk_buffer_recheck(sc, sc->sa.agf_bp);
528 agf = sc->sa.agf_bp->b_addr;
533 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
538 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
542 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
546 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
550 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
555 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
559 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
565 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
569 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
581 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
586 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
588 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
590 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
593 xchk_agf_xref(sc);
604 struct xfs_scrub *sc;
610 struct xfs_scrub *sc,
613 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
616 xchk_xref_is_used_space(sc, agbno, 1);
617 xchk_xref_is_not_inode_chunk(sc, agbno, 1);
618 xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_AG);
619 xchk_xref_is_not_shared(sc, agbno, 1);
630 struct xfs_scrub *sc = sai->sc;
631 xfs_agnumber_t agno = sc->sa.agno;
637 xchk_block_set_corrupt(sc, sc->sa.agfl_bp);
639 xchk_agfl_block_xref(sc, agbno);
641 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
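Source lines 604-641 come from the AGFL walker: struct xchk_agfl_info collects each AGFL slot for a later duplicate check, and xchk_agfl_block validates and cross-references one slot at a time. A sketch, with the struct layout and the -ECANCELED early abort paraphrased from the kernel source (both may differ between revisions).

/* AGFL-walk context; entries[] feeds a later duplicate-block check. */
struct xchk_agfl_info {
	unsigned int		sz_entries;
	unsigned int		nr_entries;
	xfs_agblock_t		*entries;
	struct xfs_scrub	*sc;
};

/* Scrub one AGFL slot. */
static int
xchk_agfl_block(
	struct xfs_mount	*mp,
	xfs_agblock_t		agbno,
	void			*priv)
{
	struct xchk_agfl_info	*sai = priv;
	struct xfs_scrub	*sc = sai->sc;
	xfs_agnumber_t		agno = sc->sa.agno;

	/* Record in-bounds blocks; anything else marks the AGFL corrupt. */
	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xchk_block_set_corrupt(sc, sc->sa.agfl_bp);

	xchk_agfl_block_xref(sc, agbno);

	/* Stop walking the AGFL once corruption has been noted. */
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -ECANCELED;

	return 0;
}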
661 struct xfs_scrub *sc)
663 struct xfs_mount *mp = sc->mp;
667 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
672 error = xchk_ag_btcur_init(sc, &sc->sa);
676 xchk_xref_is_used_space(sc, agbno, 1);
677 xchk_xref_is_not_inode_chunk(sc, agbno, 1);
678 xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
679 xchk_xref_is_not_shared(sc, agbno, 1);
682 * Scrub teardown will take care of sc->sa for us. Leave sc->sa
690 struct xfs_scrub *sc)
699 agno = sc->sa.agno = sc->sm->sm_agno;
700 error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
701 &sc->sa.agf_bp, &sc->sa.agfl_bp);
702 if (!xchk_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
704 if (!sc->sa.agf_bp)
706 xchk_buffer_recheck(sc, sc->sa.agfl_bp);
708 xchk_agfl_xref(sc);
710 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
714 agf = sc->sa.agf_bp->b_addr;
716 if (agflcount > xfs_agfl_size(sc->mp)) {
717 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
721 sai.sc = sc;
731 error = xfs_agfl_walk(sc->mp, sc->sa.agf_bp->b_addr,
732 sc->sa.agfl_bp, xchk_agfl_block, &sai);
741 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
750 xchk_block_set_corrupt(sc, sc->sa.agf_bp);
766 struct xfs_scrub *sc)
768 struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
773 if (!sc->sa.ino_cur)
776 error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
777 if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
781 xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
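Source lines 766-781 check the AGI's cached inode counts against a live tally of the inobt; the agi_count/agi_freecount comparison doesn't reference sc, so it isn't matched above and is filled in from the kernel source.

static inline void
xchk_agi_xref_icounts(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agino_t		icount;
	xfs_agino_t		freecount;
	int			error;

	if (!sc->sa.ino_cur)
		return;

	/* Walk the inobt and total up allocated and free inodes. */
	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}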
787 struct xfs_scrub *sc)
789 struct xfs_agi *agi = sc->sa.agi_bp->b_addr;
793 if (!xfs_sb_version_hasinobtcounts(&sc->mp->m_sb))
796 if (sc->sa.ino_cur) {
797 error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
798 if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
801 xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
804 if (sc->sa.fino_cur) {
805 error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
806 if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
809 xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
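Source lines 787-809 repeat the count-blocks pattern for the inobt and finobt block counters that the inobtcounts feature stores in the AGI. Sketch from the kernel source; the compared fields, agi_iblocks and agi_fblocks, come from that source rather than the matches.

static inline void
xchk_agi_xref_fiblocks(
	struct xfs_scrub	*sc)
{
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	xfs_agblock_t		blocks;
	int			error = 0;

	if (!xfs_sb_version_hasinobtcounts(&sc->mp->m_sb))
		return;

	if (sc->sa.ino_cur) {
		error = xfs_btree_count_blocks(sc->sa.ino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.ino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_iblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (sc->sa.fino_cur) {
		error = xfs_btree_count_blocks(sc->sa.fino_cur, &blocks);
		if (!xchk_should_check_xref(sc, &error, &sc->sa.fino_cur))
			return;
		if (blocks != be32_to_cpu(agi->agi_fblocks))
			xchk_block_xref_set_corrupt(sc, sc->sa.agi_bp);
	}
}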
816 struct xfs_scrub *sc)
818 struct xfs_mount *mp = sc->mp;
822 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
827 error = xchk_ag_btcur_init(sc, &sc->sa);
831 xchk_xref_is_used_space(sc, agbno, 1);
832 xchk_xref_is_not_inode_chunk(sc, agbno, 1);
833 xchk_agi_xref_icounts(sc);
834 xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_FS);
835 xchk_xref_is_not_shared(sc, agbno, 1);
836 xchk_agi_xref_fiblocks(sc);
838 /* scrub teardown will take care of sc->sa for us */
844 struct xfs_scrub *sc)
846 struct xfs_mount *mp = sc->mp;
860 agno = sc->sa.agno = sc->sm->sm_agno;
861 error = xchk_ag_read_headers(sc, agno, &sc->sa.agi_bp,
862 &sc->sa.agf_bp, &sc->sa.agfl_bp);
863 if (!xchk_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
865 xchk_buffer_recheck(sc, sc->sa.agi_bp);
867 agi = sc->sa.agi_bp->b_addr;
872 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
877 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
881 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
886 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
890 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
898 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
903 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
907 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
913 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
917 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
922 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
924 xchk_block_set_corrupt(sc, sc->sa.agi_bp);
927 xchk_agi_xref(sc);