Lines Matching refs:sdp

59 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
65 gfs2_log_lock(sdp);
66 spin_lock(&sdp->sd_ail_lock);
76 gfs2_trans_add_revoke(sdp, bd);
80 spin_unlock(&sdp->sd_ail_lock);
81 gfs2_log_unlock(sdp);
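
The hits at file lines 59-81 track struct gfs2_sbd (the per-superblock data) through what looks like __gfs2_ail_flush() in fs/gfs2/glops.c of a ~v5.10 kernel. The pattern worth noting is the lock ordering: the log lock is taken before the AIL spinlock, revokes are queued while both are held, and the locks drop in reverse order. A condensed sketch of the surrounding loop, reconstructed from that era of the source (details vary by version):

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_bufdata *bd, *tmp;

        gfs2_log_lock(sdp);              /* log lock first ... */
        spin_lock(&sdp->sd_ail_lock);    /* ... then the AIL spinlock */
        list_for_each_entry_safe_reverse(bd, tmp, &gl->gl_ail_list,
                                         bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                /* Queue a revoke for this buffer; both locks held. */
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        spin_unlock(&sdp->sd_ail_lock);  /* release in reverse order */
        gfs2_log_unlock(sdp);
}
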
87 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
104 * the sdp revoke queue, in which case, we still want to flush
107 * If the sdp revoke list is empty too, we might still have an
114 gfs2_log_lock(sdp);
115 have_revokes = !list_empty(&sdp->sd_log_revokes);
116 log_in_flight = atomic_read(&sdp->sd_log_in_flight);
117 gfs2_log_unlock(sdp);
121 log_flush_wait(sdp);
128 tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes);
130 ret = gfs2_log_reserve(sdp, tr.tr_reserved);
138 gfs2_trans_end(sdp);
140 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
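
File lines 87-140 fall in gfs2_ail_empty_gl(). The comment fragments at 104/107 describe its early-exit logic: even when this glock's AIL is empty, revokes may still sit on the sdp-wide sd_log_revokes queue (so flush the log anyway), or revoke I/O may already be in flight (so wait for it). A sketch of that decision, reconstructed from the same source:

        bool have_revokes, log_in_flight;

        /* Snapshot both conditions under the log lock. */
        gfs2_log_lock(sdp);
        have_revokes = !list_empty(&sdp->sd_log_revokes);
        log_in_flight = atomic_read(&sdp->sd_log_in_flight);
        gfs2_log_unlock(sdp);

        if (have_revokes)        /* revokes still queued: flush the log */
                goto flush;
        if (log_in_flight)       /* revoke write in flight: wait for it */
                log_flush_wait(sdp);
        return 0;
flush:
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_EMPTY_GL);
        return 0;
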
147 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
149 unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
156 max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
158 ret = gfs2_trans_begin(sdp, 0, max_revokes);
162 gfs2_trans_end(sdp);
163 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
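
File lines 147-163 are gfs2_ail_flush(). The arithmetic at 149/156 sizes the revoke reservation: the first log descriptor block holds (sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64) revoke entries, and each continuation block holds (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more, since a revoke is just a u64 block address. A reconstruction of the whole function as it reads in ~v5.10 kernels:

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        /* Revoke entries that fit in the initial descriptor block. */
        unsigned int max_revokes = (sdp->sd_sb.sb_bsize -
                        sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
        int ret;

        if (!revokes)
                return;

        /* Grow the reservation one metadata block at a time until it
         * covers everything on this glock's AIL. */
        while (revokes > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header)) / sizeof(u64);

        ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_FLUSH);
}
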
175 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
176 struct address_space *metamapping = &sdp->sd_aspace;
178 const unsigned bsize = sdp->sd_sb.sb_bsize;
185 WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
188 gfs2_io_error(sdp);
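
File lines 175-188 show the resource-group metadata sync helper: write back and wait on the span of sdp->sd_aspace covering the rgrp, record the error on the mapping, and funnel it into gfs2_io_error() unless the filesystem has already withdrawn. The log flush at line 211 (below) precedes this sync in the caller. A condensed sketch, with the rgrp-to-byte-range math as it appears in the ~v5.10 source:

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *metamapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        const unsigned bsize = sdp->sd_sb.sb_bsize;
        loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
        loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
        int error;

        filemap_fdatawrite_range(metamapping, start, end);
        error = filemap_fdatawait_range(metamapping, start, end);
        WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
        mapping_set_error(metamapping, error);
        if (error)
                gfs2_io_error(sdp);     /* withdraws the filesystem */
        return error;
}
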
203 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
211 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
232 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
233 struct address_space *mapping = &sdp->sd_aspace;
235 const unsigned bsize = sdp->sd_sb.sb_bsize;
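
File lines 232-235 belong to the matching rgrp invalidate path, which computes the same sd_aspace byte range and drops the cached pages on demote. A brief sketch of the pattern:

        start = (rgd->rd_addr * bsize) & PAGE_MASK;
        end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
        gfs2_rgrp_brelse(rgd);                           /* drop cached rgrp buffers */
        truncate_inode_pages_range(mapping, start, end); /* toss the pages */
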
398 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
400 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
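
File line 400 is essentially the whole body of the inode glock's demote check: the glocks backing the jindex and rindex inodes are never volunteered for demotion. The full function, as reconstructed from the same source:

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        /* Never offer to demote the journal-index or rindex glocks. */
        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        return 1;
}
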
408 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
448 if (unlikely(height > sdp->sd_max_height))
504 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
523 spin_lock(&sdp->sd_trunc_lock);
525 list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
526 spin_unlock(&sdp->sd_trunc_lock);
527 wake_up(&sdp->sd_quota_wait);
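
File lines 504-527 are from the inode glock's go_lock hook. When an exclusive lock lands on an inode whose truncate was interrupted (GFS2_DIF_TRUNC_IN_PROG), the inode is queued for the quota daemon to finish: add it to the per-sb list under sd_trunc_lock, then wake sd_quota_wait. A sketch of that branch:

        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);   /* quotad completes the truncate */
                return 1;
        }
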
576 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
589 if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
590 !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
591 atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
592 error = freeze_super(sdp->sd_vfs);
594 fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
596 if (gfs2_withdrawn(sdp)) {
597 atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
600 gfs2_assert_withdraw(sdp, 0);
602 queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
603 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
604 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
607 atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
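
File lines 576-607 cover freeze_go_sync(): when the freeze glock is held SHARED on a healthy mount, the node marks SFS_STARTING_FREEZE, freezes the VFS superblock, queues the freeze work, and then either flushes the log with the FREEZE head (live journal) or jumps straight to SFS_FROZEN (read-only mounts). A condensed sketch of that state sequence:

        if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
            !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                if (error) {
                        fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
                                error);
                        if (gfs2_withdrawn(sdp)) {
                                atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
                                return 0;
                        }
                        gfs2_assert_withdraw(sdp, 0);
                }
                queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
                if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
                                       GFS2_LFC_FREEZE_GO_SYNC);
                else    /* read-only mounts: no journal to flush */
                        atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
        }
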
620 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
621 struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
626 if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
629 error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
630 if (gfs2_assert_withdraw_delayed(sdp, !error))
632 if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
635 sdp->sd_log_sequence = head.lh_sequence + 1;
636 gfs2_log_pointers_init(sdp, head.lh_blkno);
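
File lines 620-636 come from the freeze glock's xmote_bh hook, which runs after thaw: it invalidates journal metadata, re-reads the journal head, refuses to continue unless the log was cleanly unmounted, and re-primes the in-core log pointers. A sketch of that path:

        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

                error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
                if (gfs2_assert_withdraw_delayed(sdp, !error))
                        return error;
                /* The head must carry UNMOUNT, or the journal is dirty. */
                if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
                                                 GFS2_LOG_HEAD_UNMOUNT))
                        return -EIO;
                sdp->sd_log_sequence = head.lh_sequence + 1;
                gfs2_log_pointers_init(sdp, head.lh_blkno);
        }
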
662 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
664 if (!remote || sb_rdonly(sdp->sd_vfs))
707 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
721 if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
722 test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
723 test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
731 if (sdp->sd_args.ar_spectator) {
732 fs_warn(sdp, "Spectator node cannot recover journals.\n");
736 fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
737 set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
745 queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
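
File lines 707-745 are the remote-withdraw callback on the nondisk "live" glock. It bails out if this node is already withdrawn, unmounting, or mid-recovery (721-723), refuses on spectator mounts, which carry no journal (731-732), and otherwise flags SDF_REMOTE_WITHDRAW and defers the actual recovery to gfs2_control_wq; recovering inline would deadlock on the very glock the callback is holding. A condensed sketch:

        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
            test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
            test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
                return;         /* already withdrawn, unmounting, or recovering */

        if (sdp->sd_args.ar_spectator) {
                fs_warn(sdp, "Spectator node cannot recover journals.\n");
                return;
        }

        fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
        set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
        /* Recovering here would deadlock on the live glock we hold,
         * so hand the work off to the control workqueue instead. */
        queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
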