Lines matching defs: gl (fs/gfs2/glops.c, GFS2 glock operations; the number on each line is the source line of the match)

34 static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
36 fs_err(gl->gl_name.ln_sbd,
41 fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
42 gl->gl_name.ln_type, gl->gl_name.ln_number,
43 gfs2_glock2aspace(gl));
44 gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
45 gfs2_withdraw(gl->gl_name.ln_sbd);
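The matches above (lines 34-45) all fall inside gfs2_ail_error(), the error path taken when an AIL buffer that should have been written back is still dirty, pinned, or locked. A reconstruction of the function around these matches, assuming the mainline glops.c layout of this era; the buffer_head diagnostic in the first fs_err() is an assumption, not part of the matches:

static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
        /* Dump the offending buffer, then the glock it belongs to. */
        fs_err(gl->gl_name.ln_sbd,
               "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
               "state 0x%lx\n",
               bh, (unsigned long long)bh->b_blocknr, bh->b_state,
               bh->b_page->mapping, bh->b_page->flags);
        fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
               gl->gl_name.ln_type, gl->gl_name.ln_number,
               gfs2_glock2aspace(gl));
        /* An AIL inconsistency is unrecoverable: withdraw the filesystem. */
        gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");
        gfs2_withdraw(gl->gl_name.ln_sbd);
}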
50 * @gl: the glock
56 static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
59 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
60 struct list_head *head = &gl->gl_ail_list;
74 gfs2_ail_error(gl, bh);
79 GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
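Lines 56-79 are __gfs2_ail_flush(), which walks the glock's AIL list and turns each entry into a journal revoke. A sketch of the control flow the matches imply; the locking calls, the list walk, and gfs2_trans_add_revoke() are assumptions based on mainline glops.c:

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
                             unsigned int nr_revokes)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct list_head *head = &gl->gl_ail_list;
        struct gfs2_bufdata *bd, *tmp;
        struct buffer_head *bh;
        const unsigned long b_state = (1UL << BH_Dirty) |
                                      (1UL << BH_Pinned) | (1UL << BH_Lock);

        gfs2_log_lock(sdp);
        spin_lock(&sdp->sd_ail_lock);
        list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
                if (nr_revokes == 0)
                        break;
                bh = bd->bd_bh;
                if (bh->b_state & b_state) {
                        if (fsync)
                                continue;
                        gfs2_ail_error(gl, bh);
                }
                gfs2_trans_add_revoke(sdp, bd);
                nr_revokes--;
        }
        /* A full (non-fsync) flush must leave the AIL empty. */
        GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
        spin_unlock(&sdp->sd_ail_lock);
        gfs2_log_unlock(sdp);
}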
85 static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
87 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
96 tr.tr_revokes = atomic_read(&gl->gl_ail_count);
136 __gfs2_ail_flush(gl, 0, tr.tr_revokes);
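gfs2_ail_empty_gl() (lines 85-136) is the "drain everything" caller: it sizes an on-stack transaction by gl_ail_count and then revokes every AIL entry. A skeleton around the matches, with the log-space reservation and error handling elided since they are not among them:

static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_trans tr = {};

        /* One revoke per AIL entry; nothing to drain if the list is empty. */
        tr.tr_revokes = atomic_read(&gl->gl_ail_count);
        if (!tr.tr_revokes)
                return 0;

        /* ... log-space reservation and withdraw checks elided ... */

        __gfs2_ail_flush(gl, 0, tr.tr_revokes);

        /* ... transaction end and log flush elided ... */
        return 0;
}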
145 void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
147 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
148 unsigned int revokes = atomic_read(&gl->gl_ail_count);
161 __gfs2_ail_flush(gl, fsync, max_revokes);
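gfs2_ail_flush() (lines 145-161) is the fsync-time variant: instead of draining unconditionally, it caps the revokes at what a reserved chunk of log space can hold. Roughly, assuming the mainline sizing arithmetic (the log-descriptor capacity computation here is an assumption):

void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        unsigned int revokes = atomic_read(&gl->gl_ail_count);
        unsigned int max_revokes = (sdp->sd_sb.sb_bsize -
                                    sizeof(struct gfs2_log_descriptor)) /
                                   sizeof(u64);
        int ret;

        if (!revokes)
                return;

        /* Grow the reservation until it covers every pending revoke. */
        while (revokes > max_revokes)
                max_revokes += (sdp->sd_sb.sb_bsize -
                                sizeof(struct gfs2_meta_header)) / sizeof(u64);

        ret = gfs2_trans_begin(sdp, 0, max_revokes);
        if (ret)
                return;
        __gfs2_ail_flush(gl, fsync, max_revokes);
        gfs2_trans_end(sdp);
        gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_AIL_FLUSH);
}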
169 * @gl: the glock protecting the resource group
173 static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
175 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
177 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
194 * @gl: the glock
201 static int rgrp_go_sync(struct gfs2_glock *gl)
203 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
204 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
207 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
209 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
211 gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
213 error = gfs2_rgrp_metasync(gl);
215 error = gfs2_ail_empty_gl(gl);
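Lines 169-215 trace the resource-group sync path: rgrp_go_sync() bails out unless the glock was dirtied, asserts it is still held in EX, flushes the journal, writes the rgrp's metadata in place via gfs2_rgrp_metasync(), and finally empties the AIL. A sketch; the gfs2_free_clones() call is an assumption from mainline:

static int rgrp_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        int error;

        /* Nothing to sync unless the glock was dirtied while held. */
        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                return 0;
        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        /* Order matters: journal first, then in-place metadata, then AIL. */
        gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_RGRP_GO_SYNC);
        error = gfs2_rgrp_metasync(gl);
        if (!error)
                error = gfs2_ail_empty_gl(gl);
        gfs2_free_clones(rgd);
        return error;
}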
222 * @gl: the glock
230 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
232 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
234 struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
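rgrp_go_inval() (lines 222-234) is the invalidate side: resource-group metadata lives in the superblock-wide address space rather than a per-glock one, so only this rgrp's byte range is truncated. A sketch; the range computation and the gfs2_rgrp_brelse() call are assumptions from mainline:

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        struct address_space *mapping = &sdp->sd_aspace;
        struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
        const unsigned bsize = sdp->sd_sb.sb_bsize;

        /* Drop cached rgrp buffers, then this rgrp's slice of the shared
         * metadata mapping, forcing a re-read on next use. */
        gfs2_rgrp_brelse(rgd);
        WARN_ON_ONCE(!(flags & DIO_METADATA));
        truncate_inode_pages_range(mapping,
                        (rgd->rd_addr * bsize) & PAGE_MASK,
                        PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1);
}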
245 static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
248 struct gfs2_rgrpd *rgd = gl->gl_object;
254 static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
258 spin_lock(&gl->gl_lockref.lock);
259 ip = gl->gl_object;
262 spin_unlock(&gl->gl_lockref.lock);
266 struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
270 spin_lock(&gl->gl_lockref.lock);
271 rgd = gl->gl_object;
272 spin_unlock(&gl->gl_lockref.lock);
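gfs2_glock2inode() and gfs2_glock2rgrp() (lines 254-272) are the gl_object accessors: both read the pointer under gl_lockref.lock so it cannot be cleared mid-read. The rgrp variant is exactly the three matched lines; judging by the unmatched gap at lines 260-261, the inode variant additionally marks the inode busy (in mainline via GIF_GLOP_PENDING, an assumption here):

static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip;

        spin_lock(&gl->gl_lockref.lock);
        ip = gl->gl_object;
        if (ip)
                set_bit(GIF_GLOP_PENDING, &ip->i_flags);
        spin_unlock(&gl->gl_lockref.lock);
        return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
        struct gfs2_rgrpd *rgd;

        spin_lock(&gl->gl_lockref.lock);
        rgd = gl->gl_object;
        spin_unlock(&gl->gl_lockref.lock);

        return rgd;
}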
288 * @gl: the glock protecting the inode
291 int gfs2_inode_metasync(struct gfs2_glock *gl)
293 struct address_space *metamapping = gfs2_glock2aspace(gl);
299 gfs2_io_error(gl->gl_name.ln_sbd);
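gfs2_inode_metasync() (lines 288-299) writes back and waits on the glock's metadata mapping, reporting any failure as an I/O error. A reconstruction around the matches:

int gfs2_inode_metasync(struct gfs2_glock *gl)
{
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error;

        filemap_fdatawrite(metamapping);
        error = filemap_fdatawait(metamapping);
        if (error)
                gfs2_io_error(gl->gl_name.ln_sbd);
        return error;
}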
305 * @gl: the glock protecting the inode
309 static int inode_go_sync(struct gfs2_glock *gl)
311 struct gfs2_inode *ip = gfs2_glock2inode(gl);
313 struct address_space *metamapping = gfs2_glock2aspace(gl);
321 if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
324 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
326 gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
335 ret = gfs2_inode_metasync(gl);
338 gfs2_ail_empty_gl(gl);
344 clear_bit(GLF_DIRTY, &gl->gl_flags);
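inode_go_sync() (lines 305-344) follows the same shape as the rgrp path but also writes back the inode's data pages. A sketch with the regular-file preamble elided; the filemap calls between the matches are assumptions from mainline:

static int inode_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);
        int isreg = ip && S_ISREG(ip->i_inode.i_mode);
        struct address_space *metamapping = gfs2_glock2aspace(gl);
        int error = 0, ret;

        /* ... unmap/dio-wait preamble for regular files elided ... */

        if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                goto out;

        GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

        gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
                       GFS2_LFC_INODE_GO_SYNC);
        filemap_fdatawrite(metamapping);
        if (isreg) {
                struct address_space *mapping = ip->i_inode.i_mapping;
                filemap_fdatawrite(mapping);
                error = filemap_fdatawait(mapping);
                mapping_set_error(mapping, error);
        }
        ret = gfs2_inode_metasync(gl);
        if (!error)
                error = ret;
        gfs2_ail_empty_gl(gl);
        /* Data writeback may redirty the glock, so clear GLF_DIRTY again. */
        smp_mb__before_atomic();
        clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
        gfs2_clear_glop_pending(ip);
        return error;
}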
353 * @gl: the glock
362 static void inode_go_inval(struct gfs2_glock *gl, int flags)
364 struct gfs2_inode *ip = gfs2_glock2inode(gl);
367 struct address_space *mapping = gfs2_glock2aspace(gl);
377 if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
378 gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
381 gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
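inode_go_inval() (lines 353-381) throws away everything cached under the glock. The matched rindex branch is the interesting special case: invalidating the resource-group index forces it to be re-read before the next allocation. A sketch with the per-inode cache invalidation elided:

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
        struct gfs2_inode *ip = gfs2_glock2inode(gl);

        if (flags & DIO_METADATA) {
                struct address_space *mapping = gfs2_glock2aspace(gl);
                truncate_inode_pages(mapping, 0);
                /* ... ACL/dir-hash/secctx invalidation elided ... */
        }

        if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
                gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
                               GFS2_LOG_HEAD_FLUSH_NORMAL |
                               GFS2_LFC_INODE_GO_INVAL);
                gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
        }
        if (ip && S_ISREG(ip->i_inode.i_mode))
                truncate_inode_pages(ip->i_inode.i_mapping, 0);

        gfs2_clear_glop_pending(ip);
}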
391 * @gl: the glock
396 static int inode_go_demote_ok(const struct gfs2_glock *gl)
398 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
400 if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
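inode_go_demote_ok() (lines 391-400) refuses to demote the glocks backing the journal index and the resource-group index, since dropping either would be immediately counterproductive. The matches cover nearly the whole function:

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
                return 0;

        return 1;
}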
495 * @gl: the glock
503 struct gfs2_glock *gl = gh->gh_gl;
504 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
505 struct gfs2_inode *ip = gl->gl_object;
521 (gl->gl_state == LM_ST_EXCLUSIVE) &&
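Lines 495-521 fall inside inode_go_lock(), run when a holder acquires the glock. The matched EX-state test belongs to the check that resumes an interrupted truncate: if the on-disk inode carries GFS2_DIF_TRUNC_IN_PROG and both the glock and the holder are exclusive, the inode is handed off to be finished asynchronously. A partial sketch of just that branch; the list and wait-queue names are assumptions from mainline:

        /* Resume a truncate that a node died in the middle of. */
        if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
            (gl->gl_state == LM_ST_EXCLUSIVE) &&
            (gh->gh_state == LM_ST_EXCLUSIVE)) {
                spin_lock(&sdp->sd_trunc_lock);
                if (list_empty(&ip->i_trunc_list))
                        list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
                spin_unlock(&sdp->sd_trunc_lock);
                wake_up(&sdp->sd_quota_wait);
                return 1;
        }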
542 static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
545 struct gfs2_inode *ip = gl->gl_object;
567 * @gl: the glock
573 static int freeze_go_sync(struct gfs2_glock *gl)
576 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
589 if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
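freeze_go_sync() (lines 567-589) reacts to the freeze glock's state. The matched condition is the key subtlety: a freeze anywhere in the cluster leaves every node holding the freeze glock in SH, so seeing SH on a live, non-withdrawn journal means a freeze is starting somewhere. A sketch with the error handling elided:

static int freeze_go_sync(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
        int error = 0;

        if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
                error = freeze_super(sdp->sd_vfs);
                /* ... failure reporting and freeze-work queuing elided ... */
        }
        return error;
}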
614 * @gl: the glock
618 static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
620 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
643 * @gl: the glock
648 static int freeze_go_demote_ok(const struct gfs2_glock *gl)
655 * @gl: the glock
659 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
661 struct gfs2_inode *ip = gl->gl_object;
662 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
667 if (gl->gl_demote_state == LM_ST_UNLOCKED &&
668 gl->gl_state == LM_ST_SHARED && ip) {
669 gl->gl_lockref.count++;
671 &gl->gl_delete, 0))
672 gl->gl_lockref.count--;
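iopen_go_callback() (lines 655-672) handles a remote request to drop an iopen glock, which usually means another node wants to delete the inode. The lockref dance in the matches keeps the glock pinned for exactly as long as the delete work is queued. A reconstruction; the read-only check is an assumption from mainline:

static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_inode *ip = gl->gl_object;
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        if (!remote || sb_rdonly(sdp->sd_vfs))
                return;

        if (gl->gl_demote_state == LM_ST_UNLOCKED &&
            gl->gl_state == LM_ST_SHARED && ip) {
                /* Hold the glock while the delete work runs; undo the
                 * hold if the work was already queued. */
                gl->gl_lockref.count++;
                if (!queue_delayed_work(gfs2_delete_workqueue,
                                        &gl->gl_delete, 0))
                        gl->gl_lockref.count--;
        }
}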
676 static int iopen_go_demote_ok(const struct gfs2_glock *gl)
678 return !gfs2_delete_work_queued(gl);
683 * @gl: glock being freed
689 static void inode_go_free(struct gfs2_glock *gl)
693 if (!test_bit(GLF_FREEING, &gl->gl_flags))
695 clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
696 wake_up_bit(&gl->gl_flags, GLF_FREEING);
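inode_go_free() (lines 683-696) is the handshake with waiters that need the glock fully torn down: if GLF_FREEING is set, clear it with release semantics and wake anyone sleeping on that bit. The matches cover the whole body:

static void inode_go_free(struct gfs2_glock *gl)
{
        if (!test_bit(GLF_FREEING, &gl->gl_flags))
                return;
        clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
        wake_up_bit(&gl->gl_flags, GLF_FREEING);
}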
701 * @gl: the nondisk glock
705 static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
707 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
711 if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
717 clear_bit(GLF_DEMOTE, &gl->gl_flags);
718 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
728 if (gl->gl_demote_state != LM_ST_UNLOCKED)
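nondisk_go_callback() (lines 701-728) handles callbacks on the "live" nondisk glock, which other nodes use to signal a withdraw. A partial sketch down to the last match; the middle state checks are elided, and the function continues past line 728 with the actual recovery kick-off:

static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

        /* Only remote callbacks on the live lock matter here. */
        if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
                return;

        /* Cancel the demote: the live lock stays held in SH; the
         * callback is only a signal from a withdrawing node. */
        clear_bit(GLF_DEMOTE, &gl->gl_flags);
        clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

        /* ... withdraw/unmount/recovery state checks elided ... */

        /* Only a request to drop to UNLOCKED means another node needs
         * its journal recovered; anything else can be ignored. */
        if (gl->gl_demote_state != LM_ST_UNLOCKED)
                return;

        /* ... recovery kick-off elided (past the last match) ... */
}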