Lines matching defs:gl in fs/gfs2/glock.h

128 void (*lm_put_lock) (struct gfs2_glock *gl);
129 int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
130 unsigned int flags);
131 void (*lm_cancel) (struct gfs2_glock *gl);
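
These three hooks are members of struct lm_lockops, the interface a lock module implements (lock_dlm for cluster mounts, lock_nolock for local ones). Below is a minimal sketch of a module wiring them up, assuming the full struct definition from this header; the demo_* names and the "lock_demo" protocol string are hypothetical, and a real lm_lock would hand the request to a cluster lock manager and report the result back through gfs2_glock_complete() (declared further down in this listing):

	/* Hypothetical demo lock module; not the kernel's lock_dlm. */
	static void demo_put_lock(struct gfs2_glock *gl)
	{
		gfs2_glock_free(gl);	/* final teardown; see gfs2_glock_free() below */
	}

	static int demo_lock(struct gfs2_glock *gl, unsigned int req_state,
			     unsigned int flags)
	{
		return 0;	/* pretend the request was submitted; a real module
				 * would later complete it via gfs2_glock_complete() */
	}

	static void demo_cancel(struct gfs2_glock *gl)
	{
		/* nothing in flight to cancel in this sketch */
	}

	static const struct lm_lockops demo_ops = {
		.lm_proto_name	= "lock_demo",	/* hypothetical */
		.lm_put_lock	= demo_put_lock,
		.lm_lock	= demo_lock,
		.lm_cancel	= demo_cancel,
	};
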
136 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
142 spin_lock(&gl->gl_lockref.lock);
144 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
152 spin_unlock(&gl->gl_lockref.lock);
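
Only the lines mentioning gl survive the match; the elided body walks gl_holders under gl_lockref.lock, compares each holder's owner against the current task, and returns the matching holder or NULL. A typical caller, loosely modelled on gfs2_getattr()/gfs2_permission() in the gfs2 tree (a sketch, assuming ip is a struct gfs2_inode; gfs2_holder_mark_uninitialized() and gfs2_holder_initialized() are helpers from this same header that did not match the search):

	struct gfs2_holder gh;
	int error;

	gfs2_holder_mark_uninitialized(&gh);
	if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_ANY, &gh);
		if (error)
			return error;
	}
	/* ... inspect the inode under at least a shared glock ... */
	if (gfs2_holder_initialized(&gh))
		gfs2_glock_dq_uninit(&gh);
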
157 static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
158 {
159 return gl->gl_state == LM_ST_EXCLUSIVE;
160 }
162 static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
163 {
164 return gl->gl_state == LM_ST_DEFERRED;
165 }
167 static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
168 {
169 return gl->gl_state == LM_ST_SHARED;
170 }
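
These three predicates simply compare gl_state against the locked LM_ST_* modes; callers normally already hold the glock and use them in assertions rather than for locking decisions. A sketch (the invariant chosen is illustrative, not lifted from glock.c):

	GLOCK_BUG_ON(gl, !gfs2_glock_is_held_excl(gl));	/* see the macro below */
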
172 static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
173 {
174 if (gl->gl_ops->go_flags & GLOF_ASPACE)
175 return (struct address_space *)(gl + 1);
176 return NULL;
177 }
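
The (gl + 1) cast works because glock types flagged GLOF_ASPACE (inode glocks) are allocated with their address_space in the same object, immediately after the struct gfs2_glock; glock.c uses a dedicated slab cache sized for both. A sketch of that layout convention using plain kzalloc (illustrative only, not the kernel's slab-cache allocation):

	struct gfs2_glock *gl;
	struct address_space *mapping;

	gl = kzalloc(sizeof(*gl) + sizeof(struct address_space), GFP_NOFS);
	if (!gl)
		return -ENOMEM;
	mapping = (struct address_space *)(gl + 1);	/* == gfs2_glock2aspace(gl) */
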
182 extern void gfs2_glock_hold(struct gfs2_glock *gl);
183 extern void gfs2_glock_put(struct gfs2_glock *gl);
184 extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
185 extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
186 u16 flags, struct gfs2_holder *gh);
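
gfs2_glock_hold() and gfs2_glock_put() take and drop references on gl_lockref; gfs2_glock_queue_put() drops a reference by handing the glock to workqueue context, for callers that must not perform the final put directly. A sketch (can_sleep is a hypothetical stand-in for the caller's context):

	gfs2_glock_hold(gl);			/* pin gl while we use it */
	/* ... */
	if (can_sleep)
		gfs2_glock_put(gl);		/* may do the final put inline */
	else
		gfs2_glock_queue_put(gl);	/* defer the drop to the workqueue */
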
203 extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
204 bool fsid);
205 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { \
206 gfs2_dump_glock(NULL, gl, true); \
207 BUG(); } } while(0)
208 #define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \
209 gfs2_dump_glock(NULL, gl, true); \
210 gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
211 while (0)
212 #define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \
213 gfs2_dump_glock(NULL, gl, true); \
214 gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
215 while (0)
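
All three macros dump the offending glock first and differ only in severity: GLOCK_BUG_ON() calls BUG(), while the assert variants log a warning or withdraw the filesystem. Note the inverted sense: GLOCK_BUG_ON fires when its expression is true, the assert variants when theirs is false. Illustrative uses (the invariants are examples, not lifted from glock.c):

	GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));			/* fatal */
	gfs2_glock_assert_warn(gl, gl->gl_object == NULL);		/* warn, continue */
	gfs2_glock_assert_withdraw(gl, gl->gl_state == LM_ST_EXCLUSIVE); /* withdraw fs */
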
221 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
222 * @gl: the glock
230 static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
231 unsigned int state, u16 flags,
232 struct gfs2_holder *gh)
236 gfs2_holder_init(gl, state, flags, gh);
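
The match elides the rest of the inline. Reconstructed from the surrounding header (exact wording may drift between kernel versions), the full body enqueues the holder it just initialized and tears it down again on failure, so a failed call leaves gh uninitialized:

	int error;

	gfs2_holder_init(gl, state, flags, gh);		/* line 236, shown above */

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;

A successful caller releases the lock later with gfs2_glock_dq_uninit(gh).
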
245 extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
246 extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
247 extern bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay);
248 extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
249 extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
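
gfs2_queue_delete_work() schedules the deferred "try to delete this unlinked inode" work on an iopen glock, typically when a remote node signals contention; gfs2_cancel_delete_work() revokes it and gfs2_delete_work_queued() tests for it. A sketch (the 5 * HZ delay is illustrative, and the false-return handling is an assumption about the queueing semantics):

	if (!gfs2_queue_delete_work(gl, 5 * HZ))
		;	/* assume: work already pending or glock going away */
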
254 extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
255 extern void gfs2_glock_free(struct gfs2_glock *gl);
283 * glock_set_object - set the gl_object field of a glock
284 * @gl: the glock
287 static inline void glock_set_object(struct gfs2_glock *gl, void *object)
288 {
289 spin_lock(&gl->gl_lockref.lock);
290 if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == NULL))
291 gfs2_dump_glock(NULL, gl, true);
292 gl->gl_object = object;
293 spin_unlock(&gl->gl_lockref.lock);
294 }
297 * glock_clear_object - clear the gl_object field of a glock
298 * @gl: the glock
301 * I'd love to similarly add this:
302 * else if (gfs2_assert_warn(gl->gl_name.ln_sbd, gl->gl_object == object))
303 * gfs2_dump_glock(NULL, gl, true);
314 static inline void glock_clear_object(struct gfs2_glock *gl, void *object)
315 {
316 spin_lock(&gl->gl_lockref.lock);
317 if (gl->gl_object == object)
318 gl->gl_object = NULL;
319 spin_unlock(&gl->gl_lockref.lock);
320 }
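
glock_set_object() and glock_clear_object() bracket the lifetime of the object a glock points back at; clearing is deliberately conditional because the underlying block, and hence the glock, may already have been claimed by a new object. A sketch of the pairing as used for inodes (modelled on inode setup and eviction; ip is a struct gfs2_inode):

	glock_set_object(ip->i_gl, ip);		/* once the inode is usable */
	/* ... glock callbacks can now map gl -> ip ... */
	glock_clear_object(ip->i_gl, ip);	/* before the inode is freed */
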
322 extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
323 extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
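
This pair records and checks a "generation deleted" stamp kept in the glock's lock value block, letting a node recognize that an unlinked inode has already been deallocated elsewhere in the cluster. A sketch of the two sides, assuming the callers pass the inode's no_formal_ino as the generation:

	/* after deallocating the dinode: */
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);

	/* before attempting our own deletion: */
	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return 0;	/* another node already did it */
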