Lines matching defs:gl (occurrences of the glock pointer gl; the matched lines below are from the GFS2/DLM lock module, fs/gfs2/lock_dlm.c in the Linux kernel tree)

57 * @gl: The glock to update
59 * This assumes that gl->gl_dstamp has been set earlier.
72 static inline void gfs2_update_reply_times(struct gfs2_glock *gl)
75 const unsigned gltype = gl->gl_name.ln_type;
76 unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?
81 rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
82 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
83 gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */
87 trace_gfs2_glock_lock_time(gl, rtt);
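The lines above come from gfs2_update_reply_times(): on each DLM reply it computes the round-trip time since gl->gl_dstamp was stamped at request submission and folds it into both the per-glock stats (the "Local" update) and the per-CPU, per-superblock stats, choosing the blocking bucket when GLF_BLOCKING is set (GFS2_LKS_SRTTB vs. GFS2_LKS_SRTT in the full source). Below is a minimal userspace sketch of that pattern; all names are stand-ins, and the 1/8 smoothing gain mirrors what gfs2_update_stats() uses in the full source (which additionally tracks a variability estimate, omitted here).

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct lkstats {
	int64_t srtt;	/* smoothed RTT, ns, non-blocking requests */
	int64_t srttb;	/* smoothed RTT, ns, blocking requests */
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);	/* ktime_get_real() analogue */
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static void update_stat(int64_t *stat, int64_t sample)
{
	*stat += (sample - *stat) >> 3;		/* 1/8 gain, TCP-SRTT style */
}

int main(void)
{
	struct lkstats st = { 0, 0 };
	int64_t dstamp = now_ns();	/* stamped when the request is sent */
	int blocking = 1;		/* GLF_BLOCKING analogue */

	/* ... the DLM reply arrives here ... */
	int64_t rtt = now_ns() - dstamp;

	update_stat(blocking ? &st.srttb : &st.srtt, rtt);
	printf("rtt=%lld ns, srttb=%lld ns\n",
	       (long long)rtt, (long long)st.srttb);
	return 0;
}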
92 * @gl: The glock to update
99 static inline void gfs2_update_request_times(struct gfs2_glock *gl)
102 const unsigned gltype = gl->gl_name.ln_type;
107 dstamp = gl->gl_dstamp;
108 gl->gl_dstamp = ktime_get_real();
109 irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
110 lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
111 gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */
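gfs2_update_request_times() measures the inter-request time (the GFS2_LKS_SIRT bucket): the gap between consecutive DLM requests on the same lock. It reads the old gl->gl_dstamp and immediately restamps it with the current time, so the same field then serves the reply-time measurement above. A sketch of the swap-and-restamp, with hypothetical names:

#include <stdint.h>
#include <time.h>

struct lock {
	int64_t dstamp;		/* time the previous request was issued, ns */
	int64_t sirt;		/* smoothed inter-request time, ns */
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static void update_request_times(struct lock *lk)
{
	int64_t prev = lk->dstamp;

	lk->dstamp = now_ns();		/* restamp for the new request */
	lk->sirt += ((lk->dstamp - prev) - lk->sirt) >> 3;
}

int main(void)
{
	struct lock lk = { now_ns(), 0 };

	update_request_times(&lk);	/* next request: one IRT sample taken */
	return 0;
}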
118 struct gfs2_glock *gl = arg;
119 unsigned ret = gl->gl_state;
121 gfs2_update_reply_times(gl);
122 BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
124 if ((gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID) && gl->gl_lksb.sb_lvbptr)
125 memset(gl->gl_lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
127 switch (gl->gl_lksb.sb_status) {
129 if (gl->gl_ops->go_free)
130 gl->gl_ops->go_free(gl);
131 gfs2_glock_free(gl);
148 ret = gl->gl_req;
149 if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
150 if (gl->gl_req == LM_ST_SHARED)
152 else if (gl->gl_req == LM_ST_DEFERRED)
158 set_bit(GLF_INITIAL, &gl->gl_flags);
159 gfs2_glock_complete(gl, ret);
162 if (!test_bit(GLF_INITIAL, &gl->gl_flags))
163 gl->gl_lksb.sb_lkid = 0;
164 gfs2_glock_complete(gl, ret);
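gdlm_ast() is the completion callback handed to dlm_lock(). It updates the reply-time stats, clears a stale lock value block when the DLM flags it invalid (DLM_SBF_VALNOTVALID), then dispatches on gl->gl_lksb.sb_status: an unlock completion frees the glock (via go_free if the type provides it), failures and cancels complete without a grant, and success reports the requested mode, swapped between SHARED and DEFERRED if the DLM granted an alternate mode (DLM_SBF_ALTMODE). A userspace sketch of that dispatch shape follows; the status constants are stand-ins for the -DLM_EUNLOCK / -DLM_ECANCEL / 0 values the kernel DLM reports.

#include <stdio.h>

/* Stand-ins for the kernel's DLM status codes. */
enum { ST_GRANTED = 0, ST_UNLOCKED = -1, ST_CANCELED = -2, ST_TRYFAILED = -3 };

static void completion_ast(int status, int req_mode, int alt_mode)
{
	switch (status) {
	case ST_UNLOCKED:	/* unlock completed: the lock can be freed */
		printf("freed\n");
		return;
	case ST_CANCELED:	/* canceled while waiting */
	case ST_TRYFAILED:	/* try (NOQUEUE) request could not be granted */
		printf("not granted\n");
		return;
	case ST_GRANTED:
		/* granted, possibly in an alternate compatible mode */
		printf("granted mode %d\n", alt_mode ? alt_mode : req_mode);
		return;
	default:
		printf("unexpected status %d\n", status);  /* kernel BUG()s */
	}
}

int main(void)
{
	completion_ast(ST_GRANTED, 3, 0);
	completion_ast(ST_UNLOCKED, 0, 0);
	return 0;
}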
169 struct gfs2_glock *gl = arg;
173 gfs2_glock_cb(gl, LM_ST_UNLOCKED);
176 gfs2_glock_cb(gl, LM_ST_DEFERRED);
179 gfs2_glock_cb(gl, LM_ST_SHARED);
182 fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
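gdlm_bast() is the blocking callback: the DLM invokes it when another node requests a mode that conflicts with the one this node holds. The three cases visible above demote to unlocked, deferred, and shared; in the full source these correspond to bast modes DLM_LOCK_EX, DLM_LOCK_CW, and DLM_LOCK_PR, i.e. the glock drops to the most permissive state still compatible with the remote request. A stand-alone sketch of that mapping, with the VMS-style DLM mode numbering assumed:

#include <stdio.h>

/* Assumed VMS-style DLM mode ordering: NL < CR < CW < PR < PW < EX. */
enum dlm_mode   { DLM_NL, DLM_CR, DLM_CW, DLM_PR, DLM_PW, DLM_EX };
enum gfs2_state { LM_UNLOCKED, LM_SHARED, LM_DEFERRED, LM_EXCLUSIVE };

static int demote_target(enum dlm_mode wanted)
{
	switch (wanted) {
	case DLM_EX: return LM_UNLOCKED;  /* EX conflicts with everything */
	case DLM_CW: return LM_DEFERRED;  /* CW is compatible with CW */
	case DLM_PR: return LM_SHARED;    /* PR is compatible with PR */
	default:     return -1;           /* unknown bast mode */
	}
}

int main(void)
{
	printf("EX bast -> state %d\n", demote_target(DLM_EX));
	return 0;
}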
206 static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
211 if (gl->gl_lksb.sb_lvbptr)
236 if (gl->gl_lksb.sb_lkid != 0) {
238 if (test_bit(GLF_BLOCKING, &gl->gl_flags))
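make_flags() translates a GFS2 request into DLM lock flags. The three branches visible above: a lock value block is requested only when the glock actually has an LVB buffer; a nonzero sb_lkid means a DLM lock already exists, so the request becomes a conversion; and blocking conversions additionally queue rather than being granted in place. In the full source these set DLM_LKF_VALBLK, DLM_LKF_CONVERT, and DLM_LKF_QUECVT from <linux/dlmconstants.h>; the sketch below uses stand-in bits.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in flag bits for the kernel's DLM_LKF_* constants. */
#define LKF_VALBLK  (1u << 0)	/* carry the lock value block (LVB) */
#define LKF_CONVERT (1u << 1)	/* convert an existing lock, don't create */
#define LKF_QUECVT  (1u << 2)	/* queue the conversion, don't jump ahead */

struct lock {
	void    *lvbptr;	/* like gl->gl_lksb.sb_lvbptr */
	uint32_t lkid;		/* like gl->gl_lksb.sb_lkid; 0 = no lock yet */
	bool     blocking;	/* like GLF_BLOCKING */
};

static uint32_t make_flags(const struct lock *lk)
{
	uint32_t lkf = 0;

	if (lk->lvbptr)
		lkf |= LKF_VALBLK;	/* only LVB-bearing locks ship one */
	if (lk->lkid != 0) {		/* lock exists: this is a conversion */
		lkf |= LKF_CONVERT;
		if (lk->blocking)	/* blocking converts must queue fairly */
			lkf |= LKF_QUECVT;
	}
	return lkf;
}

int main(void)
{
	struct lock lk = { .lvbptr = NULL, .lkid = 42, .blocking = true };

	printf("lkf=%#x\n", make_flags(&lk));	/* CONVERT|QUECVT */
	return 0;
}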
254 static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
257 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
262 req = make_mode(gl->gl_name.ln_sbd, req_state);
263 lkf = make_flags(gl, flags, req);
264 gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
265 gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
266 if (gl->gl_lksb.sb_lkid) {
267 gfs2_update_request_times(gl);
271 gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
272 gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
273 gl->gl_dstamp = ktime_get_real();
279 return dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, strname,
280 GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
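gdlm_lock() bumps the DLM request counters, and for an existing lock (sb_lkid nonzero) records the inter-request time. For a brand-new lock it formats the DLM resource name: in the full source the buffer is space-padded, and gfs2_reverse_hex() writes the hex digits of the glock type and number backwards from fixed end columns (offsets 7 and 23 above), after which gl_dstamp is stamped and everything is handed to dlm_lock(). A userspace sketch of the name formatting; GDLM_STRNAME_BYTES is assumed to be 25, consistent with those offsets.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STRNAME_BYTES 25	/* assumed value of GDLM_STRNAME_BYTES */

static void reverse_hex(char *c, uint64_t value)
{
	*c = '0';			/* at least one digit */
	while (value) {
		*c-- = "0123456789abcdef"[value & 0xf];
		value >>= 4;
	}
}

int main(void)
{
	char strname[STRNAME_BYTES];

	memset(strname, ' ', STRNAME_BYTES - 1);
	strname[STRNAME_BYTES - 1] = '\0';
	reverse_hex(strname + 7, 2);		/* ln_type, ends at column 7 */
	reverse_hex(strname + 23, 0x1234);	/* ln_number, ends at column 23 */
	printf("\"%s\"\n", strname);	/* "       2            1234" */
	return 0;
}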
283 static void gdlm_put_lock(struct gfs2_glock *gl)
285 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
289 if (gl->gl_lksb.sb_lkid == 0) {
290 gfs2_glock_free(gl);
294 clear_bit(GLF_BLOCKING, &gl->gl_flags);
295 gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT);
296 gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
297 gfs2_update_request_times(gl);
301 gfs2_glock_free(gl);
307 !gl->gl_lksb.sb_lvbptr) {
308 gfs2_glock_free(gl);
312 error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
313 NULL, gl);
316 gl->gl_name.ln_type,
317 (unsigned long long)gl->gl_name.ln_number, error);
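gdlm_put_lock() drops the final reference. Several early outs free the glock without a DLM round trip: no DLM lock was ever granted (sb_lkid == 0), the lock protocol has already been unmounted (a case in the full source between the lines shown), or unlocks are being skipped after a withdraw and there is no LVB that must be written back (the DLM_LKF_VALBLK unlock exists to push the LVB to the resource master, so it cannot be skipped when an LVB is present). Otherwise dlm_unlock() is issued and gdlm_ast() frees the glock when the unlock completes. A sketch of that ordering with stand-in fields:

#include <stdbool.h>
#include <stdint.h>

struct lock {
	uint32_t lkid;		/* 0: the DLM never granted this lock */
	void    *lvbptr;	/* non-NULL: an LVB must be written back */
	bool     unmounted;	/* lock protocol already torn down */
	bool     skip_unlock;	/* e.g. after a filesystem withdraw */
};

static void free_lock(struct lock *lk)      { (void)lk; /* release memory */ }
static void dlm_unlock_req(struct lock *lk) { (void)lk; /* async unlock */ }

static void put_lock(struct lock *lk)
{
	if (lk->lkid == 0) {		/* no DLM lock exists: just free */
		free_lock(lk);
		return;
	}
	if (lk->unmounted) {		/* the DLM is gone: nothing to call */
		free_lock(lk);
		return;
	}
	/* skipping the unlock is only safe if no LVB needs writing back */
	if (lk->skip_unlock && !lk->lvbptr) {
		free_lock(lk);
		return;
	}
	dlm_unlock_req(lk);	/* completion callback frees the lock later */
}

int main(void)
{
	struct lock lk = { .lkid = 0 };

	put_lock(&lk);
	return 0;
}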
322 static void gdlm_cancel(struct gfs2_glock *gl)
324 struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
325 dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
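gdlm_cancel() shows the DLM convention for aborting an in-flight request: there is no separate cancel call; it is dlm_unlock() with DLM_LKF_CANCEL on the same lock ID, and the outcome is reported asynchronously through the same completion callback (in the full source, gdlm_ast() then observes a canceled status rather than a grant).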