Lines matching refs:gh (struct gfs2_holder references in the Linux kernel's fs/gfs2/glock.c)

The left-hand numbers are source line numbers. Only lines containing "gh" match, so conditionals and function bodies below appear with gaps wherever an intervening source line did not mention "gh".

60 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
308 * @gh: The lock request which we wish to grant
313 static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
316 if ((gh->gh_state == LM_ST_EXCLUSIVE ||
317 gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
319 if (gl->gl_state == gh->gh_state)
321 if (gh->gh_flags & GL_EXACT)
324 if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
326 if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
329 if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
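Kernel lines 313 through 329 are may_grant(), the grant-compatibility test. The search omits lines without "gh", including the gh_head lookup and the enclosing gl->gl_state == LM_ST_EXCLUSIVE test around the SH/SH and DF/DF checks. A reconstruction with those gaps filled from the mainline source of this era; read it as a sketch, not verbatim kernel code:

    /* gh_head is the first queued holder; its lookup line has no "gh"
     * match context in the listing above and is filled in here. */
    static inline int may_grant(const struct gfs2_glock *gl,
                                const struct gfs2_holder *gh)
    {
        const struct gfs2_holder *gh_head =
            list_first_entry(&gl->gl_holders, const struct gfs2_holder, gh_list);

        /* An EX request (or an EX queue head) is compatible only with itself. */
        if ((gh->gh_state == LM_ST_EXCLUSIVE ||
             gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
            return 0;
        /* The glock is already in the requested state: grant immediately. */
        if (gl->gl_state == gh->gh_state)
            return 1;
        /* GL_EXACT insists on exactly that state; no compatible grants. */
        if (gh->gh_flags & GL_EXACT)
            return 0;
        if (gl->gl_state == LM_ST_EXCLUSIVE) {
            /* SH piggybacks on an SH queue head, DF on a DF head. */
            if (gh->gh_state == LM_ST_SHARED &&
                gh_head->gh_state == LM_ST_SHARED)
                return 1;
            if (gh->gh_state == LM_ST_DEFERRED &&
                gh_head->gh_state == LM_ST_DEFERRED)
                return 1;
        }
        /* LM_FLAG_ANY: any locked state will do. */
        if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
            return 1;
        return 0;
    }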
334 static void gfs2_holder_wake(struct gfs2_holder *gh)
336 clear_bit(HIF_WAIT, &gh->gh_iflags);
338 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
339 if (gh->gh_flags & GL_ASYNC) {
340 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
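gfs2_holder_wake() (334 through 340) clears the waiter bit and wakes anyone sleeping on it. The missing kernel line 337 is presumably the smp_mb__after_atomic() barrier between the two, and the GL_ASYNC branch presumably wakes the per-superblock sd_async_glock_wait queue used by async waiters; neither appears in the listing. Reconstructed under those assumptions:

    static void gfs2_holder_wake(struct gfs2_holder *gh)
    {
        clear_bit(HIF_WAIT, &gh->gh_iflags);
        smp_mb__after_atomic();    /* pairs with wait_on_bit() in gfs2_glock_wait() */
        wake_up_bit(&gh->gh_iflags, HIF_WAIT);
        if (gh->gh_flags & GL_ASYNC) {
            struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;

            wake_up(&sdp->sd_async_glock_wait);
        }
    }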
353 struct gfs2_holder *gh, *tmp;
355 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
356 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
359 gh->gh_error = -EIO;
360 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
361 gh->gh_error = GLR_TRYFAILED;
364 list_del_init(&gh->gh_list);
365 trace_gfs2_glock_queue(gh, 0);
366 gfs2_holder_wake(gh);
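Kernel lines 353 through 366 are the failure loop that mainline calls do_error(): every still-queued waiter is failed with -EIO when the lock manager reported an error, or with GLR_TRYFAILED if it was a TRY request; anything else stays queued. The LM_OUT_ERROR test and the fall-through continue contain no "gh" and are missing above. A reconstruction assuming the mainline body:

    static void do_error(struct gfs2_glock *gl, const int ret)
    {
        struct gfs2_holder *gh, *tmp;

        list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
            if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                continue;               /* already granted: leave alone */
            if (ret & LM_OUT_ERROR)
                gh->gh_error = -EIO;
            else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
                gh->gh_error = GLR_TRYFAILED;
            else
                continue;               /* blocking request: keep waiting */
            list_del_init(&gh->gh_list);
            trace_gfs2_glock_queue(gh, 0);
            gfs2_holder_wake(gh);
        }
    }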
383 struct gfs2_holder *gh, *tmp;
387 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
388 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
390 if (may_grant(gl, gh)) {
391 if (gh->gh_list.prev == &gl->gl_holders &&
395 ret = glops->go_lock(gh);
400 gh->gh_error = ret;
401 list_del_init(&gh->gh_list);
402 trace_gfs2_glock_queue(gh, 0);
403 gfs2_holder_wake(gh);
406 set_bit(HIF_HOLDER, &gh->gh_iflags);
407 trace_gfs2_promote(gh, 1);
408 gfs2_holder_wake(gh);
411 set_bit(HIF_HOLDER, &gh->gh_iflags);
412 trace_gfs2_promote(gh, 0);
413 gfs2_holder_wake(gh);
416 if (gh->gh_list.prev == &gl->gl_holders)
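Kernel lines 383 through 416 are the grant loop of what mainline calls do_promote(): holders already granted are skipped; a grantable waiter at the head of the queue may first need the glops go_lock() callback (the kernel drops the spinlock around that call, hence the _safe iteration and a restart label); granted holders get HIF_HOLDER set and are woken; a blocked head waiter means the glock itself must change state first. A condensed sketch of that shape, with declarations, locking and the restart label omitted (the trace argument is 1 on the go_lock path and 0 otherwise, per 407 and 412):

    list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
            continue;                           /* already holds the glock */
        if (may_grant(gl, gh)) {
            if (gh->gh_list.prev == &gl->gl_holders && glops->go_lock) {
                ret = glops->go_lock(gh);       /* head waiter only */
                if (ret) {                      /* grant failed: drop it */
                    gh->gh_error = ret;
                    list_del_init(&gh->gh_list);
                    trace_gfs2_glock_queue(gh, 0);
                    gfs2_holder_wake(gh);
                    continue;
                }
            }
            set_bit(HIF_HOLDER, &gh->gh_iflags);    /* granted */
            trace_gfs2_promote(gh, 0);
            gfs2_holder_wake(gh);
            continue;
        }
        if (gh->gh_list.prev == &gl->gl_holders)
            return 1;       /* head waiter blocked: a state change is needed */
        do_error(gl, 0);    /* fail any queued try locks behind it */
        break;
    }
    return 0;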
425 * find_first_waiter - find the first gh that's waiting for the glock
431 struct gfs2_holder *gh;
433 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
434 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
435 return gh;
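find_first_waiter() (425 through 435) returns the first queued entry that has not yet been granted, i.e. the first without HIF_HOLDER. The only lines missing from the listing are braces and the NULL fallback; the full function, assuming the mainline body:

    static struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
    {
        struct gfs2_holder *gh;

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
            if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
                return gh;      /* first ungranted holder in queue order */
        }
        return NULL;            /* no waiters */
    }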
496 struct gfs2_holder *gh;
503 gh = find_first_waiter(gl);
512 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
515 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
516 list_move_tail(&gh->gh_list, &gl->gl_holders);
517 gh = find_first_waiter(gl);
518 gl->gl_target = gh->gh_state;
523 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
533 do_xmote(gl, gh, gl->gl_target);
538 do_xmote(gl, gh, LM_ST_UNLOCKED);
555 rv = glops->go_xmote_bh(gl, gh);
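Kernel lines 496 through 555 come from what is presumably finish_xmote(), run when the lock manager's reply arrives. If the resulting state is not gl_target and no demote is in progress, a canceled head waiter (unless it carries LM_FLAG_PRIORITY) is moved to the back of the queue and the target is recomputed from the new head (515 through 518); errors and unsatisfiable TRY requests are failed (523); otherwise do_xmote() retries toward gl_target, or backs off through LM_ST_UNLOCKED when a conversion failed (533 through 538). On success the glops go_xmote_bh hook runs (555). A heavily condensed sketch of the retarget path, with the LM_OUT_CANCELED/LM_OUT_ERROR reply handling simplified away:

    gh = find_first_waiter(gl);
    if (state != gl->gl_target && gh &&
        !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
        /* Requeue the (non-PRIORITY) head waiter and retarget on the next. */
        if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
            list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh = find_first_waiter(gl);
        gl->gl_target = gh->gh_state;
        do_xmote(gl, gh, gl->gl_target);    /* retry the conversion */
    }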
585 * @gh: The holder (only for promotes)
590 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
596 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
600 gh && !(gh->gh_flags & LM_FLAG_NOEXP))
714 * find_first_holder - find the first "holder" gh
720 struct gfs2_holder *gh;
723 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
724 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
725 return gh;
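find_first_holder() (714 through 725) is the mirror image: granted holders always precede waiters on gl_holders, so only the first entry needs checking. The list_empty() guard and the NULL fallback are the omitted lines; assuming the mainline body:

    static struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
    {
        struct gfs2_holder *gh;

        if (!list_empty(&gl->gl_holders)) {
            gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
            if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                return gh;      /* queue head is a granted holder */
        }
        return NULL;            /* nothing currently granted */
    }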
741 struct gfs2_holder *gh = NULL;
766 gh = find_first_waiter(gl);
767 gl->gl_target = gh->gh_state;
768 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
771 do_xmote(gl, gh, gl->gl_target);
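Kernel lines 741 through 771 are from what mainline calls run_queue(), the state machine's dispatcher: it first grants whatever do_promote() can, and only when the head waiter remains blocked does it pick a new target state and call do_xmote(). A condensed sketch of the promote-or-convert branch, with the demote path, GLF_LOCK handling and exit labels omitted:

    ret = do_promote(gl);                   /* grant everything compatible */
    if (ret == 0)
        return;                             /* nothing left to do */
    gh = find_first_waiter(gl);             /* head waiter picks next state */
    gl->gl_target = gh->gh_state;
    if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
        do_error(gl, 0);                    /* fail queued try locks */
    do_xmote(gl, gh, gl->gl_target);        /* ask the lock manager to convert */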
810 struct gfs2_holder gh;
813 gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh);
814 error = gfs2_glock_nq(&gh);
816 gfs2_glock_dq(&gh);
817 gfs2_holder_uninit(&gh);
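Kernel lines 810 through 817 show the canonical holder life cycle in miniature: initialize a holder on the glock, enqueue it, and on success dequeue it before uninitializing. A hypothetical helper following the same pattern (the function name and the LM_FLAG_TRY choice are illustrative, not from the listing):

    /* Hypothetical example of the init/nq/dq/uninit pattern above. */
    static int try_touch_shared(struct gfs2_glock *gl)
    {
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_SHARED, LM_FLAG_TRY, &gh);
        error = gfs2_glock_nq(&gh);     /* may fail with GLR_TRYFAILED */
        if (!error)
            gfs2_glock_dq(&gh);         /* drop the granted lock */
        gfs2_holder_uninit(&gh);        /* always release pid + glock refs */
        return error;
    }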
1112 * @gh: the holder structure
1117 struct gfs2_holder *gh)
1119 INIT_LIST_HEAD(&gh->gh_list);
1120 gh->gh_gl = gl;
1121 gh->gh_ip = _RET_IP_;
1122 gh->gh_owner_pid = get_pid(task_pid(current));
1123 gh->gh_state = state;
1124 gh->gh_flags = flags;
1125 gh->gh_error = 0;
1126 gh->gh_iflags = 0;
1134 * @gh: the holder structure
1140 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
1142 gh->gh_state = state;
1143 gh->gh_flags = flags;
1144 gh->gh_iflags = 0;
1145 gh->gh_ip = _RET_IP_;
1146 put_pid(gh->gh_owner_pid);
1147 gh->gh_owner_pid = get_pid(task_pid(current));
1152 * @gh: the holder structure
1156 void gfs2_holder_uninit(struct gfs2_holder *gh)
1158 put_pid(gh->gh_owner_pid);
1159 gfs2_glock_put(gh->gh_gl);
1160 gfs2_holder_mark_uninitialized(gh);
1161 gh->gh_ip = 0;
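Lines 1112 through 1161 show the holder's bookkeeping: gfs2_holder_init() records state, flags, the caller's IP and a pid reference (and, on a line the search omits, presumably takes a reference on the glock itself); gfs2_holder_reinit() refreshes state, flags and the pid without touching that glock reference; gfs2_holder_uninit() puts the pid and drops the glock reference (1158 and 1159). A hypothetical sequence reusing one holder across two lock states:

    /* Hypothetical: probe SH, then retry EX with the same holder. */
    static int probe_then_upgrade(struct gfs2_glock *gl)
    {
        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);     /* takes a gl ref */
        error = gfs2_glock_nq(&gh);
        if (error)
            goto out;
        gfs2_glock_dq(&gh);
        gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, &gh);    /* keeps the gl ref */
        error = gfs2_glock_nq(&gh);
        if (!error)
            gfs2_glock_dq(&gh);
    out:
        gfs2_holder_uninit(&gh);        /* put_pid() + gfs2_glock_put() */
        return error;
    }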
1177 * @gh: the glock holder
1182 int gfs2_glock_wait(struct gfs2_holder *gh)
1187 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1188 gfs2_glock_update_hold_time(gh->gh_gl, start_time);
1189 return gh->gh_error;
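gfs2_glock_wait() (1182 through 1189) sleeps uninterruptibly until gfs2_holder_wake() clears HIF_WAIT, updates the glock's hold-time statistics, and reports gh_error. Holders queued without GL_ASYNC get this wait implicitly inside gfs2_glock_nq() (1435 and 1436), so an explicit call is only needed for async holders. A hypothetical overlap of two requests (gl1, gl2 and the abbreviated error handling are illustrative):

    struct gfs2_holder gh1, gh2;
    int error;

    gfs2_holder_init(gl1, LM_ST_EXCLUSIVE, GL_ASYNC, &gh1);
    gfs2_holder_init(gl2, LM_ST_EXCLUSIVE, GL_ASYNC, &gh2);
    error = gfs2_glock_nq(&gh1);        /* queues and returns without sleeping */
    if (!error)
        error = gfs2_glock_nq(&gh2);
    if (!error)
        error = gfs2_glock_wait(&gh1);  /* sleep until granted or failed */
    if (!error)
        error = gfs2_glock_wait(&gh2);
    /* dq/uninit and unwinding of partial failures omitted for brevity */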
1326 * @gh: the holder structure to add
1334 static inline void add_to_queue(struct gfs2_holder *gh)
1338 struct gfs2_glock *gl = gh->gh_gl;
1344 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
1345 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1348 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1350 try_futile = !may_grant(gl, gh);
1356 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
1357 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
1362 gh->gh_error = GLR_TRYFAILED;
1363 gfs2_holder_wake(gh);
1368 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
1371 trace_gfs2_glock_queue(gh, 1);
1375 list_add_tail(&gh->gh_list, &gl->gl_holders);
1376 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
1380 list_add_tail(&gh->gh_list, insert_pt);
1382 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
1383 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
1396 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
1397 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
1399 gh->gh_gl->gl_name.ln_type, gh->gh_state);
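add_to_queue() (1326 through 1399) sets HIF_WAIT on the new holder, computes whether a TRY request is already futile via may_grant() (1350), and walks the existing queue. Finding another entry with the same owner pid on a non-flock glock means the task queued against itself, a guaranteed self-deadlock, which ends in the fs_err() dump at 1396 through 1399 and a BUG. A hedged sketch of that conflict walk (gh2, try_futile and insert_pt are locals declared earlier in the real function; the trap_recursive label is the dump path):

    list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
        /* Same task queued twice (except flocks): guaranteed deadlock. */
        if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
                     (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
            goto trap_recursive;        /* dumps both holders, then BUGs */
        /* A futile TRY request fails as soon as a non-TRY entry is seen. */
        if (try_futile &&
            !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
            gh->gh_error = GLR_TRYFAILED;
            gfs2_holder_wake(gh);
            return;
        }
        if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
            continue;
        /* PRIORITY requests remember the first waiter to insert before. */
        if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
            insert_pt = &gh2->gh_list;
    }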
1406 * @gh: the holder structure
1408 * if (gh->gh_flags & GL_ASYNC), this never blocks
1413 int gfs2_glock_nq(struct gfs2_holder *gh)
1415 struct gfs2_glock *gl = gh->gh_gl;
1418 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
1425 add_to_queue(gh);
1426 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1435 if (!(gh->gh_flags & GL_ASYNC))
1436 error = gfs2_glock_wait(gh);
1443 * @gh: the holder
1448 int gfs2_glock_poll(struct gfs2_holder *gh)
1450 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
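gfs2_glock_poll() (1448 through 1450) just inverts HIF_WAIT: once the bit is clear, the async request has completed (granted or failed) and gfs2_glock_wait() returns immediately with gh_error. A hypothetical non-blocking pattern (do_other_work() is illustrative only):

    gfs2_holder_init(gl, LM_ST_SHARED, GL_ASYNC, &gh);
    error = gfs2_glock_nq(&gh);
    while (!error && !gfs2_glock_poll(&gh))
        do_other_work();                /* hypothetical */
    if (!error)
        error = gfs2_glock_wait(&gh);   /* returns at once; reaps gh_error */
    if (!error)
        gfs2_glock_dq(&gh);
    gfs2_holder_uninit(&gh);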
1455 * @gh: the glock holder
1459 void gfs2_glock_dq(struct gfs2_holder *gh)
1461 struct gfs2_glock *gl = gh->gh_gl;
1476 gh->gh_gl != sdp->sd_jinode_gl) {
1484 if (gh->gh_flags & GL_NOCACHE)
1487 list_del_init(&gh->gh_list);
1488 clear_bit(HIF_HOLDER, &gh->gh_iflags);
1498 trace_gfs2_glock_queue(gh, 0);
1510 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1512 struct gfs2_glock *gl = gh->gh_gl;
1513 gfs2_glock_dq(gh);
1520 * @gh: the holder structure
1524 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1526 gfs2_glock_dq(gh);
1527 gfs2_holder_uninit(gh);
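Per 1526 and 1527, gfs2_glock_dq_uninit() is exactly the dq + uninit pair used at 816 and 817 above, bundled for the common case where the holder is discarded along with the lock:

    gfs2_glock_dq_uninit(&gh);  /* == gfs2_glock_dq(&gh); gfs2_holder_uninit(&gh); */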
1537 * @gh: the struct gfs2_holder
1544 unsigned int state, u16 flags, struct gfs2_holder *gh)
1551 error = gfs2_glock_nq_init(gl, state, flags, gh);
1700 const struct gfs2_holder *gh;
1707 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1708 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1710 if (LM_FLAG_NOEXP & gh->gh_flags)
2081 * @gh: the glock holder
2086 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
2093 if (gh->gh_owner_pid)
2094 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
2096 fs_id_buf, state2str(gh->gh_state),
2097 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
2098 gh->gh_error,
2099 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
2101 (void *)gh->gh_ip);
2170 const struct gfs2_holder *gh;
2200 list_for_each_entry(gh, &gl->gl_holders, gh_list)
2201 dump_holder(seq, gh, fs_id_buf);