Lines Matching refs:gh (fs/gfs2/glock.c)
63 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
64 static void __gfs2_glock_dq(struct gfs2_holder *gh);
314 * @gh: The lock request which we wish to grant
318 * @current_gh; they are all the same as far as compatibility with the new @gh
326 struct gfs2_holder *gh)
340 return gh->gh_state == LM_ST_EXCLUSIVE &&
342 (gh->gh_flags & LM_FLAG_NODE_SCOPE);
346 return gh->gh_state == current_gh->gh_state;
353 if (gl->gl_state == gh->gh_state)
355 if (gh->gh_flags & GL_EXACT)
358 return gh->gh_state == LM_ST_SHARED ||
359 gh->gh_state == LM_ST_DEFERRED;
361 if (gh->gh_flags & LM_FLAG_ANY)
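
Read together, the may_grant() hits above give the compatibility rules: against an existing holder, an EX request is grantable only when both holders carry LM_FLAG_NODE_SCOPE, and SH or DF requests must match the current holder's state; with no current holder, the glock's own state plus GL_EXACT and LM_FLAG_ANY decide. A minimal sketch of the holder-versus-holder half, paraphrased from the lines above (compat_with_holder is a hypothetical name; the gfs2 headers are assumed for the types and flags):

        static bool compat_with_holder(const struct gfs2_holder *current_gh,
                                       const struct gfs2_holder *gh)
        {
                switch (current_gh->gh_state) {
                case LM_ST_EXCLUSIVE:
                        /* EX is shareable only if both holders opted in. */
                        return gh->gh_state == LM_ST_EXCLUSIVE &&
                               (current_gh->gh_flags & LM_FLAG_NODE_SCOPE) &&
                               (gh->gh_flags & LM_FLAG_NODE_SCOPE);
                case LM_ST_SHARED:
                case LM_ST_DEFERRED:
                        /* SH next to SH, DF next to DF, nothing else. */
                        return gh->gh_state == current_gh->gh_state;
                default:
                        return false;
                }
        }
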
366 static void gfs2_holder_wake(struct gfs2_holder *gh)
368 clear_bit(HIF_WAIT, &gh->gh_iflags);
370 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
371 if (gh->gh_flags & GL_ASYNC) {
372 struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
386 struct gfs2_holder *gh, *tmp;
388 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
389 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
392 gh->gh_error = -EIO;
393 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
394 gh->gh_error = GLR_TRYFAILED;
397 list_del_init(&gh->gh_list);
398 trace_gfs2_glock_queue(gh, 0);
399 gfs2_holder_wake(gh);
404 * find_first_holder - find the first "holder" gh
410 struct gfs2_holder *gh;
413 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder,
415 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
416 return gh;
423 * @gh: The glock holder
427 int gfs2_instantiate(struct gfs2_holder *gh)
429 struct gfs2_glock *gl = gh->gh_gl;
463 return glops->go_held(gh);
476 struct gfs2_holder *gh, *current_gh;
479 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
480 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
482 if (!may_grant(gl, current_gh, gh)) {
489 if (list_is_first(&gh->gh_list, &gl->gl_holders))
494 set_bit(HIF_HOLDER, &gh->gh_iflags);
495 trace_gfs2_promote(gh);
496 gfs2_holder_wake(gh);
498 current_gh = gh;
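
The do_promote() hits above outline the grant scan: entries that already hold the lock (HIF_HOLDER set) are skipped, the scan stops at the first waiter that may_grant() rejects (and when that waiter heads the queue, the glock itself must change state before anyone can progress), and every grantable waiter gets HIF_HOLDER set and is woken. A hedged paraphrase of the loop, reconstructed from the matching lines only:

        list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                        continue;               /* already granted */
                if (!may_grant(gl, current_gh, gh)) {
                        /*
                         * A blocked waiter at the head of the queue means
                         * the glock must change state (do_xmote) before
                         * any other waiter can be granted.
                         */
                        if (list_is_first(&gh->gh_list, &gl->gl_holders))
                                return;
                        break;
                }
                set_bit(HIF_HOLDER, &gh->gh_iflags);    /* grant it */
                trace_gfs2_promote(gh);
                gfs2_holder_wake(gh);
                current_gh = gh;
        }
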
504 * find_first_waiter - find the first gh that's waiting for the glock
510 struct gfs2_holder *gh;
512 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
513 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
514 return gh;
574 struct gfs2_holder *gh;
580 gh = find_first_waiter(gl);
589 if (gh && (ret & LM_OUT_CANCELED))
590 gfs2_holder_wake(gh);
591 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
594 list_move_tail(&gh->gh_list, &gl->gl_holders);
595 gh = find_first_waiter(gl);
596 gl->gl_target = gh->gh_state;
603 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
613 do_xmote(gl, gh, gl->gl_target);
618 do_xmote(gl, gh, LM_ST_UNLOCKED);
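
These finish_xmote() hits cover the mismatch path, where the state the lock manager returned is not gl_target: a canceled request wakes its waiter and, if no demote is in progress, is pushed to the back of the queue so the next waiter's state becomes the new target; a failed try-lock is reported rather than retried; anything else retries via do_xmote(), either straight to gl_target or through LM_ST_UNLOCKED after a failed conversion. A loose paraphrase (do_error() and LM_OUT_ERROR are taken from surrounding context, not from the hits above):

        if (gh && (ret & LM_OUT_CANCELED))
                gfs2_holder_wake(gh);           /* wake the canceled waiter */
        if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
                if (ret & LM_OUT_CANCELED) {
                        /* Retarget: next waiter's state becomes the goal. */
                        list_move_tail(&gh->gh_list, &gl->gl_holders);
                        gh = find_first_waiter(gl);
                        gl->gl_target = gh->gh_state;
                } else if ((ret & LM_OUT_ERROR) ||
                           (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
                        do_error(gl, ret);      /* report, do not retry */
                        return;
                }
        }
        if (state == LM_ST_UNLOCKED)
                do_xmote(gl, gh, gl->gl_target);        /* retry the request  */
        else
                do_xmote(gl, gh, LM_ST_UNLOCKED);       /* unlock, then retry */
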
664 * @gh: The holder (only for promotes)
669 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh,
676 unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
680 gh && !(gh->gh_flags & LM_FLAG_NOEXP))
818 struct gfs2_holder *gh = NULL;
839 gh = find_first_waiter(gl);
840 gl->gl_target = gh->gh_state;
841 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
844 do_xmote(gl, gh, gl->gl_target);
924 struct gfs2_holder gh;
927 __gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
928 error = gfs2_glock_nq(&gh);
930 gfs2_glock_dq(&gh);
931 gfs2_holder_uninit(&gh);
1245 * @gh: the holder structure
1250 struct gfs2_holder *gh, unsigned long ip)
1252 INIT_LIST_HEAD(&gh->gh_list);
1253 gh->gh_gl = gfs2_glock_hold(gl);
1254 gh->gh_ip = ip;
1255 gh->gh_owner_pid = get_pid(task_pid(current));
1256 gh->gh_state = state;
1257 gh->gh_flags = flags;
1258 gh->gh_iflags = 0;
1265 * @gh: the holder structure
1271 void gfs2_holder_reinit(unsigned int state, u16 flags, struct gfs2_holder *gh)
1273 gh->gh_state = state;
1274 gh->gh_flags = flags;
1275 gh->gh_iflags = 0;
1276 gh->gh_ip = _RET_IP_;
1277 put_pid(gh->gh_owner_pid);
1278 gh->gh_owner_pid = get_pid(task_pid(current));
1283 * @gh: the holder structure
1287 void gfs2_holder_uninit(struct gfs2_holder *gh)
1289 put_pid(gh->gh_owner_pid);
1290 gfs2_glock_put(gh->gh_gl);
1291 gfs2_holder_mark_uninitialized(gh);
1292 gh->gh_ip = 0;
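
Together these three helpers bracket a holder's lifetime: __gfs2_holder_init() takes a glock reference and records the owner pid, state and flags; gfs2_holder_reinit() re-arms an initialized holder with a new state; gfs2_holder_uninit() drops the pid and glock references again. A minimal sketch of the usual synchronous sequence, assuming a glock gl already obtained and using the gfs2_holder_init() wrapper from glock.h that supplies _RET_IP_:

        struct gfs2_holder gh;
        int error;

        gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);     /* takes a glock ref */
        error = gfs2_glock_nq(&gh);                     /* queue and wait */
        if (!error) {
                /* ... the protected object is locked SH here ... */
                gfs2_glock_dq(&gh);                     /* drop the lock */
        }
        gfs2_holder_uninit(&gh);                        /* drop pid + glock refs */
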
1308 * @gh: the glock holder
1315 int gfs2_glock_holder_ready(struct gfs2_holder *gh)
1317 if (gh->gh_error || (gh->gh_flags & GL_SKIP))
1318 return gh->gh_error;
1319 gh->gh_error = gfs2_instantiate(gh);
1320 if (gh->gh_error)
1321 gfs2_glock_dq(gh);
1322 return gh->gh_error;
1327 * @gh: the glock holder
1332 int gfs2_glock_wait(struct gfs2_holder *gh)
1337 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1338 gfs2_glock_update_hold_time(gh->gh_gl, start_time);
1339 return gfs2_glock_holder_ready(gh);
1383 struct gfs2_holder *gh = &ghs[i];
1386 if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1387 gfs2_glock_update_hold_time(gh->gh_gl,
1390 ret2 = gfs2_glock_holder_ready(gh);
1398 struct gfs2_holder *gh = &ghs[i];
1400 gfs2_glock_dq(gh);
1455 static inline bool pid_is_meaningful(const struct gfs2_holder *gh)
1457 if (!(gh->gh_flags & GL_NOPID))
1459 if (gh->gh_state == LM_ST_UNLOCKED)
1466 * @gh: the holder structure to add
1474 static inline void add_to_queue(struct gfs2_holder *gh)
1478 struct gfs2_glock *gl = gh->gh_gl;
1484 GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
1485 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
1488 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
1493 try_futile = !may_grant(gl, current_gh, gh);
1500 if (likely(gh2->gh_owner_pid != gh->gh_owner_pid))
1502 if (gh->gh_gl->gl_ops->go_type == LM_TYPE_FLOCK)
1512 gh->gh_error = GLR_TRYFAILED;
1513 gfs2_holder_wake(gh);
1519 trace_gfs2_glock_queue(gh, 1);
1523 list_add_tail(&gh->gh_list, &gl->gl_holders);
1526 list_add_tail(&gh->gh_list, insert_pt);
1527 gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
1539 fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
1540 fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
1542 gh->gh_gl->gl_name.ln_type, gh->gh_state);
1549 * @gh: the holder structure
1551 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1556 int gfs2_glock_nq(struct gfs2_holder *gh)
1558 struct gfs2_glock *gl = gh->gh_gl;
1561 if (glock_blocked_by_withdraw(gl) && !(gh->gh_flags & LM_FLAG_NOEXP))
1567 gh->gh_error = 0;
1569 add_to_queue(gh);
1570 if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
1579 if (!(gh->gh_flags & GL_ASYNC))
1580 error = gfs2_glock_wait(gh);
1587 * @gh: the holder
1592 int gfs2_glock_poll(struct gfs2_holder *gh)
1594 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
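
As the comment fragment above notes, gfs2_glock_nq() never returns an error for a GL_ASYNC holder: the request completes in the background, gfs2_glock_poll() tests whether HIF_WAIT has cleared, and gfs2_glock_wait() then collects gh_error (a failed holder is dequeued by gfs2_glock_holder_ready(), as shown earlier). A hedged sketch of the asynchronous pattern; the ghs[] hits around 1383 show the batched variant of the final wait:

        gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
        gfs2_glock_nq(&gh);                     /* with GL_ASYNC this cannot fail */

        while (!gfs2_glock_poll(&gh)) {
                /* Not granted yet; a real caller would overlap useful
                 * work here rather than spin like this sketch does. */
                cond_resched();
        }

        error = gfs2_glock_wait(&gh);           /* reap gh_error, instantiate */
        if (error)                              /* failed holders are already  */
                gfs2_holder_uninit(&gh);        /* dequeued; just drop refs    */
        /* on success: gfs2_glock_dq_uninit(&gh) when done with the lock */
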
1603 static void __gfs2_glock_dq(struct gfs2_holder *gh)
1605 struct gfs2_glock *gl = gh->gh_gl;
1614 if (gh->gh_flags & GL_NOCACHE)
1617 list_del_init(&gh->gh_list);
1618 clear_bit(HIF_HOLDER, &gh->gh_iflags);
1619 trace_gfs2_glock_queue(gh, 0);
1645 * @gh: the glock holder
1648 void gfs2_glock_dq(struct gfs2_holder *gh)
1650 struct gfs2_glock *gl = gh->gh_gl;
1654 if (!gfs2_holder_queued(gh)) {
1662 if (list_is_first(&gh->gh_list, &gl->gl_holders) &&
1663 !test_bit(HIF_HOLDER, &gh->gh_iflags)) {
1666 wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
1679 gh->gh_gl != sdp->sd_jinode_gl) {
1688 __gfs2_glock_dq(gh);
1693 void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1695 struct gfs2_glock *gl = gh->gh_gl;
1696 gfs2_glock_dq(gh);
1703 * @gh: the holder structure
1707 void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1709 gfs2_glock_dq(gh);
1710 gfs2_holder_uninit(gh);
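
gfs2_glock_dq_uninit() is the tail half of a common shorthand pair; its counterpart gfs2_glock_nq_init() (visible in the hit at 1734 below) combines holder init with enqueue and uninitializes the holder itself if the enqueue fails. A minimal sketch of the paired usage, assuming a valid glock gl:

        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
        if (error)              /* nq_init already uninited the holder */
                return error;

        /* ... read under the shared lock ... */

        gfs2_glock_dq_uninit(&gh);      /* drop the lock and the refs */
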
1720 * @gh: the struct gfs2_holder
1727 unsigned int state, u16 flags, struct gfs2_holder *gh)
1734 error = gfs2_glock_nq_init(gl, state, flags, gh);
1880 const struct gfs2_holder *gh;
1887 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1888 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1890 if (LM_FLAG_NOEXP & gh->gh_flags)
2246 * @gh: the glock holder
2251 static void dump_holder(struct seq_file *seq, const struct gfs2_holder *gh,
2259 if (pid_is_meaningful(gh)) {
2263 owner_pid = pid_nr(gh->gh_owner_pid);
2264 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
2269 fs_id_buf, state2str(gh->gh_state),
2270 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
2271 gh->gh_error, (long)owner_pid, comm, (void *)gh->gh_ip);
2344 const struct gfs2_holder *gh;
2374 list_for_each_entry(gh, &gl->gl_holders, gh_list)
2375 dump_holder(seq, gh, fs_id_buf);