Lines matching references to res

Each entry below pairs a line number in the searched source file with the matching source line; the matches trace the DLM unlock path (dlmunlock_common(), dlm_commit_pending_unlock() and dlm_commit_pending_cancel(), dlmunlock_master() and dlmunlock_remote(), dlm_send_remote_unlock_request(), and the helpers that act on a dlm_lock_resource).

44 					      struct dlm_lock_resource *res,
49 struct dlm_lock_resource *res,
55 struct dlm_lock_resource *res,
78 * taken: res->spinlock and lock->spinlock taken and dropped
84 struct dlm_lock_resource *res,
100 BUG_ON(res->owner != dlm->node_num);
102 BUG_ON(res->owner == dlm->node_num);
111 "while waiting for an ast!", res->lockname.len,
112 res->lockname.name);
116 spin_lock(&res->spinlock);
117 if (res->state & DLM_LOCK_RES_IN_PROGRESS) {
120 spin_unlock(&res->spinlock);
124 __dlm_wait_on_lockres(res);
125 res->state |= DLM_LOCK_RES_IN_PROGRESS;
129 if (res->state & DLM_LOCK_RES_RECOVERING) {
134 if (res->state & DLM_LOCK_RES_MIGRATING) {
142 status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions);
144 status = dlm_get_unlock_actions(dlm, res, lock, lksb, &actions);
153 memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
160 owner = res->owner;
167 spin_unlock(&res->spinlock);
168 status = dlm_send_remote_unlock_request(dlm, res, lock, lksb,
170 spin_lock(&res->spinlock);
187 dlm->name, res->lockname.len,
188 res->lockname.name,
215 list_add_tail(&lock->list, &res->granted);
227 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
228 if (!dlm_lock_on_list(&res->converting, lock))
233 spin_unlock(&res->spinlock);
234 wake_up(&res->wq);
237 spin_lock(&res->spinlock);
243 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING);
244 spin_unlock(&res->spinlock);
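Taken together, the matched lines in the block above show the core serialization pattern of the common unlock path: take res->spinlock, wait for DLM_LOCK_RES_IN_PROGRESS to clear, refuse to proceed while the resource is RECOVERING or MIGRATING, choose cancel or unlock actions, and always clear the in-progress flag and wake res->wq on the way out. Below is a minimal userspace sketch of that shape, using a pthread mutex and condition variable as stand-ins for the spinlock and wait queue; all names and types here are illustrative, not the kernel API.

#include <pthread.h>
#include <stdio.h>

#define RES_IN_PROGRESS  0x01   /* another unlock/convert is running */
#define RES_RECOVERING   0x02   /* resource owned by a dead node */
#define RES_MIGRATING    0x04   /* resource moving to a new owner */

struct res_sketch {
	pthread_mutex_t lock;   /* stands in for res->spinlock */
	pthread_cond_t  wq;     /* stands in for res->wq */
	unsigned int    state;
};

enum unlock_status { ST_OK, ST_RECOVERING, ST_MIGRATING };

static enum unlock_status unlock_common_sketch(struct res_sketch *res, int cancel)
{
	enum unlock_status st = ST_OK;

	pthread_mutex_lock(&res->lock);

	/* wait until no other unlock/convert is in progress on this resource */
	while (res->state & RES_IN_PROGRESS)
		pthread_cond_wait(&res->wq, &res->lock);
	res->state |= RES_IN_PROGRESS;

	if (res->state & RES_RECOVERING) {
		st = ST_RECOVERING;              /* caller retries later */
		goto leave;
	}
	if (res->state & RES_MIGRATING) {
		st = ST_MIGRATING;               /* caller retries later */
		goto leave;
	}

	/* choose between cancelling a pending convert and a plain unlock */
	puts(cancel ? "cancel actions" : "unlock actions");

leave:
	res->state &= ~RES_IN_PROGRESS;
	pthread_cond_broadcast(&res->wq);        /* wake_up(&res->wq) analogue */
	pthread_mutex_unlock(&res->lock);
	return st;
}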
267 void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
275 void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
278 list_move_tail(&lock->list, &res->granted);
284 struct dlm_lock_resource *res,
290 return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1);
294 struct dlm_lock_resource *res,
299 return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0);
310 struct dlm_lock_resource *res,
323 mlog(0, "%.*s\n", res->lockname.len, res->lockname.name);
331 res->lockname.len, res->lockname.name);
339 unlock.namelen = res->lockname.len;
340 memcpy(unlock.name, res->lockname.name, unlock.namelen);
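The last two matched lines above copy the lock name and its length into the outgoing unlock request. A small sketch of that packing step, with an illustrative message struct and size constant rather than the kernel's definitions:

#include <assert.h>
#include <string.h>

#define SKETCH_NAME_MAX 32      /* illustrative; not the kernel's constant */

struct unlock_msg_sketch {
	unsigned char namelen;
	char name[SKETCH_NAME_MAX];
};

/* Pack a (not necessarily NUL-terminated) lock name into the wire message. */
static void pack_lock_name(struct unlock_msg_sketch *msg,
			   const char *name, unsigned char len)
{
	assert(len <= SKETCH_NAME_MAX);
	msg->namelen = len;
	memcpy(msg->name, name, len);
}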
386 * taken: takes and drops res->spinlock
396 struct dlm_lock_resource *res = NULL;
431 res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen);
432 if (!res) {
436 mlog(0, "returning DLM_FORWARD -- res no longer exists\n");
441 queue=&res->granted;
443 spin_lock(&res->spinlock);
444 if (res->state & DLM_LOCK_RES_RECOVERING) {
445 spin_unlock(&res->spinlock);
451 if (res->state & DLM_LOCK_RES_MIGRATING) {
452 spin_unlock(&res->spinlock);
458 if (res->owner != dlm->node_num) {
459 spin_unlock(&res->spinlock);
479 spin_unlock(&res->spinlock);
499 status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore);
506 dlm_lockres_calc_usage(dlm, res);
507 dlm_kick_thread(dlm, res);
519 if (res)
520 dlm_lockres_put(res);
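The block above is the receive side: the resource is looked up by name, and under res->spinlock the request is refused if the resource is recovering, migrating, or no longer mastered here (so the sender sees DLM_FORWARD and retries); otherwise the master unlock runs, usage is recalculated, the dlm thread is kicked, and the reference taken by the lookup is dropped. A compressed sketch of that control flow with hypothetical simplified types, no real networking or locking:

#define R_RECOVERING 0x1
#define R_MIGRATING  0x2

struct handler_res_sketch {
	unsigned int state;
	int owned_locally;      /* does this node master the resource? */
	int refs;
};

enum handler_status { H_OK, H_FORWARD, H_RECOVERING, H_MIGRATING };

/* Stand-in for the lookup: NULL means the resource is no longer on this node. */
static struct handler_res_sketch *lookup_res(struct handler_res_sketch *maybe_here)
{
	if (maybe_here)
		maybe_here->refs++;     /* lookup hands back a counted reference */
	return maybe_here;
}

static void put_res(struct handler_res_sketch *res)
{
	res->refs--;                    /* dlm_lockres_put() analogue */
}

static enum handler_status unlock_handler_sketch(struct handler_res_sketch *maybe_here)
{
	enum handler_status st = H_OK;
	struct handler_res_sketch *res = lookup_res(maybe_here);

	if (!res)
		return H_FORWARD;       /* res no longer exists: sender retries */

	if (res->state & R_RECOVERING)
		st = H_RECOVERING;
	else if (res->state & R_MIGRATING)
		st = H_MIGRATING;
	else if (!res->owned_locally)
		st = H_FORWARD;         /* not the master any more */
	/* else: the master-side unlock would run here */

	put_res(res);                   /* always drop the lookup reference */
	return st;
}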
529 struct dlm_lock_resource *res,
536 if (dlm_lock_on_list(&res->blocked, lock)) {
541 } else if (dlm_lock_on_list(&res->converting, lock)) {
548 } else if (dlm_lock_on_list(&res->granted, lock)) {
561 struct dlm_lock_resource *res,
569 if (!dlm_lock_on_list(&res->granted, lock)) {
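The two helpers matched just above differ in what they accept: a cancel inspects which queue the lock currently sits on (blocked, converting, or granted), while a plain unlock is only valid for a lock on the granted queue. A hypothetical sketch of that decision; the action names are illustrative only:

enum queue_kind { Q_BLOCKED, Q_CONVERTING, Q_GRANTED };

enum action_sketch {
	ACT_INVALID,
	ACT_REMOVE_BLOCKED,     /* request never granted: just take it off */
	ACT_ABORT_CONVERT,      /* convert pending: fall back to the old mode */
	ACT_NOTHING_PENDING,    /* already granted: nothing left to cancel */
	ACT_UNLOCK              /* granted lock being released */
};

static enum action_sketch cancel_actions_sketch(enum queue_kind q)
{
	switch (q) {
	case Q_BLOCKED:    return ACT_REMOVE_BLOCKED;
	case Q_CONVERTING: return ACT_ABORT_CONVERT;
	case Q_GRANTED:    return ACT_NOTHING_PENDING;
	}
	return ACT_INVALID;
}

static enum action_sketch unlock_actions_sketch(enum queue_kind q)
{
	/* only a lock on the granted queue can be unlocked outright */
	return (q == Q_GRANTED) ? ACT_UNLOCK : ACT_INVALID;
}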
591 struct dlm_lock_resource *res;
619 res = lock->lockres;
620 BUG_ON(!res);
621 dlm_lockres_get(res);
625 mlog(0, "lock=%p res=%p\n", lock, res);
627 spin_lock(&res->spinlock);
628 is_master = (res->owner == dlm->node_num);
631 spin_unlock(&res->spinlock);
634 status = dlmunlock_master(dlm, res, lock, lksb, flags,
639 status = dlmunlock_remote(dlm, res, lock, lksb, flags,
686 dlm_kick_thread(dlm, res);
690 dlm_lockres_calc_usage(dlm, res);
691 dlm_lockres_put(res);
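The final block is the caller-facing entry point: it pins lock->lockres, reads the owner under res->spinlock to decide whether this node masters the resource, runs the master or remote path accordingly, then kicks the dlm thread, recalculates resource usage, and drops its reference. A hypothetical single-threaded sketch of that dispatch with simplified types and no real locking:

struct dispatch_res_sketch {
	int owner_node;
	int refs;
};

static int this_node = 1;       /* stands in for dlm->node_num */

static int master_unlock_sketch(struct dispatch_res_sketch *res)
{
	(void)res;
	return 0;                   /* unlock handled locally */
}

static int remote_unlock_sketch(struct dispatch_res_sketch *res)
{
	(void)res;
	return 0;                   /* message the owning node instead */
}

static int unlock_dispatch_sketch(struct dispatch_res_sketch *res)
{
	int is_master, status;

	res->refs++;                /* pin the resource for the whole call */

	/* in the real code this read happens under res->spinlock */
	is_master = (res->owner_node == this_node);

	if (is_master)
		status = master_unlock_sketch(res);
	else
		status = remote_unlock_sketch(res);

	/* the resource may now be idle: let the dlm thread re-evaluate it */
	res->refs--;                /* drop the reference taken above */
	return status;
}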