Lines matching refs: res
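Each entry below pairs a line number from the source file with the text of that line; the matches are the references to the lock-resource variable res in the OCFS2 DLM's lock-mastery code (evidently fs/ocfs2/dlm/dlmmaster.c in the kernel tree). A short sketch of the reference-counting pattern that threads through these matches follows the listing.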

51 struct dlm_lock_resource *res,
78 struct dlm_lock_resource *res,
87 static int dlm_do_master_request(struct dlm_lock_resource *res,
92 struct dlm_lock_resource *res,
96 struct dlm_lock_resource *res,
100 struct dlm_lock_resource *res,
107 struct dlm_lock_resource *res);
109 struct dlm_lock_resource *res);
111 struct dlm_lock_resource *res,
114 struct dlm_lock_resource *res);
253 struct dlm_lock_resource *res,
278 BUG_ON(!res);
279 mle->mleres = res;
280 memcpy(mle->mname, res->lockname.name, res->lockname.len);
281 mle->mnamelen = res->lockname.len;
282 mle->mnamehash = res->lockname.hash;
468 struct dlm_lock_resource *res;
471 res = container_of(kref, struct dlm_lock_resource, refs);
472 dlm = res->dlm;
476 BUG_ON(!res->lockname.name);
478 mlog(0, "destroying lockres %.*s\n", res->lockname.len,
479 res->lockname.name);
483 if (!hlist_unhashed(&res->hash_node) ||
484 !list_empty(&res->granted) ||
485 !list_empty(&res->converting) ||
486 !list_empty(&res->blocked) ||
487 !list_empty(&res->dirty) ||
488 !list_empty(&res->recovering) ||
489 !list_empty(&res->purge)) {
493 res->lockname.len, res->lockname.name,
494 !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
495 !list_empty(&res->granted) ? 'G' : ' ',
496 !list_empty(&res->converting) ? 'C' : ' ',
497 !list_empty(&res->blocked) ? 'B' : ' ',
498 !list_empty(&res->dirty) ? 'D' : ' ',
499 !list_empty(&res->recovering) ? 'R' : ' ',
500 !list_empty(&res->purge) ? 'P' : ' ');
502 dlm_print_one_lock_resource(res);
507 BUG_ON(!hlist_unhashed(&res->hash_node));
508 BUG_ON(!list_empty(&res->granted));
509 BUG_ON(!list_empty(&res->converting));
510 BUG_ON(!list_empty(&res->blocked));
511 BUG_ON(!list_empty(&res->dirty));
512 BUG_ON(!list_empty(&res->recovering));
513 BUG_ON(!list_empty(&res->purge));
515 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
517 kmem_cache_free(dlm_lockres_cache, res);
520 void dlm_lockres_put(struct dlm_lock_resource *res)
522 kref_put(&res->refs, dlm_lockres_release);
526 struct dlm_lock_resource *res,
532 * res->lockname.name, so be sure to init every field
535 qname = (char *) res->lockname.name;
538 res->lockname.len = namelen;
539 res->lockname.hash = dlm_lockid_hash(name, namelen);
541 init_waitqueue_head(&res->wq);
542 spin_lock_init(&res->spinlock);
543 INIT_HLIST_NODE(&res->hash_node);
544 INIT_LIST_HEAD(&res->granted);
545 INIT_LIST_HEAD(&res->converting);
546 INIT_LIST_HEAD(&res->blocked);
547 INIT_LIST_HEAD(&res->dirty);
548 INIT_LIST_HEAD(&res->recovering);
549 INIT_LIST_HEAD(&res->purge);
550 INIT_LIST_HEAD(&res->tracking);
551 atomic_set(&res->asts_reserved, 0);
552 res->migration_pending = 0;
553 res->inflight_locks = 0;
554 res->inflight_assert_workers = 0;
556 res->dlm = dlm;
558 kref_init(&res->refs);
564 spin_lock(&res->spinlock);
565 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
566 spin_unlock(&res->spinlock);
568 res->state = DLM_LOCK_RES_IN_PROGRESS;
570 res->last_used = 0;
573 list_add_tail(&res->tracking, &dlm->tracking_list);
576 memset(res->lvb, 0, DLM_LVB_LEN);
577 memset(res->refmap, 0, sizeof(res->refmap));
584 struct dlm_lock_resource *res = NULL;
586 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
587 if (!res)
590 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
591 if (!res->lockname.name)
594 dlm_init_lockres(dlm, res, name, namelen);
595 return res;
598 if (res)
599 kmem_cache_free(dlm_lockres_cache, res);
604 struct dlm_lock_resource *res, int bit)
606 assert_spin_locked(&res->spinlock);
608 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
609 res->lockname.name, bit, __builtin_return_address(0));
611 set_bit(bit, res->refmap);
615 struct dlm_lock_resource *res, int bit)
617 assert_spin_locked(&res->spinlock);
619 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
620 res->lockname.name, bit, __builtin_return_address(0));
622 clear_bit(bit, res->refmap);
626 struct dlm_lock_resource *res)
628 res->inflight_locks++;
630 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
631 res->lockname.len, res->lockname.name, res->inflight_locks,
636 struct dlm_lock_resource *res)
638 assert_spin_locked(&res->spinlock);
639 __dlm_lockres_grab_inflight_ref(dlm, res);
643 struct dlm_lock_resource *res)
645 assert_spin_locked(&res->spinlock);
647 BUG_ON(res->inflight_locks == 0);
649 res->inflight_locks--;
651 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
652 res->lockname.len, res->lockname.name, res->inflight_locks,
655 wake_up(&res->wq);
659 struct dlm_lock_resource *res)
661 assert_spin_locked(&res->spinlock);
662 res->inflight_assert_workers++;
664 dlm->name, res->lockname.len, res->lockname.name,
665 res->inflight_assert_workers);
669 struct dlm_lock_resource *res)
671 assert_spin_locked(&res->spinlock);
672 BUG_ON(res->inflight_assert_workers == 0);
673 res->inflight_assert_workers--;
675 dlm->name, res->lockname.len, res->lockname.name,
676 res->inflight_assert_workers);
680 struct dlm_lock_resource *res)
682 spin_lock(&res->spinlock);
683 __dlm_lockres_drop_inflight_worker(dlm, res);
684 spin_unlock(&res->spinlock);
708 struct dlm_lock_resource *tmpres=NULL, *res=NULL;
768 if (res) {
770 if (!list_empty(&res->tracking))
771 list_del_init(&res->tracking);
775 res->lockname.len,
776 res->lockname.name);
778 dlm_lockres_put(res);
780 res = tmpres;
784 if (!res) {
791 res = dlm_new_lockres(dlm, lockid, namelen);
792 if (!res)
797 mlog(0, "no lockres found, allocated our own: %p\n", res);
802 spin_lock(&res->spinlock);
803 dlm_change_lockres_owner(dlm, res, dlm->node_num);
804 __dlm_insert_lockres(dlm, res);
805 dlm_lockres_grab_inflight_ref(dlm, res);
806 spin_unlock(&res->spinlock);
858 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
868 mlog(0, "%s: res %.*s, At least one node (%d) "
881 __dlm_insert_lockres(dlm, res);
884 __dlm_lockres_grab_inflight_ref(dlm, res);
902 if (!dlm_pre_master_reco_lockres(dlm, res))
919 mlog(0, "%s: res %.*s, At least one node (%d) "
938 ret = dlm_do_master_request(res, mle, nodenum);
949 mlog(0, "%s: res %.*s, Requests only up to %u but "
957 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
960 mlog(0, "%s: res %.*s, Node map changed, redo the master "
961 "request now, blocked=%d\n", dlm->name, res->lockname.len,
962 res->lockname.name, blocked);
964 mlog(ML_ERROR, "%s: res %.*s, Spinning on "
966 dlm->name, res->lockname.len,
967 res->lockname.name, blocked);
968 dlm_print_one_lock_resource(res);
975 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
976 res->lockname.name, res->owner);
978 BUG_ON(res->owner == O2NM_MAX_NODES);
987 spin_lock(&res->spinlock);
988 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
989 spin_unlock(&res->spinlock);
990 wake_up(&res->wq);
997 return res;
1004 struct dlm_lock_resource *res,
1018 spin_lock(&res->spinlock);
1019 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1021 res->lockname.len, res->lockname.name, res->owner);
1022 spin_unlock(&res->spinlock);
1025 if (res->owner != dlm->node_num) {
1026 ret = dlm_do_master_request(res, mle, res->owner);
1029 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1037 spin_unlock(&res->spinlock);
1050 dlm->name, res->lockname.len, res->lockname.name);
1051 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1055 dlm->name, res->lockname.len, res->lockname.name,
1065 "rechecking now\n", dlm->name, res->lockname.len,
1066 res->lockname.name);
1071 "for %s:%.*s\n", dlm->name, res->lockname.len,
1072 res->lockname.name);
1109 if (res->owner == O2NM_MAX_NODES) {
1111 res->lockname.len, res->lockname.name);
1114 mlog(0, "done waiting, master is %u\n", res->owner);
1123 res->lockname.len, res->lockname.name, m);
1124 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1139 spin_lock(&res->spinlock);
1142 dlm_change_lockres_owner(dlm, res, m);
1143 spin_unlock(&res->spinlock);
1209 struct dlm_lock_resource *res,
1256 res->lockname.len,
1257 res->lockname.name,
1272 res->lockname.len,
1273 res->lockname.name);
1275 mle->mleres = res;
1308 static int dlm_do_master_request(struct dlm_lock_resource *res,
1360 "reference\n", dlm->name, res->lockname.len,
1361 res->lockname.name, to);
1396 * res->spinlock
1407 struct dlm_lock_resource *res = NULL;
1436 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1437 if (res) {
1441 spin_lock(&res->spinlock);
1448 if (hlist_unhashed(&res->hash_node)) {
1449 spin_unlock(&res->spinlock);
1450 dlm_lockres_put(res);
1454 if (res->state & (DLM_LOCK_RES_RECOVERING|
1456 spin_unlock(&res->spinlock);
1457 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1465 if (res->owner == dlm->node_num) {
1466 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1467 spin_unlock(&res->spinlock);
1480 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1481 spin_unlock(&res->spinlock);
1482 // mlog(0, "node %u is the master\n", res->owner);
1492 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1531 dlm_lockres_set_refmap_bit(dlm, res,
1545 spin_unlock(&res->spinlock);
1618 dlm->node_num, res->lockname.len, res->lockname.name);
1619 spin_lock(&res->spinlock);
1620 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1625 spin_unlock(&res->spinlock);
1626 dlm_lockres_put(res);
1629 __dlm_lockres_grab_inflight_worker(dlm, res);
1630 spin_unlock(&res->spinlock);
1633 if (res)
1634 dlm_lockres_put(res);
1653 struct dlm_lock_resource *res,
1661 const char *lockname = res->lockname.name;
1662 unsigned int namelen = res->lockname.len;
1666 spin_lock(&res->spinlock);
1667 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1668 spin_unlock(&res->spinlock);
1735 spin_lock(&res->spinlock);
1736 dlm_lockres_set_refmap_bit(dlm, res, to);
1737 spin_unlock(&res->spinlock);
1744 spin_lock(&res->spinlock);
1745 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1746 spin_unlock(&res->spinlock);
1747 wake_up(&res->wq);
1755 * res->spinlock
1767 struct dlm_lock_resource *res = NULL;
1845 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1846 if (res) {
1847 spin_lock(&res->spinlock);
1848 if (res->state & DLM_LOCK_RES_RECOVERING) {
1854 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1855 res->owner != assert->node_idx) {
1858 assert->node_idx, res->owner, namelen,
1860 __dlm_print_one_lock_resource(res);
1864 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1866 if (res->owner == assert->node_idx) {
1875 res->owner, namelen, name);
1878 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1899 spin_unlock(&res->spinlock);
1929 if (res) {
1931 spin_lock(&res->spinlock);
1935 res->lockname.len, res->lockname.name,
1937 res->state &= ~DLM_LOCK_RES_MIGRATING;
1939 dlm_change_lockres_owner(dlm, res, mle->new_master);
1940 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1942 dlm_change_lockres_owner(dlm, res, mle->master);
1944 spin_unlock(&res->spinlock);
1947 wake_up(&res->wq);
1985 } else if (res) {
1986 if (res->owner != assert->node_idx) {
1989 res->owner, namelen, name);
1996 if (res) {
1997 spin_lock(&res->spinlock);
1998 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1999 spin_unlock(&res->spinlock);
2000 *ret_data = (void *)res;
2025 __dlm_print_one_lock_resource(res);
2026 spin_unlock(&res->spinlock);
2032 *ret_data = (void *)res;
2039 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2042 spin_lock(&res->spinlock);
2043 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2044 spin_unlock(&res->spinlock);
2045 wake_up(&res->wq);
2046 dlm_lockres_put(res);
2052 struct dlm_lock_resource *res,
2063 item->u.am.lockres = res; /* already have a ref */
2070 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2071 res->lockname.name);
2085 struct dlm_lock_resource *res;
2093 res = item->u.am.lockres;
2124 spin_lock(&res->spinlock);
2125 if (res->state & DLM_LOCK_RES_MIGRATING) {
2129 spin_unlock(&res->spinlock);
2132 __dlm_lockres_reserve_ast(res);
2133 spin_unlock(&res->spinlock);
2138 res->lockname.len, res->lockname.name, dlm->node_num);
2139 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2147 dlm_lockres_release_ast(dlm, res);
2150 dlm_lockres_drop_inflight_worker(dlm, res);
2152 dlm_lockres_put(res);
2168 struct dlm_lock_resource *res)
2183 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2217 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2224 lockname = res->lockname.name;
2225 namelen = res->lockname.len;
2234 &deref, sizeof(deref), res->owner, &r);
2236 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2237 dlm->name, namelen, lockname, ret, res->owner);
2240 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2241 dlm->name, namelen, lockname, res->owner, r);
2242 dlm_print_one_lock_resource(res);
2256 struct dlm_lock_resource *res = NULL;
2285 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2286 if (!res) {
2294 spin_lock(&res->spinlock);
2295 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2298 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2299 if (test_bit(node, res->refmap)) {
2300 dlm_lockres_clear_refmap_bit(dlm, res, node);
2304 spin_unlock(&res->spinlock);
2308 dlm_lockres_calc_usage(dlm, res);
2312 res->lockname.len, res->lockname.name, node);
2313 dlm_print_one_lock_resource(res);
2327 item->u.dl.deref_res = res;
2338 if (res)
2339 dlm_lockres_put(res);
2351 struct dlm_lock_resource *res = NULL;
2377 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2378 if (!res) {
2385 spin_lock(&res->spinlock);
2386 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
2387 spin_unlock(&res->spinlock);
2391 res->lockname.len, res->lockname.name, node);
2396 __dlm_do_purge_lockres(dlm, res);
2397 spin_unlock(&res->spinlock);
2398 wake_up(&res->wq);
2404 if (res)
2405 dlm_lockres_put(res);
2411 struct dlm_lock_resource *res, u8 node)
2418 lockname = res->lockname.name;
2419 namelen = res->lockname.len;
2430 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
2435 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2437 dlm_print_one_lock_resource(res);
2444 struct dlm_lock_resource *res;
2449 res = item->u.dl.deref_res;
2452 spin_lock(&res->spinlock);
2453 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2454 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2455 if (test_bit(node, res->refmap)) {
2456 dlm_lockres_clear_refmap_bit(dlm, res, node);
2459 spin_unlock(&res->spinlock);
2461 dlm_drop_lockres_ref_done(dlm, res, node);
2465 dlm->name, res->lockname.len, res->lockname.name, node);
2466 dlm_lockres_calc_usage(dlm, res);
2470 res->lockname.len, res->lockname.name, node);
2471 dlm_print_one_lock_resource(res);
2474 dlm_lockres_put(res);
2485 struct dlm_lock_resource *res)
2493 assert_spin_locked(&res->spinlock);
2496 if (res->state & DLM_LOCK_RES_MIGRATING)
2500 if (res->state & (DLM_LOCK_RES_RECOVERING|
2504 if (res->owner != dlm->node_num)
2508 queue = dlm_list_idx_to_ptr(res, idx);
2515 mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on "
2516 "%s list\n", dlm->name, res->lockname.len,
2517 res->lockname.name,
2526 node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
2531 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len,
2532 res->lockname.name);
2543 struct dlm_lock_resource *res, u8 target)
2557 name = res->lockname.name;
2558 namelen = res->lockname.len;
2584 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2606 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2608 "the target went down.\n", res->lockname.len,
2609 res->lockname.name, target);
2610 spin_lock(&res->spinlock);
2611 res->state &= ~DLM_LOCK_RES_MIGRATING;
2613 spin_unlock(&res->spinlock);
2650 ret = dlm_send_one_lockres(dlm, res, mres, target,
2660 spin_lock(&res->spinlock);
2661 res->state &= ~DLM_LOCK_RES_MIGRATING;
2663 spin_unlock(&res->spinlock);
2689 res->owner == target)
2693 dlm->name, res->lockname.len, res->lockname.name);
2699 dlm->name, res->lockname.len,
2700 res->lockname.name, target);
2706 spin_lock(&res->spinlock);
2707 res->state &= ~DLM_LOCK_RES_MIGRATING;
2709 spin_unlock(&res->spinlock);
2714 dlm->name, res->lockname.len, res->lockname.name);
2718 spin_lock(&res->spinlock);
2719 dlm_set_lockres_owner(dlm, res, target);
2720 res->state &= ~DLM_LOCK_RES_MIGRATING;
2721 dlm_remove_nonlocal_locks(dlm, res);
2722 spin_unlock(&res->spinlock);
2723 wake_up(&res->wq);
2730 dlm_lockres_calc_usage(dlm, res);
2735 dlm_kick_thread(dlm, res);
2740 wake_up(&res->wq);
2762 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2771 spin_lock(&res->spinlock);
2772 if (dlm_is_lockres_migratable(dlm, res))
2773 target = dlm_pick_migration_target(dlm, res);
2774 spin_unlock(&res->spinlock);
2782 ret = dlm_migrate_lockres(dlm, res, target);
2784 mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2785 dlm->name, res->lockname.len, res->lockname.name,
2804 struct dlm_lock_resource *res,
2808 spin_lock(&res->spinlock);
2809 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2810 spin_unlock(&res->spinlock);
2822 struct dlm_lock_resource *res)
2825 spin_lock(&res->spinlock);
2826 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2827 spin_unlock(&res->spinlock);
2833 struct dlm_lock_resource *res,
2839 res->lockname.len, res->lockname.name, dlm->node_num,
2843 spin_lock(&res->spinlock);
2844 BUG_ON(res->migration_pending);
2845 res->migration_pending = 1;
2848 __dlm_lockres_reserve_ast(res);
2849 spin_unlock(&res->spinlock);
2852 dlm_kick_thread(dlm, res);
2855 spin_lock(&res->spinlock);
2856 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2857 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2858 spin_unlock(&res->spinlock);
2860 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2861 dlm_lockres_release_ast(dlm, res);
2864 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2870 dlm_migration_can_proceed(dlm, res, target),
2874 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2878 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2881 if (!dlm_migration_can_proceed(dlm, res, target)) {
2901 spin_lock(&res->spinlock);
2902 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2903 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2905 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2907 res->migration_pending = 0;
2908 spin_unlock(&res->spinlock);
2925 struct dlm_lock_resource *res)
2927 struct list_head *queue = &res->granted;
2931 assert_spin_locked(&res->spinlock);
2933 BUG_ON(res->owner == dlm->node_num);
2945 dlm_lockres_clear_refmap_bit(dlm, res,
2958 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2966 res->lockname.len, res->lockname.name, bit);
2967 dlm_lockres_clear_refmap_bit(dlm, res, bit);
2979 struct dlm_lock_resource *res)
2982 struct list_head *queue = &res->granted;
2988 assert_spin_locked(&res->spinlock);
2992 queue = dlm_list_idx_to_ptr(res, idx);
3006 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
3025 struct dlm_lock_resource *res,
3034 migrate.namelen = res->lockname.len;
3035 memcpy(migrate.name, res->lockname.name, migrate.namelen);
3060 mlog(ML_ERROR, "%s: res %.*s, Error %d send "
3078 dlm->name, res->lockname.len, res->lockname.name,
3080 spin_lock(&res->spinlock);
3081 dlm_lockres_set_refmap_bit(dlm, res, nodenum);
3082 spin_unlock(&res->spinlock);
3105 struct dlm_lock_resource *res = NULL;
3129 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3130 if (res) {
3131 spin_lock(&res->spinlock);
3132 if (res->state & DLM_LOCK_RES_RECOVERING) {
3136 spin_unlock(&res->spinlock);
3143 res->state |= DLM_LOCK_RES_MIGRATING;
3144 spin_unlock(&res->spinlock);
3149 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3167 if (res)
3168 dlm_lockres_put(res);
3182 struct dlm_lock_resource *res,
3242 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3260 struct dlm_lock_resource *res;
3263 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3265 if (res) {
3269 spin_lock(&res->spinlock);
3270 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3271 dlm_move_lockres_to_recovery_list(dlm, res);
3272 spin_unlock(&res->spinlock);
3273 dlm_lockres_put(res);
3284 return res;
3332 struct dlm_lock_resource *res;
3401 res = dlm_reset_mleres_owner(dlm, mle);
3402 if (res)
3413 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3428 spin_lock(&res->spinlock);
3429 dlm_lockres_set_refmap_bit(dlm, res, old_master);
3430 spin_unlock(&res->spinlock);
3433 ret = dlm_do_migrate_request(dlm, res, old_master,
3441 res->lockname.len, res->lockname.name);
3444 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3455 res->lockname.len, res->lockname.name, old_master);
3456 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3467 spin_lock(&res->spinlock);
3468 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3469 res->state &= ~DLM_LOCK_RES_MIGRATING;
3470 spin_unlock(&res->spinlock);
3472 dlm_kick_thread(dlm, res);
3473 wake_up(&res->wq);
3487 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3489 assert_spin_locked(&res->spinlock);
3490 if (res->state & DLM_LOCK_RES_MIGRATING) {
3491 __dlm_print_one_lock_resource(res);
3493 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3495 atomic_inc(&res->asts_reserved);
3512 struct dlm_lock_resource *res)
3514 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3517 if (!res->migration_pending) {
3518 spin_unlock(&res->spinlock);
3522 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3523 res->migration_pending = 0;
3524 res->state |= DLM_LOCK_RES_MIGRATING;
3525 spin_unlock(&res->spinlock);
3526 wake_up(&res->wq);
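A large share of the matches above (the release path around source lines 468-522, the allocation path around 526-599, and the put calls scattered through the lookup and deref paths) trace one lifecycle: a lock resource is allocated with its reference count at one, every lookup takes its own reference, and the release callback frees it only once it is off every list. The following is a minimal userspace sketch of that pattern, not the kernel code: the mock_* names are invented for illustration, and the real implementation uses struct kref with kref_init()/kref_put(), kmem_cache allocations, and res->spinlock around list and state changes, as the matched lines show.

	/*
	 * Simplified stand-in for the dlm_lock_resource lifecycle.
	 * No hash table, no spinlocks, no per-queue lists: just the
	 * refcount discipline the matched lines follow.
	 */
	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct mock_lockres {
		char *name;        /* stands in for res->lockname.name */
		int   refs;        /* stands in for the embedded struct kref */
		int   on_any_list; /* stands in for granted/converting/... */
	};

	/* Counterpart of dlm_lockres_release(): runs when refs hits zero. */
	static void mock_lockres_release(struct mock_lockres *res)
	{
		/* The real code BUG()s if the resource is still queued anywhere. */
		assert(!res->on_any_list);
		printf("destroying lockres %s\n", res->name);
		free(res->name);
		free(res);
	}

	/* Counterpart of dlm_lockres_put(): kref_put(&res->refs, release). */
	static void mock_lockres_put(struct mock_lockres *res)
	{
		if (--res->refs == 0)
			mock_lockres_release(res);
	}

	/* Counterpart of dlm_new_lockres(): allocate and take the first ref. */
	static struct mock_lockres *mock_new_lockres(const char *name)
	{
		struct mock_lockres *res = calloc(1, sizeof(*res));
		if (!res)
			return NULL;
		res->name = strdup(name);
		if (!res->name) {
			free(res);
			return NULL;
		}
		res->refs = 1; /* kref_init() starts the count at one */
		return res;
	}

	int main(void)
	{
		struct mock_lockres *res = mock_new_lockres("example-lockid");
		if (!res)
			return 1;
		res->refs++;           /* a lookup would take its own reference */
		mock_lockres_put(res); /* lookup path done with the resource */
		mock_lockres_put(res); /* last reference: release() fires */
		return 0;
	}

The other pattern that repeats through the matches is flag handling under res->spinlock: a state bit such as DLM_LOCK_RES_MIGRATING or DLM_LOCK_RES_SETREF_INPROG is set with the lock held, cleared the same way when the operation finishes, and waiters are then kicked with wake_up(&res->wq).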