Lines matching refs:res (references to struct dlm_lock_resource *res in the OCFS2 DLM master code, dlmmaster.c; the leading number on each line is its line number in the source file)
49 struct dlm_lock_resource *res,
76 struct dlm_lock_resource *res,
85 static int dlm_do_master_request(struct dlm_lock_resource *res,
90 struct dlm_lock_resource *res,
94 struct dlm_lock_resource *res,
98 struct dlm_lock_resource *res,
105 struct dlm_lock_resource *res);
107 struct dlm_lock_resource *res);
109 struct dlm_lock_resource *res,
112 struct dlm_lock_resource *res);
251 struct dlm_lock_resource *res,
276 BUG_ON(!res);
277 mle->mleres = res;
278 memcpy(mle->mname, res->lockname.name, res->lockname.len);
279 mle->mnamelen = res->lockname.len;
280 mle->mnamehash = res->lockname.hash;
466 struct dlm_lock_resource *res;
469 res = container_of(kref, struct dlm_lock_resource, refs);
470 dlm = res->dlm;
474 BUG_ON(!res->lockname.name);
476 mlog(0, "destroying lockres %.*s\n", res->lockname.len,
477 res->lockname.name);
481 if (!hlist_unhashed(&res->hash_node) ||
482 !list_empty(&res->granted) ||
483 !list_empty(&res->converting) ||
484 !list_empty(&res->blocked) ||
485 !list_empty(&res->dirty) ||
486 !list_empty(&res->recovering) ||
487 !list_empty(&res->purge)) {
491 res->lockname.len, res->lockname.name,
492 !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
493 !list_empty(&res->granted) ? 'G' : ' ',
494 !list_empty(&res->converting) ? 'C' : ' ',
495 !list_empty(&res->blocked) ? 'B' : ' ',
496 !list_empty(&res->dirty) ? 'D' : ' ',
497 !list_empty(&res->recovering) ? 'R' : ' ',
498 !list_empty(&res->purge) ? 'P' : ' ');
500 dlm_print_one_lock_resource(res);
505 BUG_ON(!hlist_unhashed(&res->hash_node));
506 BUG_ON(!list_empty(&res->granted));
507 BUG_ON(!list_empty(&res->converting));
508 BUG_ON(!list_empty(&res->blocked));
509 BUG_ON(!list_empty(&res->dirty));
510 BUG_ON(!list_empty(&res->recovering));
511 BUG_ON(!list_empty(&res->purge));
513 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
515 kmem_cache_free(dlm_lockres_cache, res);
518 void dlm_lockres_put(struct dlm_lock_resource *res)
520 kref_put(&res->refs, dlm_lockres_release);
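
The release path above (source lines 466-520) is the standard kref pattern: dlm_lockres_put() drops a reference, and the release callback fires exactly once, when the count hits zero, after verifying the resource is unhashed and off every queue. A minimal sketch of the same shape, with a hypothetical, heavily reduced struct lockres (the real struct dlm_lock_resource has many more fields; the later sketches in this listing reuse this struct):

    #include <linux/bug.h>
    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    #define MAX_NODES 255                   /* stand-in for O2NM_MAX_NODES */

    struct lockres {                        /* hypothetical, reduced */
            struct kref refs;
            struct hlist_node hash_node;
            struct list_head granted;
            spinlock_t spinlock;
            wait_queue_head_t wq;
            unsigned long state;
            DECLARE_BITMAP(refmap, MAX_NODES);
            unsigned int inflight_locks;
            atomic_t asts_reserved;
            int migration_pending;
            char *name;
            unsigned int namelen;
    };

    static void lockres_release(struct kref *kref)
    {
            struct lockres *res = container_of(kref, struct lockres, refs);

            /* Freeing is only legal once the resource is unhashed and
             * off every queue; anything else is a refcount bug, which
             * is why the original BUG()s on each condition. */
            BUG_ON(!hlist_unhashed(&res->hash_node));
            BUG_ON(!list_empty(&res->granted));

            kfree(res->name);
            kfree(res);
    }

    static void lockres_put(struct lockres *res)
    {
            /* the release callback runs exactly once, at refcount zero */
            kref_put(&res->refs, lockres_release);
    }
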
524 struct dlm_lock_resource *res,
530 * res->lockname.name, so be sure to init every field
533 qname = (char *) res->lockname.name;
536 res->lockname.len = namelen;
537 res->lockname.hash = dlm_lockid_hash(name, namelen);
539 init_waitqueue_head(&res->wq);
540 spin_lock_init(&res->spinlock);
541 INIT_HLIST_NODE(&res->hash_node);
542 INIT_LIST_HEAD(&res->granted);
543 INIT_LIST_HEAD(&res->converting);
544 INIT_LIST_HEAD(&res->blocked);
545 INIT_LIST_HEAD(&res->dirty);
546 INIT_LIST_HEAD(&res->recovering);
547 INIT_LIST_HEAD(&res->purge);
548 INIT_LIST_HEAD(&res->tracking);
549 atomic_set(&res->asts_reserved, 0);
550 res->migration_pending = 0;
551 res->inflight_locks = 0;
552 res->inflight_assert_workers = 0;
554 res->dlm = dlm;
556 kref_init(&res->refs);
562 spin_lock(&res->spinlock);
563 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
564 spin_unlock(&res->spinlock);
566 res->state = DLM_LOCK_RES_IN_PROGRESS;
568 res->last_used = 0;
571 list_add_tail(&res->tracking, &dlm->tracking_list);
574 memset(res->lvb, 0, DLM_LVB_LEN);
575 bitmap_zero(res->refmap, O2NM_MAX_NODES);
582 struct dlm_lock_resource *res = NULL;
584 res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
585 if (!res)
588 res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
589 if (!res->lockname.name)
592 dlm_init_lockres(dlm, res, name, namelen);
593 return res;
596 if (res)
597 kmem_cache_free(dlm_lockres_cache, res);
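
Source lines 582-597 show a two-cache allocation with an unwind path: if the name buffer cannot be allocated, the already-allocated resource is freed before returning. A hedged sketch, reusing struct lockres from the sketch above; lockres_cache and lockname_cache are illustrative stand-ins for dlm_lockres_cache and dlm_lockname_cache:

    #include <linux/string.h>

    static struct kmem_cache *lockres_cache;        /* hypothetical */
    static struct kmem_cache *lockname_cache;       /* hypothetical */

    static struct lockres *new_lockres(const char *name, unsigned int namelen)
    {
            struct lockres *res;
            char *qname;

            res = kmem_cache_zalloc(lockres_cache, GFP_NOFS);
            if (!res)
                    return NULL;

            qname = kmem_cache_zalloc(lockname_cache, GFP_NOFS);
            if (!qname) {
                    /* unwind the first allocation (cf. lines 596-597) */
                    kmem_cache_free(lockres_cache, res);
                    return NULL;
            }
            memcpy(qname, name, namelen);
            res->name = qname;
            res->namelen = namelen;

            /* zalloc zeroed everything else; init the live fields */
            spin_lock_init(&res->spinlock);
            init_waitqueue_head(&res->wq);
            INIT_HLIST_NODE(&res->hash_node);
            INIT_LIST_HEAD(&res->granted);
            kref_init(&res->refs);          /* caller owns the first ref */
            return res;
    }
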
602 struct dlm_lock_resource *res, int bit)
604 assert_spin_locked(&res->spinlock);
606 mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
607 res->lockname.name, bit, __builtin_return_address(0));
609 set_bit(bit, res->refmap);
613 struct dlm_lock_resource *res, int bit)
615 assert_spin_locked(&res->spinlock);
617 mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
618 res->lockname.name, bit, __builtin_return_address(0));
620 clear_bit(bit, res->refmap);
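
The refmap helpers (source lines 602-620) flip one bit per node, and only while res->spinlock is held; assert_spin_locked() both documents and enforces that contract. A minimal equivalent:

    #include <linux/bitops.h>

    /* callers must hold res->spinlock */
    static void refmap_set(struct lockres *res, int node)
    {
            assert_spin_locked(&res->spinlock);
            set_bit(node, res->refmap);
    }

    static void refmap_clear(struct lockres *res, int node)
    {
            assert_spin_locked(&res->spinlock);
            clear_bit(node, res->refmap);
    }
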
624 struct dlm_lock_resource *res)
626 res->inflight_locks++;
628 mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
629 res->lockname.len, res->lockname.name, res->inflight_locks,
634 struct dlm_lock_resource *res)
636 assert_spin_locked(&res->spinlock);
637 __dlm_lockres_grab_inflight_ref(dlm, res);
641 struct dlm_lock_resource *res)
643 assert_spin_locked(&res->spinlock);
645 BUG_ON(res->inflight_locks == 0);
647 res->inflight_locks--;
649 mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
650 res->lockname.len, res->lockname.name, res->inflight_locks,
653 wake_up(&res->wq);
657 struct dlm_lock_resource *res)
659 assert_spin_locked(&res->spinlock);
660 res->inflight_assert_workers++;
662 dlm->name, res->lockname.len, res->lockname.name,
663 res->inflight_assert_workers);
667 struct dlm_lock_resource *res)
669 assert_spin_locked(&res->spinlock);
670 BUG_ON(res->inflight_assert_workers == 0);
671 res->inflight_assert_workers--;
673 dlm->name, res->lockname.len, res->lockname.name,
674 res->inflight_assert_workers);
678 struct dlm_lock_resource *res)
680 spin_lock(&res->spinlock);
681 __dlm_lockres_drop_inflight_worker(dlm, res);
682 spin_unlock(&res->spinlock);
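
The inflight counters (source lines 624-682) share one shape: increment under the spinlock, decrement with an underflow BUG_ON, and wake res->wq on the way down, because purge and migration paths wait for the count to reach zero. A sketch, assuming the struct from above:

    static void grab_inflight(struct lockres *res)
    {
            assert_spin_locked(&res->spinlock);
            res->inflight_locks++;
    }

    static void drop_inflight(struct lockres *res)
    {
            assert_spin_locked(&res->spinlock);
            BUG_ON(res->inflight_locks == 0);       /* underflow = bug */
            res->inflight_locks--;
            wake_up(&res->wq);      /* a purge path may wait for zero */
    }
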
706 struct dlm_lock_resource *tmpres=NULL, *res=NULL;
766 if (res) {
768 if (!list_empty(&res->tracking))
769 list_del_init(&res->tracking);
773 res->lockname.len,
774 res->lockname.name);
776 dlm_lockres_put(res);
778 res = tmpres;
782 if (!res) {
789 res = dlm_new_lockres(dlm, lockid, namelen);
790 if (!res)
795 mlog(0, "no lockres found, allocated our own: %p\n", res);
800 spin_lock(&res->spinlock);
801 dlm_change_lockres_owner(dlm, res, dlm->node_num);
802 __dlm_insert_lockres(dlm, res);
803 dlm_lockres_grab_inflight_ref(dlm, res);
804 spin_unlock(&res->spinlock);
856 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
866 mlog(0, "%s: res %.*s, At least one node (%d) "
879 __dlm_insert_lockres(dlm, res);
882 __dlm_lockres_grab_inflight_ref(dlm, res);
900 if (!dlm_pre_master_reco_lockres(dlm, res))
917 mlog(0, "%s: res %.*s, At least one node (%d) "
936 ret = dlm_do_master_request(res, mle, nodenum);
947 mlog(0, "%s: res %.*s, Requests only up to %u but "
955 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
958 mlog(0, "%s: res %.*s, Node map changed, redo the master "
959 "request now, blocked=%d\n", dlm->name, res->lockname.len,
960 res->lockname.name, blocked);
962 mlog(ML_ERROR, "%s: res %.*s, Spinning on "
964 dlm->name, res->lockname.len,
965 res->lockname.name, blocked);
966 dlm_print_one_lock_resource(res);
973 mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
974 res->lockname.name, res->owner);
976 BUG_ON(res->owner == O2NM_MAX_NODES);
985 spin_lock(&res->spinlock);
986 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
987 spin_unlock(&res->spinlock);
988 wake_up(&res->wq);
995 return res;
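
dlm_get_lock_resource() (source lines 706-995) opens with the classic lookup-then-allocate race handling: look up under the lock, allocate with no locks held, re-check, and discard the local copy if another thread inserted first (lines 766-778). A reduced sketch; lookup_lockres() and insert_lockres() are hypothetical stand-ins for __dlm_lookup_lockres() and __dlm_insert_lockres(), and lookup_lockres() is assumed to return with a reference held:

    struct dlm {                            /* hypothetical container */
            spinlock_t spinlock;
            /* ... lockres hash table ... */
    };

    struct lockres *lookup_lockres(struct dlm *dlm, const char *name,
                                   unsigned int len);
    void insert_lockres(struct dlm *dlm, struct lockres *res);

    static struct lockres *get_lockres(struct dlm *dlm, const char *name,
                                       unsigned int len)
    {
            struct lockres *res, *tmp;

            spin_lock(&dlm->spinlock);
            tmp = lookup_lockres(dlm, name, len);
            spin_unlock(&dlm->spinlock);
            if (tmp)
                    return tmp;

            res = new_lockres(name, len);   /* allocate with no locks held */
            if (!res)
                    return NULL;

            spin_lock(&dlm->spinlock);
            tmp = lookup_lockres(dlm, name, len);   /* re-check for a racer */
            if (tmp) {
                    spin_unlock(&dlm->spinlock);
                    lockres_put(res);       /* lost the race: drop our copy */
                    return tmp;
            }
            insert_lockres(dlm, res);       /* won: publish our copy */
            spin_unlock(&dlm->spinlock);
            return res;
    }
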
1002 struct dlm_lock_resource *res,
1016 spin_lock(&res->spinlock);
1017 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1019 res->lockname.len, res->lockname.name, res->owner);
1020 spin_unlock(&res->spinlock);
1023 if (res->owner != dlm->node_num) {
1024 ret = dlm_do_master_request(res, mle, res->owner);
1027 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1035 spin_unlock(&res->spinlock);
1048 dlm->name, res->lockname.len, res->lockname.name);
1049 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1053 dlm->name, res->lockname.len, res->lockname.name,
1063 "rechecking now\n", dlm->name, res->lockname.len,
1064 res->lockname.name);
1069 "for %s:%.*s\n", dlm->name, res->lockname.len,
1070 res->lockname.name);
1107 if (res->owner == O2NM_MAX_NODES) {
1109 res->lockname.len, res->lockname.name);
1112 mlog(0, "done waiting, master is %u\n", res->owner);
1121 res->lockname.len, res->lockname.name, m);
1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1137 spin_lock(&res->spinlock);
1140 dlm_change_lockres_owner(dlm, res, m);
1141 spin_unlock(&res->spinlock);
1207 struct dlm_lock_resource *res,
1254 res->lockname.len,
1255 res->lockname.name,
1270 res->lockname.len,
1271 res->lockname.name);
1273 mle->mleres = res;
1306 static int dlm_do_master_request(struct dlm_lock_resource *res,
1358 "reference\n", dlm->name, res->lockname.len,
1359 res->lockname.name, to);
1394 * res->spinlock
1405 struct dlm_lock_resource *res = NULL;
1434 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1435 if (res) {
1439 spin_lock(&res->spinlock);
1446 if (hlist_unhashed(&res->hash_node)) {
1447 spin_unlock(&res->spinlock);
1448 dlm_lockres_put(res);
1452 if (res->state & (DLM_LOCK_RES_RECOVERING|
1454 spin_unlock(&res->spinlock);
1455 mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1463 if (res->owner == dlm->node_num) {
1464 dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
1465 spin_unlock(&res->spinlock);
1478 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1479 spin_unlock(&res->spinlock);
1480 // mlog(0, "node %u is the master\n", res->owner);
1490 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1529 dlm_lockres_set_refmap_bit(dlm, res,
1543 spin_unlock(&res->spinlock);
1616 dlm->node_num, res->lockname.len, res->lockname.name);
1617 spin_lock(&res->spinlock);
1618 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
1623 spin_unlock(&res->spinlock);
1624 dlm_lockres_put(res);
1627 __dlm_lockres_grab_inflight_worker(dlm, res);
1628 spin_unlock(&res->spinlock);
1631 if (res)
1632 dlm_lockres_put(res);
1651 struct dlm_lock_resource *res,
1659 const char *lockname = res->lockname.name;
1660 unsigned int namelen = res->lockname.len;
1664 spin_lock(&res->spinlock);
1665 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1666 spin_unlock(&res->spinlock);
1733 spin_lock(&res->spinlock);
1734 dlm_lockres_set_refmap_bit(dlm, res, to);
1735 spin_unlock(&res->spinlock);
1742 spin_lock(&res->spinlock);
1743 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1744 spin_unlock(&res->spinlock);
1745 wake_up(&res->wq);
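
dlm_do_assert_master() (source lines 1651-1745) brackets its network traffic with a state flag: set DLM_LOCK_RES_SETREF_INPROG under the spinlock, drop the lock for the slow work, then clear the flag and wake anyone blocked on it. The shape, with an illustrative flag value:

    #define LOCKRES_SETREF_INPROG   0x0001  /* illustrative flag value */

    static void guarded_setref(struct lockres *res)
    {
            spin_lock(&res->spinlock);
            res->state |= LOCKRES_SETREF_INPROG;
            spin_unlock(&res->spinlock);

            /* ... slow network work, no spinlock held ... */

            spin_lock(&res->spinlock);
            res->state &= ~LOCKRES_SETREF_INPROG;
            spin_unlock(&res->spinlock);
            wake_up(&res->wq);      /* unblock waiters on this flag */
    }
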
1753 * res->spinlock
1765 struct dlm_lock_resource *res = NULL;
1843 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1844 if (res) {
1845 spin_lock(&res->spinlock);
1846 if (res->state & DLM_LOCK_RES_RECOVERING) {
1852 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1853 res->owner != assert->node_idx) {
1856 assert->node_idx, res->owner, namelen,
1858 __dlm_print_one_lock_resource(res);
1862 if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1864 if (res->owner == assert->node_idx) {
1873 res->owner, namelen, name);
1876 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1897 spin_unlock(&res->spinlock);
1927 if (res) {
1929 spin_lock(&res->spinlock);
1933 res->lockname.len, res->lockname.name,
1935 res->state &= ~DLM_LOCK_RES_MIGRATING;
1937 dlm_change_lockres_owner(dlm, res, mle->new_master);
1938 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1940 dlm_change_lockres_owner(dlm, res, mle->master);
1942 spin_unlock(&res->spinlock);
1945 wake_up(&res->wq);
1983 } else if (res) {
1984 if (res->owner != assert->node_idx) {
1987 res->owner, namelen, name);
1994 if (res) {
1995 spin_lock(&res->spinlock);
1996 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1997 spin_unlock(&res->spinlock);
1998 *ret_data = (void *)res;
2023 __dlm_print_one_lock_resource(res);
2024 spin_unlock(&res->spinlock);
2030 *ret_data = (void *)res;
2037 struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2040 spin_lock(&res->spinlock);
2041 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2042 spin_unlock(&res->spinlock);
2043 wake_up(&res->wq);
2044 dlm_lockres_put(res);
2050 struct dlm_lock_resource *res,
2061 item->u.am.lockres = res; /* already have a ref */
2068 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
2069 res->lockname.name);
2083 struct dlm_lock_resource *res;
2091 res = item->u.am.lockres;
2122 spin_lock(&res->spinlock);
2123 if (res->state & DLM_LOCK_RES_MIGRATING) {
2127 spin_unlock(&res->spinlock);
2130 __dlm_lockres_reserve_ast(res);
2131 spin_unlock(&res->spinlock);
2136 res->lockname.len, res->lockname.name, dlm->node_num);
2137 ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2145 dlm_lockres_release_ast(dlm, res);
2148 dlm_lockres_drop_inflight_worker(dlm, res);
2150 dlm_lockres_put(res);
2166 struct dlm_lock_resource *res)
2181 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2215 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2222 lockname = res->lockname.name;
2223 namelen = res->lockname.len;
2232 &deref, sizeof(deref), res->owner, &r);
2234 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n",
2235 dlm->name, namelen, lockname, ret, res->owner);
2238 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2239 dlm->name, namelen, lockname, res->owner, r);
2240 dlm_print_one_lock_resource(res);
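
dlm_drop_lockres_ref() (source lines 2215-2240) checks two failure channels: the transport status returned by the send, and the status the remote handler wrote back into 'r'. A hedged sketch; send_deref_msg() is hypothetical, standing in for the messaging call used here:

    #include <linux/printk.h>

    int send_deref_msg(struct dlm *dlm, struct lockres *res, int *resp);

    static int drop_remote_ref(struct dlm *dlm, struct lockres *res)
    {
            int ret, r = 0;

            ret = send_deref_msg(dlm, res, &r);
            if (ret < 0)                    /* transport failure */
                    pr_err("error %d sending DEREF for %.*s\n",
                           ret, (int)res->namelen, res->name);
            else if (r < 0)                 /* remote handler failure */
                    pr_err("DEREF for %.*s returned %d\n",
                           (int)res->namelen, res->name, r);
            return ret < 0 ? ret : r;
    }
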
2254 struct dlm_lock_resource *res = NULL;
2283 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2284 if (!res) {
2292 spin_lock(&res->spinlock);
2293 if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2296 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2297 if (test_bit(node, res->refmap)) {
2298 dlm_lockres_clear_refmap_bit(dlm, res, node);
2302 spin_unlock(&res->spinlock);
2306 dlm_lockres_calc_usage(dlm, res);
2310 res->lockname.len, res->lockname.name, node);
2311 dlm_print_one_lock_resource(res);
2325 item->u.dl.deref_res = res;
2336 if (res)
2337 dlm_lockres_put(res);
2349 struct dlm_lock_resource *res = NULL;
2375 res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2376 if (!res) {
2383 spin_lock(&res->spinlock);
2384 if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
2385 spin_unlock(&res->spinlock);
2389 res->lockname.len, res->lockname.name, node);
2394 __dlm_do_purge_lockres(dlm, res);
2395 spin_unlock(&res->spinlock);
2396 wake_up(&res->wq);
2402 if (res)
2403 dlm_lockres_put(res);
2409 struct dlm_lock_resource *res, u8 node)
2416 lockname = res->lockname.name;
2417 namelen = res->lockname.len;
2428 mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF DONE "
2433 mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
2435 dlm_print_one_lock_resource(res);
2442 struct dlm_lock_resource *res;
2447 res = item->u.dl.deref_res;
2450 spin_lock(&res->spinlock);
2451 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2452 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2453 if (test_bit(node, res->refmap)) {
2454 dlm_lockres_clear_refmap_bit(dlm, res, node);
2457 spin_unlock(&res->spinlock);
2459 dlm_drop_lockres_ref_done(dlm, res, node);
2463 dlm->name, res->lockname.len, res->lockname.name, node);
2464 dlm_lockres_calc_usage(dlm, res);
2468 res->lockname.len, res->lockname.name, node);
2469 dlm_print_one_lock_resource(res);
2472 dlm_lockres_put(res);
2483 struct dlm_lock_resource *res)
2491 assert_spin_locked(&res->spinlock);
2494 if (res->state & DLM_LOCK_RES_MIGRATING)
2498 if (res->state & (DLM_LOCK_RES_RECOVERING|
2502 if (res->owner != dlm->node_num)
2506 queue = dlm_list_idx_to_ptr(res, idx);
2513 mlog(0, "%s: Not migratable res %.*s, lock %u:%llu on "
2514 "%s list\n", dlm->name, res->lockname.len,
2515 res->lockname.name,
2524 node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES);
2529 mlog(0, "%s: res %.*s, Migratable\n", dlm->name, res->lockname.len,
2530 res->lockname.name);
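
dlm_is_lockres_migratable() (source lines 2483-2530) only reports a resource migratable when this node owns it, it is not mid-recovery or mid-migration, and no non-local locks pin it to this node. The refmap test at line 2524 relies on find_first_bit() returning the bitmap size when no bit is set; a compressed sketch of that check:

    static int lockres_has_remote_refs(struct lockres *res)
    {
            assert_spin_locked(&res->spinlock);
            /* find_first_bit() == MAX_NODES means the refmap is empty */
            return find_first_bit(res->refmap, MAX_NODES) < MAX_NODES;
    }
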
2541 struct dlm_lock_resource *res, u8 target)
2555 name = res->lockname.name;
2556 namelen = res->lockname.len;
2582 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2604 if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2606 "the target went down.\n", res->lockname.len,
2607 res->lockname.name, target);
2608 spin_lock(&res->spinlock);
2609 res->state &= ~DLM_LOCK_RES_MIGRATING;
2611 spin_unlock(&res->spinlock);
2648 ret = dlm_send_one_lockres(dlm, res, mres, target,
2658 spin_lock(&res->spinlock);
2659 res->state &= ~DLM_LOCK_RES_MIGRATING;
2661 spin_unlock(&res->spinlock);
2687 res->owner == target)
2691 dlm->name, res->lockname.len, res->lockname.name);
2697 dlm->name, res->lockname.len,
2698 res->lockname.name, target);
2704 spin_lock(&res->spinlock);
2705 res->state &= ~DLM_LOCK_RES_MIGRATING;
2707 spin_unlock(&res->spinlock);
2712 dlm->name, res->lockname.len, res->lockname.name);
2716 spin_lock(&res->spinlock);
2717 dlm_set_lockres_owner(dlm, res, target);
2718 res->state &= ~DLM_LOCK_RES_MIGRATING;
2719 dlm_remove_nonlocal_locks(dlm, res);
2720 spin_unlock(&res->spinlock);
2721 wake_up(&res->wq);
2728 dlm_lockres_calc_usage(dlm, res);
2733 dlm_kick_thread(dlm, res);
2738 wake_up(&res->wq);
2760 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2769 spin_lock(&res->spinlock);
2770 if (dlm_is_lockres_migratable(dlm, res))
2771 target = dlm_pick_migration_target(dlm, res);
2772 spin_unlock(&res->spinlock);
2780 ret = dlm_migrate_lockres(dlm, res, target);
2782 mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n",
2783 dlm->name, res->lockname.len, res->lockname.name,
2802 struct dlm_lock_resource *res,
2806 spin_lock(&res->spinlock);
2807 can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2808 spin_unlock(&res->spinlock);
2820 struct dlm_lock_resource *res)
2823 spin_lock(&res->spinlock);
2824 ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2825 spin_unlock(&res->spinlock);
2831 struct dlm_lock_resource *res,
2837 res->lockname.len, res->lockname.name, dlm->node_num,
2841 spin_lock(&res->spinlock);
2842 BUG_ON(res->migration_pending);
2843 res->migration_pending = 1;
2846 __dlm_lockres_reserve_ast(res);
2847 spin_unlock(&res->spinlock);
2850 dlm_kick_thread(dlm, res);
2853 spin_lock(&res->spinlock);
2854 BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2855 res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2856 spin_unlock(&res->spinlock);
2858 wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2859 dlm_lockres_release_ast(dlm, res);
2862 res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2868 dlm_migration_can_proceed(dlm, res, target),
2872 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2876 res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2879 if (!dlm_migration_can_proceed(dlm, res, target)) {
2899 spin_lock(&res->spinlock);
2900 BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2901 res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2903 BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2905 res->migration_pending = 0;
2906 spin_unlock(&res->spinlock);
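
dlm_mark_lockres_migrating() (source lines 2831-2906) parks on wait_event() at line 2858. wait_event() re-evaluates its condition on every wakeup, which is why the predicate, like dlm_lockres_is_dirty() at lines 2820-2825, takes res->spinlock itself rather than relying on the caller's locking. A sketch with an illustrative flag value:

    #define LOCKRES_DIRTY   0x0002  /* illustrative flag value */

    static int lockres_is_dirty(struct lockres *res)
    {
            int ret;

            spin_lock(&res->spinlock);
            ret = !!(res->state & LOCKRES_DIRTY);
            spin_unlock(&res->spinlock);
            return ret;
    }

    static void wait_until_clean(wait_queue_head_t *ast_wq,
                                 struct lockres *res)
    {
            /* the predicate re-locks res->spinlock on every wakeup */
            wait_event(*ast_wq, !lockres_is_dirty(res));
    }
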
2923 struct dlm_lock_resource *res)
2925 struct list_head *queue = &res->granted;
2929 assert_spin_locked(&res->spinlock);
2931 BUG_ON(res->owner == dlm->node_num);
2943 dlm_lockres_clear_refmap_bit(dlm, res,
2956 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2964 res->lockname.len, res->lockname.name, bit);
2965 dlm_lockres_clear_refmap_bit(dlm, res, bit);
2977 struct dlm_lock_resource *res)
2986 assert_spin_locked(&res->spinlock);
2990 queue = dlm_list_idx_to_ptr(res, idx);
3004 noderef = find_next_bit(res->refmap, O2NM_MAX_NODES,
3023 struct dlm_lock_resource *res,
3032 migrate.namelen = res->lockname.len;
3033 memcpy(migrate.name, res->lockname.name, migrate.namelen);
3058 mlog(ML_ERROR, "%s: res %.*s, Error %d send "
3076 dlm->name, res->lockname.len, res->lockname.name,
3078 spin_lock(&res->spinlock);
3079 dlm_lockres_set_refmap_bit(dlm, res, nodenum);
3080 spin_unlock(&res->spinlock);
3103 struct dlm_lock_resource *res = NULL;
3127 res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3128 if (res) {
3129 spin_lock(&res->spinlock);
3130 if (res->state & DLM_LOCK_RES_RECOVERING) {
3134 spin_unlock(&res->spinlock);
3141 res->state |= DLM_LOCK_RES_MIGRATING;
3142 spin_unlock(&res->spinlock);
3147 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3165 if (res)
3166 dlm_lockres_put(res);
3180 struct dlm_lock_resource *res,
3240 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3258 struct dlm_lock_resource *res;
3261 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3263 if (res) {
3267 spin_lock(&res->spinlock);
3268 dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
3269 dlm_move_lockres_to_recovery_list(dlm, res);
3270 spin_unlock(&res->spinlock);
3271 dlm_lockres_put(res);
3282 return res;
3330 struct dlm_lock_resource *res;
3399 res = dlm_reset_mleres_owner(dlm, mle);
3400 if (res)
3411 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3426 spin_lock(&res->spinlock);
3427 dlm_lockres_set_refmap_bit(dlm, res, old_master);
3428 spin_unlock(&res->spinlock);
3431 ret = dlm_do_migrate_request(dlm, res, old_master,
3439 res->lockname.len, res->lockname.name);
3442 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3453 res->lockname.len, res->lockname.name, old_master);
3454 ret = dlm_do_assert_master(dlm, res, iter.node_map,
3465 spin_lock(&res->spinlock);
3466 dlm_set_lockres_owner(dlm, res, dlm->node_num);
3467 res->state &= ~DLM_LOCK_RES_MIGRATING;
3468 spin_unlock(&res->spinlock);
3470 dlm_kick_thread(dlm, res);
3471 wake_up(&res->wq);
3485 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3487 assert_spin_locked(&res->spinlock);
3488 if (res->state & DLM_LOCK_RES_MIGRATING) {
3489 __dlm_print_one_lock_resource(res);
3491 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3493 atomic_inc(&res->asts_reserved);
3510 struct dlm_lock_resource *res)
3512 if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3515 if (!res->migration_pending) {
3516 spin_unlock(&res->spinlock);
3520 BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3521 res->migration_pending = 0;
3522 res->state |= DLM_LOCK_RES_MIGRATING;
3523 spin_unlock(&res->spinlock);
3524 wake_up(&res->wq);
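
dlm_lockres_release_ast() (source lines 3510-3524) closes the listing with the atomic_dec_and_lock() idiom: the spinlock is taken only when the decrement reaches zero, so the zero count and migration_pending are examined atomically. A sketch of the same flow:

    #define LOCKRES_MIGRATING       0x0004  /* illustrative flag value */

    static void release_ast(struct lockres *res)
    {
            /* takes res->spinlock only if the count drops to zero */
            if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
                    return;

            /* count is zero and we hold res->spinlock */
            if (!res->migration_pending) {
                    spin_unlock(&res->spinlock);
                    return;
            }
            res->migration_pending = 0;
            res->state |= LOCKRES_MIGRATING;
            spin_unlock(&res->spinlock);
            wake_up(&res->wq);      /* migration can now proceed */
    }
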