18c2ecf20Sopenharmony_ci// SPDX-License-Identifier: GPL-2.0-or-later 28c2ecf20Sopenharmony_ci/* -*- mode: c; c-basic-offset: 8; -*- 38c2ecf20Sopenharmony_ci * vim: noexpandtab sw=8 ts=8 sts=0: 48c2ecf20Sopenharmony_ci * 58c2ecf20Sopenharmony_ci * dlmunlock.c 68c2ecf20Sopenharmony_ci * 78c2ecf20Sopenharmony_ci * underlying calls for unlocking locks 88c2ecf20Sopenharmony_ci * 98c2ecf20Sopenharmony_ci * Copyright (C) 2004 Oracle. All rights reserved. 108c2ecf20Sopenharmony_ci */ 118c2ecf20Sopenharmony_ci 128c2ecf20Sopenharmony_ci 138c2ecf20Sopenharmony_ci#include <linux/module.h> 148c2ecf20Sopenharmony_ci#include <linux/fs.h> 158c2ecf20Sopenharmony_ci#include <linux/types.h> 168c2ecf20Sopenharmony_ci#include <linux/highmem.h> 178c2ecf20Sopenharmony_ci#include <linux/init.h> 188c2ecf20Sopenharmony_ci#include <linux/sysctl.h> 198c2ecf20Sopenharmony_ci#include <linux/random.h> 208c2ecf20Sopenharmony_ci#include <linux/blkdev.h> 218c2ecf20Sopenharmony_ci#include <linux/socket.h> 228c2ecf20Sopenharmony_ci#include <linux/inet.h> 238c2ecf20Sopenharmony_ci#include <linux/spinlock.h> 248c2ecf20Sopenharmony_ci#include <linux/delay.h> 258c2ecf20Sopenharmony_ci 268c2ecf20Sopenharmony_ci#include "../cluster/heartbeat.h" 278c2ecf20Sopenharmony_ci#include "../cluster/nodemanager.h" 288c2ecf20Sopenharmony_ci#include "../cluster/tcp.h" 298c2ecf20Sopenharmony_ci 308c2ecf20Sopenharmony_ci#include "dlmapi.h" 318c2ecf20Sopenharmony_ci#include "dlmcommon.h" 328c2ecf20Sopenharmony_ci 338c2ecf20Sopenharmony_ci#define MLOG_MASK_PREFIX ML_DLM 348c2ecf20Sopenharmony_ci#include "../cluster/masklog.h" 358c2ecf20Sopenharmony_ci 368c2ecf20Sopenharmony_ci#define DLM_UNLOCK_FREE_LOCK 0x00000001 378c2ecf20Sopenharmony_ci#define DLM_UNLOCK_CALL_AST 0x00000002 388c2ecf20Sopenharmony_ci#define DLM_UNLOCK_REMOVE_LOCK 0x00000004 398c2ecf20Sopenharmony_ci#define DLM_UNLOCK_REGRANT_LOCK 0x00000008 408c2ecf20Sopenharmony_ci#define DLM_UNLOCK_CLEAR_CONVERT_TYPE 0x00000010 418c2ecf20Sopenharmony_ci 
428c2ecf20Sopenharmony_ci 438c2ecf20Sopenharmony_cistatic enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, 448c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 458c2ecf20Sopenharmony_ci struct dlm_lock *lock, 468c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 478c2ecf20Sopenharmony_ci int *actions); 488c2ecf20Sopenharmony_cistatic enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, 498c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 508c2ecf20Sopenharmony_ci struct dlm_lock *lock, 518c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 528c2ecf20Sopenharmony_ci int *actions); 538c2ecf20Sopenharmony_ci 548c2ecf20Sopenharmony_cistatic enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, 558c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 568c2ecf20Sopenharmony_ci struct dlm_lock *lock, 578c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 588c2ecf20Sopenharmony_ci int flags, 598c2ecf20Sopenharmony_ci u8 owner); 608c2ecf20Sopenharmony_ci 618c2ecf20Sopenharmony_ci 628c2ecf20Sopenharmony_ci/* 638c2ecf20Sopenharmony_ci * according to the spec: 648c2ecf20Sopenharmony_ci * http://opendlm.sourceforge.net/cvsmirror/opendlm/docs/dlmbook_final.pdf 658c2ecf20Sopenharmony_ci * 668c2ecf20Sopenharmony_ci * flags & LKM_CANCEL != 0: must be converting or blocked 678c2ecf20Sopenharmony_ci * flags & LKM_CANCEL == 0: must be granted 688c2ecf20Sopenharmony_ci * 698c2ecf20Sopenharmony_ci * So to unlock a converting lock, you must first cancel the 708c2ecf20Sopenharmony_ci * convert (passing LKM_CANCEL in flags), then call the unlock 718c2ecf20Sopenharmony_ci * again (with no LKM_CANCEL in flags). 
728c2ecf20Sopenharmony_ci */ 738c2ecf20Sopenharmony_ci 748c2ecf20Sopenharmony_ci 758c2ecf20Sopenharmony_ci/* 768c2ecf20Sopenharmony_ci * locking: 778c2ecf20Sopenharmony_ci * caller needs: none 788c2ecf20Sopenharmony_ci * taken: res->spinlock and lock->spinlock taken and dropped 798c2ecf20Sopenharmony_ci * held on exit: none 808c2ecf20Sopenharmony_ci * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network 818c2ecf20Sopenharmony_ci * all callers should have taken an extra ref on lock coming in 828c2ecf20Sopenharmony_ci */ 838c2ecf20Sopenharmony_cistatic enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm, 848c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 858c2ecf20Sopenharmony_ci struct dlm_lock *lock, 868c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 878c2ecf20Sopenharmony_ci int flags, int *call_ast, 888c2ecf20Sopenharmony_ci int master_node) 898c2ecf20Sopenharmony_ci{ 908c2ecf20Sopenharmony_ci enum dlm_status status; 918c2ecf20Sopenharmony_ci int actions = 0; 928c2ecf20Sopenharmony_ci int in_use; 938c2ecf20Sopenharmony_ci u8 owner; 948c2ecf20Sopenharmony_ci int recovery_wait = 0; 958c2ecf20Sopenharmony_ci 968c2ecf20Sopenharmony_ci mlog(0, "master_node = %d, valblk = %d\n", master_node, 978c2ecf20Sopenharmony_ci flags & LKM_VALBLK); 988c2ecf20Sopenharmony_ci 998c2ecf20Sopenharmony_ci if (master_node) 1008c2ecf20Sopenharmony_ci BUG_ON(res->owner != dlm->node_num); 1018c2ecf20Sopenharmony_ci else 1028c2ecf20Sopenharmony_ci BUG_ON(res->owner == dlm->node_num); 1038c2ecf20Sopenharmony_ci 1048c2ecf20Sopenharmony_ci spin_lock(&dlm->ast_lock); 1058c2ecf20Sopenharmony_ci /* We want to be sure that we're not freeing a lock 1068c2ecf20Sopenharmony_ci * that still has AST's pending... 
*/ 1078c2ecf20Sopenharmony_ci in_use = !list_empty(&lock->ast_list); 1088c2ecf20Sopenharmony_ci spin_unlock(&dlm->ast_lock); 1098c2ecf20Sopenharmony_ci if (in_use && !(flags & LKM_CANCEL)) { 1108c2ecf20Sopenharmony_ci mlog(ML_ERROR, "lockres %.*s: Someone is calling dlmunlock " 1118c2ecf20Sopenharmony_ci "while waiting for an ast!", res->lockname.len, 1128c2ecf20Sopenharmony_ci res->lockname.name); 1138c2ecf20Sopenharmony_ci return DLM_BADPARAM; 1148c2ecf20Sopenharmony_ci } 1158c2ecf20Sopenharmony_ci 1168c2ecf20Sopenharmony_ci spin_lock(&res->spinlock); 1178c2ecf20Sopenharmony_ci if (res->state & DLM_LOCK_RES_IN_PROGRESS) { 1188c2ecf20Sopenharmony_ci if (master_node && !(flags & LKM_CANCEL)) { 1198c2ecf20Sopenharmony_ci mlog(ML_ERROR, "lockres in progress!\n"); 1208c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 1218c2ecf20Sopenharmony_ci return DLM_FORWARD; 1228c2ecf20Sopenharmony_ci } 1238c2ecf20Sopenharmony_ci /* ok for this to sleep if not in a network handler */ 1248c2ecf20Sopenharmony_ci __dlm_wait_on_lockres(res); 1258c2ecf20Sopenharmony_ci res->state |= DLM_LOCK_RES_IN_PROGRESS; 1268c2ecf20Sopenharmony_ci } 1278c2ecf20Sopenharmony_ci spin_lock(&lock->spinlock); 1288c2ecf20Sopenharmony_ci 1298c2ecf20Sopenharmony_ci if (res->state & DLM_LOCK_RES_RECOVERING) { 1308c2ecf20Sopenharmony_ci status = DLM_RECOVERING; 1318c2ecf20Sopenharmony_ci goto leave; 1328c2ecf20Sopenharmony_ci } 1338c2ecf20Sopenharmony_ci 1348c2ecf20Sopenharmony_ci if (res->state & DLM_LOCK_RES_MIGRATING) { 1358c2ecf20Sopenharmony_ci status = DLM_MIGRATING; 1368c2ecf20Sopenharmony_ci goto leave; 1378c2ecf20Sopenharmony_ci } 1388c2ecf20Sopenharmony_ci 1398c2ecf20Sopenharmony_ci /* see above for what the spec says about 1408c2ecf20Sopenharmony_ci * LKM_CANCEL and the lock queue state */ 1418c2ecf20Sopenharmony_ci if (flags & LKM_CANCEL) 1428c2ecf20Sopenharmony_ci status = dlm_get_cancel_actions(dlm, res, lock, lksb, &actions); 1438c2ecf20Sopenharmony_ci else 1448c2ecf20Sopenharmony_ci status 
= dlm_get_unlock_actions(dlm, res, lock, lksb, &actions); 1458c2ecf20Sopenharmony_ci 1468c2ecf20Sopenharmony_ci if (status != DLM_NORMAL && (status != DLM_CANCELGRANT || !master_node)) 1478c2ecf20Sopenharmony_ci goto leave; 1488c2ecf20Sopenharmony_ci 1498c2ecf20Sopenharmony_ci /* By now this has been masked out of cancel requests. */ 1508c2ecf20Sopenharmony_ci if (flags & LKM_VALBLK) { 1518c2ecf20Sopenharmony_ci /* make the final update to the lvb */ 1528c2ecf20Sopenharmony_ci if (master_node) 1538c2ecf20Sopenharmony_ci memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN); 1548c2ecf20Sopenharmony_ci else 1558c2ecf20Sopenharmony_ci flags |= LKM_PUT_LVB; /* let the send function 1568c2ecf20Sopenharmony_ci * handle it. */ 1578c2ecf20Sopenharmony_ci } 1588c2ecf20Sopenharmony_ci 1598c2ecf20Sopenharmony_ci if (!master_node) { 1608c2ecf20Sopenharmony_ci owner = res->owner; 1618c2ecf20Sopenharmony_ci /* drop locks and send message */ 1628c2ecf20Sopenharmony_ci if (flags & LKM_CANCEL) 1638c2ecf20Sopenharmony_ci lock->cancel_pending = 1; 1648c2ecf20Sopenharmony_ci else 1658c2ecf20Sopenharmony_ci lock->unlock_pending = 1; 1668c2ecf20Sopenharmony_ci spin_unlock(&lock->spinlock); 1678c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 1688c2ecf20Sopenharmony_ci status = dlm_send_remote_unlock_request(dlm, res, lock, lksb, 1698c2ecf20Sopenharmony_ci flags, owner); 1708c2ecf20Sopenharmony_ci spin_lock(&res->spinlock); 1718c2ecf20Sopenharmony_ci spin_lock(&lock->spinlock); 1728c2ecf20Sopenharmony_ci /* if the master told us the lock was already granted, 1738c2ecf20Sopenharmony_ci * let the ast handle all of these actions */ 1748c2ecf20Sopenharmony_ci if (status == DLM_CANCELGRANT) { 1758c2ecf20Sopenharmony_ci actions &= ~(DLM_UNLOCK_REMOVE_LOCK| 1768c2ecf20Sopenharmony_ci DLM_UNLOCK_REGRANT_LOCK| 1778c2ecf20Sopenharmony_ci DLM_UNLOCK_CLEAR_CONVERT_TYPE); 1788c2ecf20Sopenharmony_ci } else if (status == DLM_RECOVERING || 1798c2ecf20Sopenharmony_ci status == DLM_MIGRATING || 
1808c2ecf20Sopenharmony_ci status == DLM_FORWARD || 1818c2ecf20Sopenharmony_ci status == DLM_NOLOCKMGR 1828c2ecf20Sopenharmony_ci ) { 1838c2ecf20Sopenharmony_ci /* must clear the actions because this unlock 1848c2ecf20Sopenharmony_ci * is about to be retried. cannot free or do 1858c2ecf20Sopenharmony_ci * any list manipulation. */ 1868c2ecf20Sopenharmony_ci mlog(0, "%s:%.*s: clearing actions, %s\n", 1878c2ecf20Sopenharmony_ci dlm->name, res->lockname.len, 1888c2ecf20Sopenharmony_ci res->lockname.name, 1898c2ecf20Sopenharmony_ci status==DLM_RECOVERING?"recovering": 1908c2ecf20Sopenharmony_ci (status==DLM_MIGRATING?"migrating": 1918c2ecf20Sopenharmony_ci (status == DLM_FORWARD ? "forward" : 1928c2ecf20Sopenharmony_ci "nolockmanager"))); 1938c2ecf20Sopenharmony_ci actions = 0; 1948c2ecf20Sopenharmony_ci } 1958c2ecf20Sopenharmony_ci if (flags & LKM_CANCEL) 1968c2ecf20Sopenharmony_ci lock->cancel_pending = 0; 1978c2ecf20Sopenharmony_ci else { 1988c2ecf20Sopenharmony_ci if (!lock->unlock_pending) 1998c2ecf20Sopenharmony_ci recovery_wait = 1; 2008c2ecf20Sopenharmony_ci else 2018c2ecf20Sopenharmony_ci lock->unlock_pending = 0; 2028c2ecf20Sopenharmony_ci } 2038c2ecf20Sopenharmony_ci } 2048c2ecf20Sopenharmony_ci 2058c2ecf20Sopenharmony_ci /* get an extra ref on lock. if we are just switching 2068c2ecf20Sopenharmony_ci * lists here, we dont want the lock to go away. 
*/ 2078c2ecf20Sopenharmony_ci dlm_lock_get(lock); 2088c2ecf20Sopenharmony_ci 2098c2ecf20Sopenharmony_ci if (actions & DLM_UNLOCK_REMOVE_LOCK) { 2108c2ecf20Sopenharmony_ci list_del_init(&lock->list); 2118c2ecf20Sopenharmony_ci dlm_lock_put(lock); 2128c2ecf20Sopenharmony_ci } 2138c2ecf20Sopenharmony_ci if (actions & DLM_UNLOCK_REGRANT_LOCK) { 2148c2ecf20Sopenharmony_ci dlm_lock_get(lock); 2158c2ecf20Sopenharmony_ci list_add_tail(&lock->list, &res->granted); 2168c2ecf20Sopenharmony_ci } 2178c2ecf20Sopenharmony_ci if (actions & DLM_UNLOCK_CLEAR_CONVERT_TYPE) { 2188c2ecf20Sopenharmony_ci mlog(0, "clearing convert_type at %smaster node\n", 2198c2ecf20Sopenharmony_ci master_node ? "" : "non-"); 2208c2ecf20Sopenharmony_ci lock->ml.convert_type = LKM_IVMODE; 2218c2ecf20Sopenharmony_ci } 2228c2ecf20Sopenharmony_ci 2238c2ecf20Sopenharmony_ci /* remove the extra ref on lock */ 2248c2ecf20Sopenharmony_ci dlm_lock_put(lock); 2258c2ecf20Sopenharmony_ci 2268c2ecf20Sopenharmony_cileave: 2278c2ecf20Sopenharmony_ci res->state &= ~DLM_LOCK_RES_IN_PROGRESS; 2288c2ecf20Sopenharmony_ci if (!dlm_lock_on_list(&res->converting, lock)) 2298c2ecf20Sopenharmony_ci BUG_ON(lock->ml.convert_type != LKM_IVMODE); 2308c2ecf20Sopenharmony_ci else 2318c2ecf20Sopenharmony_ci BUG_ON(lock->ml.convert_type == LKM_IVMODE); 2328c2ecf20Sopenharmony_ci spin_unlock(&lock->spinlock); 2338c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 2348c2ecf20Sopenharmony_ci wake_up(&res->wq); 2358c2ecf20Sopenharmony_ci 2368c2ecf20Sopenharmony_ci if (recovery_wait) { 2378c2ecf20Sopenharmony_ci spin_lock(&res->spinlock); 2388c2ecf20Sopenharmony_ci /* Unlock request will directly succeed after owner dies, 2398c2ecf20Sopenharmony_ci * and the lock is already removed from grant list. We have to 2408c2ecf20Sopenharmony_ci * wait for RECOVERING done or we miss the chance to purge it 2418c2ecf20Sopenharmony_ci * since the removement is much faster than RECOVERING proc. 
2428c2ecf20Sopenharmony_ci */ 2438c2ecf20Sopenharmony_ci __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_RECOVERING); 2448c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 2458c2ecf20Sopenharmony_ci } 2468c2ecf20Sopenharmony_ci 2478c2ecf20Sopenharmony_ci /* let the caller's final dlm_lock_put handle the actual kfree */ 2488c2ecf20Sopenharmony_ci if (actions & DLM_UNLOCK_FREE_LOCK) { 2498c2ecf20Sopenharmony_ci /* this should always be coupled with list removal */ 2508c2ecf20Sopenharmony_ci BUG_ON(!(actions & DLM_UNLOCK_REMOVE_LOCK)); 2518c2ecf20Sopenharmony_ci mlog(0, "lock %u:%llu should be gone now! refs=%d\n", 2528c2ecf20Sopenharmony_ci dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)), 2538c2ecf20Sopenharmony_ci dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)), 2548c2ecf20Sopenharmony_ci kref_read(&lock->lock_refs)-1); 2558c2ecf20Sopenharmony_ci dlm_lock_put(lock); 2568c2ecf20Sopenharmony_ci } 2578c2ecf20Sopenharmony_ci if (actions & DLM_UNLOCK_CALL_AST) 2588c2ecf20Sopenharmony_ci *call_ast = 1; 2598c2ecf20Sopenharmony_ci 2608c2ecf20Sopenharmony_ci /* if cancel or unlock succeeded, lvb work is done */ 2618c2ecf20Sopenharmony_ci if (status == DLM_NORMAL) 2628c2ecf20Sopenharmony_ci lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB); 2638c2ecf20Sopenharmony_ci 2648c2ecf20Sopenharmony_ci return status; 2658c2ecf20Sopenharmony_ci} 2668c2ecf20Sopenharmony_ci 2678c2ecf20Sopenharmony_civoid dlm_commit_pending_unlock(struct dlm_lock_resource *res, 2688c2ecf20Sopenharmony_ci struct dlm_lock *lock) 2698c2ecf20Sopenharmony_ci{ 2708c2ecf20Sopenharmony_ci /* leave DLM_LKSB_PUT_LVB on the lksb so any final 2718c2ecf20Sopenharmony_ci * update of the lvb will be sent to the new master */ 2728c2ecf20Sopenharmony_ci list_del_init(&lock->list); 2738c2ecf20Sopenharmony_ci} 2748c2ecf20Sopenharmony_ci 2758c2ecf20Sopenharmony_civoid dlm_commit_pending_cancel(struct dlm_lock_resource *res, 2768c2ecf20Sopenharmony_ci struct dlm_lock *lock) 2778c2ecf20Sopenharmony_ci{ 
2788c2ecf20Sopenharmony_ci list_move_tail(&lock->list, &res->granted); 2798c2ecf20Sopenharmony_ci lock->ml.convert_type = LKM_IVMODE; 2808c2ecf20Sopenharmony_ci} 2818c2ecf20Sopenharmony_ci 2828c2ecf20Sopenharmony_ci 2838c2ecf20Sopenharmony_cistatic inline enum dlm_status dlmunlock_master(struct dlm_ctxt *dlm, 2848c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 2858c2ecf20Sopenharmony_ci struct dlm_lock *lock, 2868c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 2878c2ecf20Sopenharmony_ci int flags, 2888c2ecf20Sopenharmony_ci int *call_ast) 2898c2ecf20Sopenharmony_ci{ 2908c2ecf20Sopenharmony_ci return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 1); 2918c2ecf20Sopenharmony_ci} 2928c2ecf20Sopenharmony_ci 2938c2ecf20Sopenharmony_cistatic inline enum dlm_status dlmunlock_remote(struct dlm_ctxt *dlm, 2948c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 2958c2ecf20Sopenharmony_ci struct dlm_lock *lock, 2968c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 2978c2ecf20Sopenharmony_ci int flags, int *call_ast) 2988c2ecf20Sopenharmony_ci{ 2998c2ecf20Sopenharmony_ci return dlmunlock_common(dlm, res, lock, lksb, flags, call_ast, 0); 3008c2ecf20Sopenharmony_ci} 3018c2ecf20Sopenharmony_ci 3028c2ecf20Sopenharmony_ci/* 3038c2ecf20Sopenharmony_ci * locking: 3048c2ecf20Sopenharmony_ci * caller needs: none 3058c2ecf20Sopenharmony_ci * taken: none 3068c2ecf20Sopenharmony_ci * held on exit: none 3078c2ecf20Sopenharmony_ci * returns: DLM_NORMAL, DLM_NOLOCKMGR, status from network 3088c2ecf20Sopenharmony_ci */ 3098c2ecf20Sopenharmony_cistatic enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm, 3108c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 3118c2ecf20Sopenharmony_ci struct dlm_lock *lock, 3128c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 3138c2ecf20Sopenharmony_ci int flags, 3148c2ecf20Sopenharmony_ci u8 owner) 3158c2ecf20Sopenharmony_ci{ 3168c2ecf20Sopenharmony_ci struct dlm_unlock_lock unlock; 3178c2ecf20Sopenharmony_ci int 
tmpret; 3188c2ecf20Sopenharmony_ci enum dlm_status ret; 3198c2ecf20Sopenharmony_ci int status = 0; 3208c2ecf20Sopenharmony_ci struct kvec vec[2]; 3218c2ecf20Sopenharmony_ci size_t veclen = 1; 3228c2ecf20Sopenharmony_ci 3238c2ecf20Sopenharmony_ci mlog(0, "%.*s\n", res->lockname.len, res->lockname.name); 3248c2ecf20Sopenharmony_ci 3258c2ecf20Sopenharmony_ci if (owner == dlm->node_num) { 3268c2ecf20Sopenharmony_ci /* ended up trying to contact ourself. this means 3278c2ecf20Sopenharmony_ci * that the lockres had been remote but became local 3288c2ecf20Sopenharmony_ci * via a migration. just retry it, now as local */ 3298c2ecf20Sopenharmony_ci mlog(0, "%s:%.*s: this node became the master due to a " 3308c2ecf20Sopenharmony_ci "migration, re-evaluate now\n", dlm->name, 3318c2ecf20Sopenharmony_ci res->lockname.len, res->lockname.name); 3328c2ecf20Sopenharmony_ci return DLM_FORWARD; 3338c2ecf20Sopenharmony_ci } 3348c2ecf20Sopenharmony_ci 3358c2ecf20Sopenharmony_ci memset(&unlock, 0, sizeof(unlock)); 3368c2ecf20Sopenharmony_ci unlock.node_idx = dlm->node_num; 3378c2ecf20Sopenharmony_ci unlock.flags = cpu_to_be32(flags); 3388c2ecf20Sopenharmony_ci unlock.cookie = lock->ml.cookie; 3398c2ecf20Sopenharmony_ci unlock.namelen = res->lockname.len; 3408c2ecf20Sopenharmony_ci memcpy(unlock.name, res->lockname.name, unlock.namelen); 3418c2ecf20Sopenharmony_ci 3428c2ecf20Sopenharmony_ci vec[0].iov_len = sizeof(struct dlm_unlock_lock); 3438c2ecf20Sopenharmony_ci vec[0].iov_base = &unlock; 3448c2ecf20Sopenharmony_ci 3458c2ecf20Sopenharmony_ci if (flags & LKM_PUT_LVB) { 3468c2ecf20Sopenharmony_ci /* extra data to send if we are updating lvb */ 3478c2ecf20Sopenharmony_ci vec[1].iov_len = DLM_LVB_LEN; 3488c2ecf20Sopenharmony_ci vec[1].iov_base = lock->lksb->lvb; 3498c2ecf20Sopenharmony_ci veclen++; 3508c2ecf20Sopenharmony_ci } 3518c2ecf20Sopenharmony_ci 3528c2ecf20Sopenharmony_ci tmpret = o2net_send_message_vec(DLM_UNLOCK_LOCK_MSG, dlm->key, 3538c2ecf20Sopenharmony_ci vec, veclen, owner, 
&status); 3548c2ecf20Sopenharmony_ci if (tmpret >= 0) { 3558c2ecf20Sopenharmony_ci // successfully sent and received 3568c2ecf20Sopenharmony_ci if (status == DLM_FORWARD) 3578c2ecf20Sopenharmony_ci mlog(0, "master was in-progress. retry\n"); 3588c2ecf20Sopenharmony_ci ret = status; 3598c2ecf20Sopenharmony_ci } else { 3608c2ecf20Sopenharmony_ci mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " 3618c2ecf20Sopenharmony_ci "node %u\n", tmpret, DLM_UNLOCK_LOCK_MSG, dlm->key, owner); 3628c2ecf20Sopenharmony_ci if (dlm_is_host_down(tmpret)) { 3638c2ecf20Sopenharmony_ci /* NOTE: this seems strange, but it is what we want. 3648c2ecf20Sopenharmony_ci * when the master goes down during a cancel or 3658c2ecf20Sopenharmony_ci * unlock, the recovery code completes the operation 3668c2ecf20Sopenharmony_ci * as if the master had not died, then passes the 3678c2ecf20Sopenharmony_ci * updated state to the recovery master. this thread 3688c2ecf20Sopenharmony_ci * just needs to finish out the operation and call 3698c2ecf20Sopenharmony_ci * the unlockast. */ 3708c2ecf20Sopenharmony_ci if (dlm_is_node_dead(dlm, owner)) 3718c2ecf20Sopenharmony_ci ret = DLM_NORMAL; 3728c2ecf20Sopenharmony_ci else 3738c2ecf20Sopenharmony_ci ret = DLM_NOLOCKMGR; 3748c2ecf20Sopenharmony_ci } else { 3758c2ecf20Sopenharmony_ci /* something bad. 
this will BUG in ocfs2 */ 3768c2ecf20Sopenharmony_ci ret = dlm_err_to_dlm_status(tmpret); 3778c2ecf20Sopenharmony_ci } 3788c2ecf20Sopenharmony_ci } 3798c2ecf20Sopenharmony_ci 3808c2ecf20Sopenharmony_ci return ret; 3818c2ecf20Sopenharmony_ci} 3828c2ecf20Sopenharmony_ci 3838c2ecf20Sopenharmony_ci/* 3848c2ecf20Sopenharmony_ci * locking: 3858c2ecf20Sopenharmony_ci * caller needs: none 3868c2ecf20Sopenharmony_ci * taken: takes and drops res->spinlock 3878c2ecf20Sopenharmony_ci * held on exit: none 3888c2ecf20Sopenharmony_ci * returns: DLM_NORMAL, DLM_BADARGS, DLM_IVLOCKID, 3898c2ecf20Sopenharmony_ci * return value from dlmunlock_master 3908c2ecf20Sopenharmony_ci */ 3918c2ecf20Sopenharmony_ciint dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data, 3928c2ecf20Sopenharmony_ci void **ret_data) 3938c2ecf20Sopenharmony_ci{ 3948c2ecf20Sopenharmony_ci struct dlm_ctxt *dlm = data; 3958c2ecf20Sopenharmony_ci struct dlm_unlock_lock *unlock = (struct dlm_unlock_lock *)msg->buf; 3968c2ecf20Sopenharmony_ci struct dlm_lock_resource *res = NULL; 3978c2ecf20Sopenharmony_ci struct dlm_lock *lock = NULL; 3988c2ecf20Sopenharmony_ci enum dlm_status status = DLM_NORMAL; 3998c2ecf20Sopenharmony_ci int found = 0, i; 4008c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb = NULL; 4018c2ecf20Sopenharmony_ci int ignore; 4028c2ecf20Sopenharmony_ci u32 flags; 4038c2ecf20Sopenharmony_ci struct list_head *queue; 4048c2ecf20Sopenharmony_ci 4058c2ecf20Sopenharmony_ci flags = be32_to_cpu(unlock->flags); 4068c2ecf20Sopenharmony_ci 4078c2ecf20Sopenharmony_ci if (flags & LKM_GET_LVB) { 4088c2ecf20Sopenharmony_ci mlog(ML_ERROR, "bad args! GET_LVB specified on unlock!\n"); 4098c2ecf20Sopenharmony_ci return DLM_BADARGS; 4108c2ecf20Sopenharmony_ci } 4118c2ecf20Sopenharmony_ci 4128c2ecf20Sopenharmony_ci if ((flags & (LKM_PUT_LVB|LKM_CANCEL)) == (LKM_PUT_LVB|LKM_CANCEL)) { 4138c2ecf20Sopenharmony_ci mlog(ML_ERROR, "bad args! 
cannot modify lvb on a CANCEL " 4148c2ecf20Sopenharmony_ci "request!\n"); 4158c2ecf20Sopenharmony_ci return DLM_BADARGS; 4168c2ecf20Sopenharmony_ci } 4178c2ecf20Sopenharmony_ci 4188c2ecf20Sopenharmony_ci if (unlock->namelen > DLM_LOCKID_NAME_MAX) { 4198c2ecf20Sopenharmony_ci mlog(ML_ERROR, "Invalid name length in unlock handler!\n"); 4208c2ecf20Sopenharmony_ci return DLM_IVBUFLEN; 4218c2ecf20Sopenharmony_ci } 4228c2ecf20Sopenharmony_ci 4238c2ecf20Sopenharmony_ci if (!dlm_grab(dlm)) 4248c2ecf20Sopenharmony_ci return DLM_FORWARD; 4258c2ecf20Sopenharmony_ci 4268c2ecf20Sopenharmony_ci mlog_bug_on_msg(!dlm_domain_fully_joined(dlm), 4278c2ecf20Sopenharmony_ci "Domain %s not fully joined!\n", dlm->name); 4288c2ecf20Sopenharmony_ci 4298c2ecf20Sopenharmony_ci mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" : "none"); 4308c2ecf20Sopenharmony_ci 4318c2ecf20Sopenharmony_ci res = dlm_lookup_lockres(dlm, unlock->name, unlock->namelen); 4328c2ecf20Sopenharmony_ci if (!res) { 4338c2ecf20Sopenharmony_ci /* We assume here that a no lock resource simply means 4348c2ecf20Sopenharmony_ci * it was migrated away and destroyed before the other 4358c2ecf20Sopenharmony_ci * node could detect it. 
*/ 4368c2ecf20Sopenharmony_ci mlog(0, "returning DLM_FORWARD -- res no longer exists\n"); 4378c2ecf20Sopenharmony_ci status = DLM_FORWARD; 4388c2ecf20Sopenharmony_ci goto not_found; 4398c2ecf20Sopenharmony_ci } 4408c2ecf20Sopenharmony_ci 4418c2ecf20Sopenharmony_ci queue=&res->granted; 4428c2ecf20Sopenharmony_ci found = 0; 4438c2ecf20Sopenharmony_ci spin_lock(&res->spinlock); 4448c2ecf20Sopenharmony_ci if (res->state & DLM_LOCK_RES_RECOVERING) { 4458c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 4468c2ecf20Sopenharmony_ci mlog(0, "returning DLM_RECOVERING\n"); 4478c2ecf20Sopenharmony_ci status = DLM_RECOVERING; 4488c2ecf20Sopenharmony_ci goto leave; 4498c2ecf20Sopenharmony_ci } 4508c2ecf20Sopenharmony_ci 4518c2ecf20Sopenharmony_ci if (res->state & DLM_LOCK_RES_MIGRATING) { 4528c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 4538c2ecf20Sopenharmony_ci mlog(0, "returning DLM_MIGRATING\n"); 4548c2ecf20Sopenharmony_ci status = DLM_MIGRATING; 4558c2ecf20Sopenharmony_ci goto leave; 4568c2ecf20Sopenharmony_ci } 4578c2ecf20Sopenharmony_ci 4588c2ecf20Sopenharmony_ci if (res->owner != dlm->node_num) { 4598c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 4608c2ecf20Sopenharmony_ci mlog(0, "returning DLM_FORWARD -- not master\n"); 4618c2ecf20Sopenharmony_ci status = DLM_FORWARD; 4628c2ecf20Sopenharmony_ci goto leave; 4638c2ecf20Sopenharmony_ci } 4648c2ecf20Sopenharmony_ci 4658c2ecf20Sopenharmony_ci for (i=0; i<3; i++) { 4668c2ecf20Sopenharmony_ci list_for_each_entry(lock, queue, list) { 4678c2ecf20Sopenharmony_ci if (lock->ml.cookie == unlock->cookie && 4688c2ecf20Sopenharmony_ci lock->ml.node == unlock->node_idx) { 4698c2ecf20Sopenharmony_ci dlm_lock_get(lock); 4708c2ecf20Sopenharmony_ci found = 1; 4718c2ecf20Sopenharmony_ci break; 4728c2ecf20Sopenharmony_ci } 4738c2ecf20Sopenharmony_ci } 4748c2ecf20Sopenharmony_ci if (found) 4758c2ecf20Sopenharmony_ci break; 4768c2ecf20Sopenharmony_ci /* scan granted -> converting -> blocked queues */ 4778c2ecf20Sopenharmony_ci 
queue++; 4788c2ecf20Sopenharmony_ci } 4798c2ecf20Sopenharmony_ci spin_unlock(&res->spinlock); 4808c2ecf20Sopenharmony_ci if (!found) { 4818c2ecf20Sopenharmony_ci status = DLM_IVLOCKID; 4828c2ecf20Sopenharmony_ci goto not_found; 4838c2ecf20Sopenharmony_ci } 4848c2ecf20Sopenharmony_ci 4858c2ecf20Sopenharmony_ci /* lock was found on queue */ 4868c2ecf20Sopenharmony_ci lksb = lock->lksb; 4878c2ecf20Sopenharmony_ci if (flags & (LKM_VALBLK|LKM_PUT_LVB) && 4888c2ecf20Sopenharmony_ci lock->ml.type != LKM_EXMODE) 4898c2ecf20Sopenharmony_ci flags &= ~(LKM_VALBLK|LKM_PUT_LVB); 4908c2ecf20Sopenharmony_ci 4918c2ecf20Sopenharmony_ci /* unlockast only called on originating node */ 4928c2ecf20Sopenharmony_ci if (flags & LKM_PUT_LVB) { 4938c2ecf20Sopenharmony_ci lksb->flags |= DLM_LKSB_PUT_LVB; 4948c2ecf20Sopenharmony_ci memcpy(&lksb->lvb[0], &unlock->lvb[0], DLM_LVB_LEN); 4958c2ecf20Sopenharmony_ci } 4968c2ecf20Sopenharmony_ci 4978c2ecf20Sopenharmony_ci /* if this is in-progress, propagate the DLM_FORWARD 4988c2ecf20Sopenharmony_ci * all the way back out */ 4998c2ecf20Sopenharmony_ci status = dlmunlock_master(dlm, res, lock, lksb, flags, &ignore); 5008c2ecf20Sopenharmony_ci if (status == DLM_FORWARD) 5018c2ecf20Sopenharmony_ci mlog(0, "lockres is in progress\n"); 5028c2ecf20Sopenharmony_ci 5038c2ecf20Sopenharmony_ci if (flags & LKM_PUT_LVB) 5048c2ecf20Sopenharmony_ci lksb->flags &= ~DLM_LKSB_PUT_LVB; 5058c2ecf20Sopenharmony_ci 5068c2ecf20Sopenharmony_ci dlm_lockres_calc_usage(dlm, res); 5078c2ecf20Sopenharmony_ci dlm_kick_thread(dlm, res); 5088c2ecf20Sopenharmony_ci 5098c2ecf20Sopenharmony_cinot_found: 5108c2ecf20Sopenharmony_ci if (!found) 5118c2ecf20Sopenharmony_ci mlog(ML_ERROR, "failed to find lock to unlock! 
" 5128c2ecf20Sopenharmony_ci "cookie=%u:%llu\n", 5138c2ecf20Sopenharmony_ci dlm_get_lock_cookie_node(be64_to_cpu(unlock->cookie)), 5148c2ecf20Sopenharmony_ci dlm_get_lock_cookie_seq(be64_to_cpu(unlock->cookie))); 5158c2ecf20Sopenharmony_ci else 5168c2ecf20Sopenharmony_ci dlm_lock_put(lock); 5178c2ecf20Sopenharmony_ci 5188c2ecf20Sopenharmony_cileave: 5198c2ecf20Sopenharmony_ci if (res) 5208c2ecf20Sopenharmony_ci dlm_lockres_put(res); 5218c2ecf20Sopenharmony_ci 5228c2ecf20Sopenharmony_ci dlm_put(dlm); 5238c2ecf20Sopenharmony_ci 5248c2ecf20Sopenharmony_ci return status; 5258c2ecf20Sopenharmony_ci} 5268c2ecf20Sopenharmony_ci 5278c2ecf20Sopenharmony_ci 5288c2ecf20Sopenharmony_cistatic enum dlm_status dlm_get_cancel_actions(struct dlm_ctxt *dlm, 5298c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 5308c2ecf20Sopenharmony_ci struct dlm_lock *lock, 5318c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 5328c2ecf20Sopenharmony_ci int *actions) 5338c2ecf20Sopenharmony_ci{ 5348c2ecf20Sopenharmony_ci enum dlm_status status; 5358c2ecf20Sopenharmony_ci 5368c2ecf20Sopenharmony_ci if (dlm_lock_on_list(&res->blocked, lock)) { 5378c2ecf20Sopenharmony_ci /* cancel this outright */ 5388c2ecf20Sopenharmony_ci status = DLM_NORMAL; 5398c2ecf20Sopenharmony_ci *actions = (DLM_UNLOCK_CALL_AST | 5408c2ecf20Sopenharmony_ci DLM_UNLOCK_REMOVE_LOCK); 5418c2ecf20Sopenharmony_ci } else if (dlm_lock_on_list(&res->converting, lock)) { 5428c2ecf20Sopenharmony_ci /* cancel the request, put back on granted */ 5438c2ecf20Sopenharmony_ci status = DLM_NORMAL; 5448c2ecf20Sopenharmony_ci *actions = (DLM_UNLOCK_CALL_AST | 5458c2ecf20Sopenharmony_ci DLM_UNLOCK_REMOVE_LOCK | 5468c2ecf20Sopenharmony_ci DLM_UNLOCK_REGRANT_LOCK | 5478c2ecf20Sopenharmony_ci DLM_UNLOCK_CLEAR_CONVERT_TYPE); 5488c2ecf20Sopenharmony_ci } else if (dlm_lock_on_list(&res->granted, lock)) { 5498c2ecf20Sopenharmony_ci /* too late, already granted. 
*/ 5508c2ecf20Sopenharmony_ci status = DLM_CANCELGRANT; 5518c2ecf20Sopenharmony_ci *actions = DLM_UNLOCK_CALL_AST; 5528c2ecf20Sopenharmony_ci } else { 5538c2ecf20Sopenharmony_ci mlog(ML_ERROR, "lock to cancel is not on any list!\n"); 5548c2ecf20Sopenharmony_ci status = DLM_IVLOCKID; 5558c2ecf20Sopenharmony_ci *actions = 0; 5568c2ecf20Sopenharmony_ci } 5578c2ecf20Sopenharmony_ci return status; 5588c2ecf20Sopenharmony_ci} 5598c2ecf20Sopenharmony_ci 5608c2ecf20Sopenharmony_cistatic enum dlm_status dlm_get_unlock_actions(struct dlm_ctxt *dlm, 5618c2ecf20Sopenharmony_ci struct dlm_lock_resource *res, 5628c2ecf20Sopenharmony_ci struct dlm_lock *lock, 5638c2ecf20Sopenharmony_ci struct dlm_lockstatus *lksb, 5648c2ecf20Sopenharmony_ci int *actions) 5658c2ecf20Sopenharmony_ci{ 5668c2ecf20Sopenharmony_ci enum dlm_status status; 5678c2ecf20Sopenharmony_ci 5688c2ecf20Sopenharmony_ci /* unlock request */ 5698c2ecf20Sopenharmony_ci if (!dlm_lock_on_list(&res->granted, lock)) { 5708c2ecf20Sopenharmony_ci status = DLM_DENIED; 5718c2ecf20Sopenharmony_ci dlm_error(status); 5728c2ecf20Sopenharmony_ci *actions = 0; 5738c2ecf20Sopenharmony_ci } else { 5748c2ecf20Sopenharmony_ci /* unlock granted lock */ 5758c2ecf20Sopenharmony_ci status = DLM_NORMAL; 5768c2ecf20Sopenharmony_ci *actions = (DLM_UNLOCK_FREE_LOCK | 5778c2ecf20Sopenharmony_ci DLM_UNLOCK_CALL_AST | 5788c2ecf20Sopenharmony_ci DLM_UNLOCK_REMOVE_LOCK); 5798c2ecf20Sopenharmony_ci } 5808c2ecf20Sopenharmony_ci return status; 5818c2ecf20Sopenharmony_ci} 5828c2ecf20Sopenharmony_ci 5838c2ecf20Sopenharmony_ci/* there seems to be no point in doing this async 5848c2ecf20Sopenharmony_ci * since (even for the remote case) there is really 5858c2ecf20Sopenharmony_ci * no work to queue up... so just do it and fire the 5868c2ecf20Sopenharmony_ci * unlockast by hand when done... 
 */
/*
 * dlmunlock() - synchronous entry point for unlock and cancel requests.
 *
 * @dlm:       domain the lock lives in
 * @lksb:      lock status block; @lksb->lockid names the lock to act on
 * @flags:     only LKM_CANCEL, LKM_VALBLK and LKM_INVVALBLK are accepted;
 *             anything else fails with DLM_BADPARAM
 * @unlockast: callback invoked by hand once the operation completes
 *             (this path is fully synchronous, see comment above)
 * @data:      opaque argument passed through to @unlockast
 *
 * Dispatches to dlmunlock_master() or dlmunlock_remote() depending on who
 * currently owns the resource, and retries (after a short sleep) while the
 * resource is being recovered/migrated.  Returns DLM_NORMAL on success,
 * including the case where a cancel lost the race with a grant
 * (DLM_CANCELGRANT is translated to DLM_NORMAL before returning).
 */
enum dlm_status dlmunlock(struct dlm_ctxt *dlm, struct dlm_lockstatus *lksb,
			  int flags, dlm_astunlockfunc_t *unlockast, void *data)
{
	enum dlm_status status;
	struct dlm_lock_resource *res;
	struct dlm_lock *lock = NULL;
	int call_ast, is_master;

	if (!lksb) {
		dlm_error(DLM_BADARGS);
		return DLM_BADARGS;
	}

	/* reject any flag outside the supported set */
	if (flags & ~(LKM_CANCEL | LKM_VALBLK | LKM_INVVALBLK)) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	/* a cancel never writes the value block, so VALBLK is meaningless */
	if ((flags & (LKM_VALBLK | LKM_CANCEL)) == (LKM_VALBLK | LKM_CANCEL)) {
		mlog(0, "VALBLK given with CANCEL: ignoring VALBLK\n");
		flags &= ~LKM_VALBLK;
	}

	if (!lksb->lockid || !lksb->lockid->lockres) {
		dlm_error(DLM_BADPARAM);
		return DLM_BADPARAM;
	}

	/* pin both the lock and its resource for the duration of the call;
	 * dropped at the bottom after dlm_lockres_calc_usage() */
	lock = lksb->lockid;
	BUG_ON(!lock);
	dlm_lock_get(lock);

	res = lock->lockres;
	BUG_ON(!res);
	dlm_lockres_get(res);
retry:
	call_ast = 0;
	/* need to retry up here because owner may have changed */
	mlog(0, "lock=%p res=%p\n", lock, res);

	/* snapshot ownership under the resource lock; only an EX holder
	 * may write the value block, so drop VALBLK otherwise */
	spin_lock(&res->spinlock);
	is_master = (res->owner == dlm->node_num);
	if (flags & LKM_VALBLK && lock->ml.type != LKM_EXMODE)
		flags &= ~LKM_VALBLK;
	spin_unlock(&res->spinlock);

	if (is_master) {
		status = dlmunlock_master(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_master: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	} else {
		status = dlmunlock_remote(dlm, res, lock, lksb, flags,
					  &call_ast);
		mlog(0, "done calling dlmunlock_remote: returned %d, "
		     "call_ast is %d\n", status, call_ast);
	}

	/* transient states: back off briefly and start over, re-reading
	 * the owner, since recovery/migration may have moved the resource */
	if (status == DLM_RECOVERING ||
	    status == DLM_MIGRATING ||
	    status == DLM_FORWARD ||
	    status == DLM_NOLOCKMGR) {

		/* We want to go away for a tiny bit to allow recovery
		 * / migration to complete on this resource. I don't
		 * know of any wait queue we could sleep on as this
		 * may be happening on another node. Perhaps the
		 * proper solution is to queue up requests on the
		 * other end? */

		/* do we want to yield(); ??
		 */
		msleep(50);

		mlog(0, "retrying unlock due to pending recovery/"
		     "migration/in-progress/reconnect\n");
		goto retry;
	}

	if (call_ast) {
		mlog(0, "calling unlockast(%p, %d)\n", data, status);
		if (is_master) {
			/* it is possible that there is one last bast
			 * pending. make sure it is flushed, then
			 * call the unlockast.
			 * not an issue if this is a mastered remotely,
			 * since this lock has been removed from the
			 * lockres queues and cannot be found. */
			dlm_kick_thread(dlm, NULL);
			wait_event(dlm->ast_wq,
				   dlm_lock_basts_flushed(dlm, lock));
		}
		(*unlockast)(data, status);
	}

	/* a cancel that lost the race to a grant still succeeds from the
	 * caller's point of view; the ast above already saw CANCELGRANT */
	if (status == DLM_CANCELGRANT)
		status = DLM_NORMAL;

	if (status == DLM_NORMAL) {
		mlog(0, "kicking the thread\n");
		dlm_kick_thread(dlm, res);
	} else
		dlm_error(status);

	/* drop the references taken at the top */
	dlm_lockres_calc_usage(dlm, res);
	dlm_lockres_put(res);
	dlm_lock_put(lock);

	mlog(0, "returning status=%d!\n", status);
	return status;
}
EXPORT_SYMBOL_GPL(dlmunlock);
6988c2ecf20Sopenharmony_ci 699