Lines matching refs: res
50 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
58 struct dlm_lock_resource *res,
61 struct dlm_lock_resource *res,
78 struct dlm_lock_resource *res,
231 struct dlm_lock_resource *res;
269 list_for_each_entry(res, &dlm->reco.resources, recovering) {
271 dlm->name, res->lockname.len, res->lockname.name);
866 struct dlm_lock_resource *res;
912 list_for_each_entry(res, &resources, recovering) {
913 ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
1038 struct dlm_lock_resource *res, *next;
1042 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
1045 if (dlm_is_recovery_lock(res->lockname.name,
1046 res->lockname.len)) {
1047 spin_lock(&res->spinlock);
1048 list_for_each_entry(lock, &res->granted, list) {
1062 spin_unlock(&res->spinlock);
1066 if (res->owner == dead_node) {
1070 list_move_tail(&res->recovering, list);
1071 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1074 list_move_tail(&res->recovering, list);
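
Note on lines 1038-1074: the fragment walks dlm->reco.resources with list_for_each_entry_safe() and uses list_move_tail() to pull resources owned by the dead node (or with unknown owner) onto a caller-supplied list; the _safe variant is what makes unlinking mid-walk legal. A minimal userspace sketch of that unlink-while-iterating pattern, using a hypothetical singly linked node in place of the kernel's list_head:

    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's intrusive list entry. */
    struct node { int owner; struct node *next; };

    /* Move every entry owned by `dead` from `src` to `dst`. A
     * pointer-to-pointer cursor keeps iteration safe across removal,
     * the role list_for_each_entry_safe() plays in the fragment.
     * (This sketch pushes to the head, not the tail.) */
    static void move_dead(struct node **src, struct node **dst, int dead)
    {
        struct node **pp = src;
        while (*pp) {
            struct node *cur = *pp;
            if (cur->owner == dead) {
                *pp = cur->next;      /* unlink from src */
                cur->next = *dst;     /* push onto dst */
                *dst = cur;
            } else {
                pp = &cur->next;
            }
        }
    }

    int main(void)
    {
        struct node c = { 7, NULL }, b = { 3, &c }, a = { 7, &b };
        struct node *src = &a, *dst = NULL;
        move_dead(&src, &dst, 7);
        for (struct node *n = src; n; n = n->next) printf("kept %d\n", n->owner);
        for (struct node *n = dst; n; n = n->next) printf("moved %d\n", n->owner);
        return 0;
    }
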
1080 static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
1083 struct list_head *iter, *queue = &res->granted;
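
Note on lines 1080-1083: dlm_num_locks_in_lockres() starts from &res->granted and tallies the locks on all of the resource's queues. A sketch of the tally, assuming a simplified resource whose three queues (granted, converting, blocked) are singly linked:

    #include <stdio.h>

    struct lock { struct lock *next; };

    /* Simplified resource: three queues, mirroring granted/converting/
     * blocked in the fragment (hypothetical layout). */
    struct lockres { struct lock *queues[3]; };

    static int num_locks(const struct lockres *res)
    {
        int total = 0;
        for (int i = 0; i < 3; i++)
            for (const struct lock *l = res->queues[i]; l; l = l->next)
                total++;
        return total;
    }

    int main(void)
    {
        struct lock g2 = { NULL }, g1 = { &g2 }, b1 = { NULL };
        struct lockres res = { { &g1, NULL, &b1 } };
        printf("%d locks\n", num_locks(&res));   /* prints: 3 locks */
        return 0;
    }
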
1098 struct dlm_lock_resource *res,
1118 dlm->name, res->lockname.len, res->lockname.name,
1129 mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
1149 dlm_init_migratable_lockres(mres, res->lockname.name,
1150 res->lockname.len, mres_total_locks,
1260 int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1274 total_locks = dlm_num_locks_in_lockres(res);
1283 dlm_init_migratable_lockres(mres, res->lockname.name,
1284 res->lockname.len, total_locks,
1285 mig_cookie, flags, res->owner);
1289 queue = dlm_list_idx_to_ptr(res, i);
1299 res, total_locks);
1307 dlm->name, res->lockname.len, res->lockname.name,
1313 ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
1326 res->lockname.len, res->lockname.name);
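
Note on lines 1260-1326: in dlm_send_one_lockres(), the count from dlm_num_locks_in_lockres() seeds a migratable-lockres header, then locks from each queue are packed into the message, with dlm_send_mig_lockres_msg() flushing a chunk whenever the buffer fills. A batching sketch under those assumptions, with a made-up MAX_PER_MSG limit and a print standing in for the network send:

    #include <stdio.h>

    #define MAX_PER_MSG 4   /* stand-in for the real per-message lock limit */

    struct msg { int nr; int locks[MAX_PER_MSG]; };

    /* Hypothetical transport: just print what a flush would send. */
    static void flush(struct msg *m, int total)
    {
        printf("send %d/%d locks\n", m->nr, total);
        m->nr = 0;
    }

    static void send_all(const int *locks, int total)
    {
        struct msg m = { 0 };
        for (int i = 0; i < total; i++) {
            m.locks[m.nr++] = locks[i];
            if (m.nr == MAX_PER_MSG)
                flush(&m, total);     /* buffer full: send this chunk */
        }
        if (m.nr)
            flush(&m, total);         /* final partial chunk */
    }

    int main(void)
    {
        int locks[] = { 1, 2, 3, 4, 5, 6 };
        send_all(locks, 6);           /* prints: send 4/6, then send 2/6 */
        return 0;
    }
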
1356 struct dlm_lock_resource *res = NULL;
1396 res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
1398 if (res) {
1399 /* this will get a ref on res */
1401 spin_lock(&res->spinlock);
1402 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
1408 spin_unlock(&res->spinlock);
1410 dlm_lockres_put(res);
1415 res->state |= DLM_LOCK_RES_RECOVERING;
1417 if (res->state & DLM_LOCK_RES_MIGRATING) {
1423 } else if (res->state & DLM_LOCK_RES_RECOVERING) {
1429 spin_unlock(&res->spinlock);
1431 dlm_lockres_put(res);
1434 res->state |= DLM_LOCK_RES_MIGRATING;
1436 spin_unlock(&res->spinlock);
1442 res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
1443 if (!res)
1448 dlm_lockres_get(res);
1452 res->state |= DLM_LOCK_RES_RECOVERING;
1454 res->state |= DLM_LOCK_RES_MIGRATING;
1457 __dlm_insert_lockres(dlm, res);
1463 dlm_lockres_get(res);
1478 spin_lock(&res->spinlock);
1479 res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1480 spin_unlock(&res->spinlock);
1481 wake_up(&res->wq);
1486 * the proper res->state flags. */
1488 spin_lock(&res->spinlock);
1491 dlm_lockres_grab_inflight_ref(dlm, res);
1501 dlm_change_lockres_owner(dlm, res, dlm->node_num);
1503 spin_unlock(&res->spinlock);
1509 item->u.ml.lockres = res; /* already have a ref */
1520 dlm_lockres_put(res);
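
Note on lines 1356-1520: the handler looks up the named resource, bails out if it is mid-purge (DLM_LOCK_RES_DROPPING_REF), marks it RECOVERING and possibly MIGRATING, and otherwise allocates, flags, and inserts a fresh one. A lookup-or-create sketch with stand-in state bits and a toy single-chain table (no locking, unlike the spinlocked original):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    enum { RES_RECOVERING = 1, RES_MIGRATING = 2, RES_DROPPING_REF = 4 };

    struct res { char name[32]; int state; int refs; struct res *next; };

    static struct res *table;   /* toy hash table: a single chain */

    static struct res *lookup(const char *name)
    {
        for (struct res *r = table; r; r = r->next)
            if (!strcmp(r->name, name)) { r->refs++; return r; }
        return NULL;
    }

    /* Mirrors the handler's shape: reuse an existing resource unless it
     * is being torn down, otherwise allocate, flag, and insert a new one. */
    static struct res *get_or_create(const char *name)
    {
        struct res *r = lookup(name);
        if (r) {
            if (r->state & RES_DROPPING_REF) {  /* being purged: retry later */
                r->refs--;
                return NULL;
            }
            r->state |= RES_RECOVERING;
            return r;
        }
        r = calloc(1, sizeof(*r));
        if (!r)
            return NULL;
        snprintf(r->name, sizeof(r->name), "%s", name);
        r->state = RES_RECOVERING | RES_MIGRATING;
        r->refs = 1;
        r->next = table;
        table = r;
        return r;
    }

    int main(void)
    {
        struct res *a = get_or_create("somelock");
        struct res *b = get_or_create("somelock");
        printf("same=%d refs=%d state=%#x\n", a == b, b->refs, b->state);
        return 0;
    }
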
1538 struct dlm_lock_resource *res;
1545 res = item->u.ml.lockres;
1553 ret = dlm_lockres_master_requery(dlm, res, &real_master);
1562 res->lockname.len, res->lockname.name);
1564 spin_lock(&res->spinlock);
1565 dlm_lockres_drop_inflight_ref(dlm, res);
1566 spin_unlock(&res->spinlock);
1569 real_master, res->lockname.len,
1570 res->lockname.name);
1576 ret = dlm_process_recovery_data(dlm, res, mres);
1584 ret = dlm_finish_migration(dlm, res, mres->master);
1591 if (res) {
1593 dlm_lockres_put(res);
1594 dlm_lockres_put(res);
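
Note on lines 1509-1594: the handler stores the resource in a deferred work item while holding a reference ("already have a ref"), and the worker drops references when done, so the resource stays live across the deferral. A refcount-handoff sketch with hypothetical get/put helpers:

    #include <stdio.h>

    struct res { int refs; };

    static void get(struct res *r) { r->refs++; }
    static void put(struct res *r) { if (--r->refs == 0) printf("freed\n"); }

    /* Handler side: take a reference that travels with the queued work
     * item, so the resource cannot vanish before the worker runs. */
    static struct res *queue_work(struct res *r)
    {
        get(r);          /* ref owned by the work item */
        return r;        /* pretend this is the queued item */
    }

    /* Worker side: do the work, then drop the work item's reference. */
    static void run_work(struct res *r)
    {
        printf("processing, refs=%d\n", r->refs);
        put(r);
    }

    int main(void)
    {
        struct res r = { 1 };          /* lookup reference */
        struct res *item = queue_work(&r);
        run_work(item);                /* drops the item ref */
        put(&r);                       /* drop the lookup ref */
        return 0;
    }
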
1602 struct dlm_lock_resource *res,
1642 ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
1659 int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1668 req.namelen = res->lockname.len;
1669 memcpy(req.name, res->lockname.name, res->lockname.len);
1702 struct dlm_lock_resource *res = NULL;
1717 res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
1718 if (res) {
1719 spin_lock(&res->spinlock);
1720 master = res->owner;
1722 int ret = dlm_dispatch_assert_master(dlm, res,
1726 spin_unlock(&res->spinlock);
1727 dlm_lockres_put(res);
1734 __dlm_lockres_grab_inflight_worker(dlm, res);
1735 spin_unlock(&res->spinlock);
1739 spin_unlock(&res->spinlock);
1740 dlm_lockres_put(res);
1751 dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
1756 ret = &(res->granted);
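
Note on lines 1751-1756: dlm_list_num_to_pointer() maps list index 0/1/2 to a queue by starting at &res->granted and stepping forward, which only works because the three list heads are consecutive struct members. A sketch of that layout trick (the arithmetic is technically outside strict ISO C, though common in kernel code):

    #include <assert.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Assumed layout: the three queues are consecutive members, which is
     * what lets index arithmetic stand in for a switch. */
    struct lockres { struct list_head granted, converting, blocked; };

    static struct list_head *list_num_to_ptr(struct lockres *res, int n)
    {
        assert(n >= 0 && n <= 2);
        /* granted+0, converting+1, blocked+2; relies on layout and is
         * formally UB in ISO C, tolerated in kernel-style code. */
        return &res->granted + n;
    }

    int main(void)
    {
        struct lockres r;
        printf("%d %d\n",
               list_num_to_ptr(&r, 1) == &r.converting,
               list_num_to_ptr(&r, 2) == &r.blocked);   /* prints: 1 1 */
        return 0;
    }
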
1788 struct dlm_lock_resource *res,
1812 spin_lock(&res->spinlock);
1813 dlm_lockres_set_refmap_bit(dlm, res, from);
1814 spin_unlock(&res->spinlock);
1821 queue = dlm_list_num_to_pointer(res, ml->list);
1832 spin_lock(&res->spinlock);
1834 tmpq = dlm_list_idx_to_ptr(res, j);
1858 __dlm_print_one_lock_resource(res);
1868 res->lockname.len, res->lockname.name,
1878 __dlm_print_one_lock_resource(res);
1888 j, ml->list, res->lockname.len,
1889 res->lockname.name);
1890 __dlm_print_one_lock_resource(res);
1891 spin_unlock(&res->spinlock);
1901 spin_unlock(&res->spinlock);
1915 dlm_lock_attach_lockres(newlock, res);
1918 BUG_ON(queue != &res->converting);
1944 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1950 if (!dlm_lvb_is_empty(res->lvb) &&
1952 memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
1956 res->lockname.len,
1957 res->lockname.name, ml->type);
1960 printk("%02x", res->lvb[i]);
1965 dlm_print_one_lock_resource(res);
1968 memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
1990 spin_lock(&res->spinlock);
1996 res->lockname.len, res->lockname.name,
2007 __dlm_print_one_lock_resource(res);
2025 res->lockname.len, res->lockname.name, ml->node);
2026 dlm_lockres_set_refmap_bit(dlm, res, ml->node);
2028 spin_unlock(&res->spinlock);
2034 spin_lock(&res->spinlock);
2035 dlm_lockres_drop_inflight_ref(dlm, res);
2036 spin_unlock(&res->spinlock);
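
Note on lines 1944-1968: the incoming lock value block is copied into res->lvb, but only after checking that the local LVB is either empty or already identical; a mismatch is treated as corruption and dumped. A sketch of that accept-or-refuse check, assuming a fixed 64-byte LVB:

    #include <string.h>
    #include <stdio.h>

    #define LVB_LEN 64

    static int lvb_is_empty(const unsigned char *lvb)
    {
        for (int i = 0; i < LVB_LEN; i++)
            if (lvb[i]) return 0;
        return 1;
    }

    /* Accept the incoming LVB only if the local copy is empty or
     * identical; divergent non-empty value blocks signal corruption. */
    static int apply_lvb(unsigned char *local, const unsigned char *incoming)
    {
        if (!lvb_is_empty(local) && memcmp(local, incoming, LVB_LEN))
            return -1;                        /* mismatch: refuse */
        memcpy(local, incoming, LVB_LEN);
        return 0;
    }

    int main(void)
    {
        unsigned char local[LVB_LEN] = { 0 };
        unsigned char a[LVB_LEN] = { 0xab }, b[LVB_LEN] = { 0xcd };
        int first  = apply_lvb(local, a);
        int second = apply_lvb(local, b);
        printf("first=%d mismatch=%d\n", first, second);   /* 0, -1 */
        return 0;
    }
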
2045 struct dlm_lock_resource *res)
2052 assert_spin_locked(&res->spinlock);
2053 res->state |= DLM_LOCK_RES_RECOVERING;
2054 if (!list_empty(&res->recovering)) {
2056 "Recovering res %s:%.*s, is already on recovery list!\n",
2057 dlm->name, res->lockname.len, res->lockname.name);
2058 list_del_init(&res->recovering);
2059 dlm_lockres_put(res);
2062 dlm_lockres_get(res);
2063 list_add_tail(&res->recovering, &dlm->reco.resources);
2067 queue = dlm_list_idx_to_ptr(res, i);
2074 res->lockname.len, res->lockname.name);
2075 dlm_revert_pending_convert(res, lock);
2082 res->lockname.len, res->lockname.name);
2088 dlm_revert_pending_lock(res, lock);
2103 res->lockname.len, res->lockname.name);
2104 dlm_commit_pending_unlock(res, lock);
2114 res->lockname.len, res->lockname.name);
2115 dlm_commit_pending_cancel(res, lock);
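
Note on lines 2045-2115: dlm_move_lockres_to_recovery_list() fixes up operations that were in flight to the dead master. The helper names suggest the policy: pending converts and locks are reverted (no reply can arrive, so roll back to the known state), while pending unlocks and cancels are committed. A dispatch sketch over those four cases, with invented enum names:

    #include <stdio.h>

    enum pending { NONE, CONVERT, LOCK, UNLOCK, CANCEL };

    struct lk { enum pending p; };

    /* One lock's recovery fixup, per the fragment's four cases. */
    static const char *fixup(struct lk *l)
    {
        switch (l->p) {
        case CONVERT: l->p = NONE; return "revert pending convert";
        case LOCK:    l->p = NONE; return "revert pending lock";
        case UNLOCK:  l->p = NONE; return "commit pending unlock";
        case CANCEL:  l->p = NONE; return "commit pending cancel";
        default:      return "nothing pending";
        }
    }

    int main(void)
    {
        struct lk l = { CONVERT };
        puts(fixup(&l));   /* prints: revert pending convert */
        return 0;
    }
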
2126 * sets the res->owner to the new master.
2133 struct dlm_lock_resource *res, *next;
2137 list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
2138 if (res->owner == dead_node) {
2139 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2140 dlm->name, res->lockname.len, res->lockname.name,
2141 res->owner, new_master);
2142 list_del_init(&res->recovering);
2143 spin_lock(&res->spinlock);
2146 dlm_change_lockres_owner(dlm, res, new_master);
2147 res->state &= ~DLM_LOCK_RES_RECOVERING;
2148 if (__dlm_lockres_has_locks(res))
2149 __dlm_dirty_lockres(dlm, res);
2150 spin_unlock(&res->spinlock);
2151 wake_up(&res->wq);
2152 dlm_lockres_put(res);
2162 hlist_for_each_entry(res, bucket, hash_node) {
2163 if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
2164 spin_lock(&res->spinlock);
2165 res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
2166 spin_unlock(&res->spinlock);
2167 wake_up(&res->wq);
2170 if (!(res->state & DLM_LOCK_RES_RECOVERING))
2173 if (res->owner != dead_node &&
2174 res->owner != dlm->node_num)
2177 if (!list_empty(&res->recovering)) {
2178 list_del_init(&res->recovering);
2179 dlm_lockres_put(res);
2184 mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
2185 dlm->name, res->lockname.len, res->lockname.name,
2186 res->owner, new_master);
2187 spin_lock(&res->spinlock);
2188 dlm_change_lockres_owner(dlm, res, new_master);
2189 res->state &= ~DLM_LOCK_RES_RECOVERING;
2190 if (__dlm_lockres_has_locks(res))
2191 __dlm_dirty_lockres(dlm, res);
2192 spin_unlock(&res->spinlock);
2193 wake_up(&res->wq);
2210 struct dlm_lock_resource *res, u8 dead_node)
2219 assert_spin_locked(&res->spinlock);
2221 if (res->owner == dlm->node_num)
2233 queue = dlm_list_idx_to_ptr(res, i);
2247 res->lockname.len, res->lockname.name, dead_node);
2248 memset(res->lvb, 0, DLM_LVB_LEN);
2253 struct dlm_lock_resource *res, u8 dead_node)
2263 assert_spin_locked(&res->spinlock);
2269 list_for_each_entry_safe(lock, next, &res->granted, list) {
2278 list_for_each_entry_safe(lock, next, &res->converting, list) {
2287 list_for_each_entry_safe(lock, next, &res->blocked, list) {
2300 res->lockname.len, res->lockname.name, freed, dead_node);
2301 if (!test_bit(dead_node, res->refmap)) {
2304 res->lockname.len, res->lockname.name, freed, dead_node);
2305 __dlm_print_one_lock_resource(res);
2307 res->state |= DLM_LOCK_RES_RECOVERY_WAITING;
2308 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2309 } else if (test_bit(dead_node, res->refmap)) {
2312 res->lockname.len, res->lockname.name, dead_node);
2313 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2317 __dlm_dirty_lockres(dlm, res);
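
Note on lines 2210-2317: dlm_free_dead_locks() frees the dead node's locks from each queue, then clears that node's bit in the resource's refmap via dlm_lockres_clear_refmap_bit(), warning if the bit was not set. A sketch of the per-node bitmap bookkeeping, using a plain unsigned long in place of the real refmap:

    #include <stdio.h>

    #define MAX_NODES 64   /* assumed cluster size; fits one unsigned long */

    /* Stand-in refmap: one bit per cluster node holding a reference. */
    static unsigned long refmap;

    static int  test_node(int n)  { return (refmap >> n) & 1UL; }
    static void set_node(int n)   { refmap |=  (1UL << n); }
    static void clear_node(int n) { refmap &= ~(1UL << n); }

    int main(void)
    {
        int dead_node = 3;
        set_node(dead_node);
        /* Drop the dead node's ref once its locks have been freed. */
        if (test_node(dead_node))
            clear_node(dead_node);
        printf("bit now %d\n", test_node(dead_node));   /* prints: 0 */
        return 0;
    }
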
2322 struct dlm_lock_resource *res;
2348 hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
2351 if (dlm_is_recovery_lock(res->lockname.name,
2352 res->lockname.len)) {
2353 spin_lock(&res->spinlock);
2354 list_for_each_entry(lock, &res->granted, list) {
2370 if ((res->owner == dead_node) &&
2371 (res->state & DLM_LOCK_RES_DROPPING_REF)) {
2372 dlm_lockres_get(res);
2373 __dlm_do_purge_lockres(dlm, res);
2374 spin_unlock(&res->spinlock);
2375 wake_up(&res->wq);
2376 dlm_lockres_put(res);
2378 } else if (res->owner == dlm->node_num)
2379 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2380 spin_unlock(&res->spinlock);
2383 spin_lock(&res->spinlock);
2385 dlm_revalidate_lvb(dlm, res, dead_node);
2386 if (res->owner == dead_node) {
2387 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
2392 dlm->name, res->lockname.len,
2393 res->lockname.name, dead_node);
2394 dlm_lockres_get(res);
2395 __dlm_do_purge_lockres(dlm, res);
2396 spin_unlock(&res->spinlock);
2397 wake_up(&res->wq);
2398 dlm_lockres_put(res);
2401 dlm_move_lockres_to_recovery_list(dlm, res);
2402 } else if (res->owner == dlm->node_num) {
2403 dlm_free_dead_locks(dlm, res, dead_node);
2404 __dlm_lockres_calc_usage(dlm, res);
2405 } else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2406 if (test_bit(dead_node, res->refmap)) {
2409 dlm->name, res->lockname.len,
2410 res->lockname.name, dead_node);
2411 dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
2414 spin_unlock(&res->spinlock);
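
Note on lines 2322-2414: the cleanup walk decides per resource based on ownership. Resources owned by the dead node are purged immediately (if a ref drop was already underway) or moved to the recovery list; locally mastered ones have the dead node's locks freed; unknown-owner ones just lose the dead node's refmap bit. A dispatch sketch of those branches, with simplified fields:

    #include <stdio.h>

    enum { OWNER_UNKNOWN = 255 };

    struct res { int owner; int dropping_ref; };

    /* Per-resource decision mirroring the cleanup fragment: the action
     * depends on who owned the resource when the node died. */
    static const char *cleanup(struct res *r, int dead, int self)
    {
        if (r->owner == dead)
            return r->dropping_ref ? "purge now (ref was being dropped)"
                                   : "move to recovery list";
        if (r->owner == self)
            return "free the dead node's locks locally";
        if (r->owner == OWNER_UNKNOWN)
            return "clear dead node's refmap bit";
        return "owned elsewhere: nothing to do";
    }

    int main(void)
    {
        struct res r = { .owner = 4, .dropping_ref = 0 };
        puts(cleanup(&r, 4, 1));   /* prints: move to recovery list */
        return 0;
    }
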
2649 struct dlm_lock_resource *res;
2655 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2657 if (res) {
2658 dlm_print_one_lock_resource(res);
2659 dlm_lockres_put(res);
2730 struct dlm_lock_resource *res;
2737 res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
2739 if (res) {
2740 dlm_print_one_lock_resource(res);
2741 dlm_lockres_put(res);