Lines matching refs:mle (each entry below is the source line number followed by the line that references mle)
39 struct dlm_master_list_entry *mle,
43 struct dlm_master_list_entry *mle,
54 struct dlm_master_list_entry *mle,
58 if (dlm != mle->dlm)
61 if (namelen != mle->mnamelen ||
62 memcmp(name, mle->mname, namelen) != 0)
73 static void dlm_init_mle(struct dlm_master_list_entry *mle,
79 static void dlm_put_mle(struct dlm_master_list_entry *mle);
80 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
82 struct dlm_master_list_entry **mle,
86 struct dlm_master_list_entry *mle, int to);
91 struct dlm_master_list_entry *mle,
95 struct dlm_master_list_entry *mle,
99 struct dlm_master_list_entry *mle,
150 * dlm's established heartbeat callbacks. the mle is attached
153 * by the mle. the mle needs to be detached from the
155 * longer useful to the mle, and before the mle is freed.
158 * the mle once an "answer" regarding the lock master has been
162 struct dlm_master_list_entry *mle)
166 list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
171 struct dlm_master_list_entry *mle)
173 if (!list_empty(&mle->hb_events))
174 list_del_init(&mle->hb_events);
179 struct dlm_master_list_entry *mle)
182 __dlm_mle_detach_hb_events(dlm, mle);
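
The comment fragments at lines 150-158 describe the heartbeat rule: an mle is attached to the dlm's established heartbeat callbacks when it is created, and must be detached as soon as heartbeat events are no longer useful to it, and in any case before it is freed. A minimal sketch of the attach/detach helpers, reconstructed from the matched lines above (the lock assertion is an assumption from context, not a matched line):

static void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
					struct dlm_master_list_entry *mle)
{
	assert_spin_locked(&dlm->spinlock);	/* assumed: caller holds dlm->spinlock */

	list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}

static void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
					struct dlm_master_list_entry *mle)
{
	/* safe to call repeatedly: list_del_init() leaves the node empty */
	if (!list_empty(&mle->hb_events))
		list_del_init(&mle->hb_events);
}

static void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
				     struct dlm_master_list_entry *mle)
{
	spin_lock(&dlm->spinlock);
	__dlm_mle_detach_hb_events(dlm, mle);
	spin_unlock(&dlm->spinlock);
}
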
186 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
189 dlm = mle->dlm;
193 mle->inuse++;
194 kref_get(&mle->mle_refs);
197 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
200 dlm = mle->dlm;
204 mle->inuse--;
205 __dlm_put_mle(mle);
212 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
215 dlm = mle->dlm;
219 if (!kref_read(&mle->mle_refs)) {
222 mlog(ML_ERROR, "bad mle: %p\n", mle);
223 dlm_print_one_mle(mle);
226 kref_put(&mle->mle_refs, dlm_mle_release);
231 static void dlm_put_mle(struct dlm_master_list_entry *mle)
234 dlm = mle->dlm;
238 __dlm_put_mle(mle);
243 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
245 kref_get(&mle->mle_refs);
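
Lines 186-245 show the two reference counts carried by an mle: the kref (mle_refs) that controls freeing, and an inuse count that pins the entry across message round-trips. A sketch of the inuse pair, assembled from the matched lines (the lock assertions and the lock ordering in the put path are assumptions taken from the surrounding fragments):

static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm = mle->dlm;

	assert_spin_locked(&dlm->spinlock);	/* assumed */
	assert_spin_locked(&dlm->master_lock);	/* assumed */
	mle->inuse++;
	kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
	struct dlm_ctxt *dlm = mle->dlm;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);
	mle->inuse--;
	__dlm_put_mle(mle);		/* may drop the final kref */
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);
}

Note the sanity check at line 219: __dlm_put_mle() refuses to drop a kref that has already reached zero and logs the bad mle instead, since a double put here would free a list entry that other nodes may still be voting on.
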
248 static void dlm_init_mle(struct dlm_master_list_entry *mle,
257 mle->dlm = dlm;
258 mle->type = type;
259 INIT_HLIST_NODE(&mle->master_hash_node);
260 INIT_LIST_HEAD(&mle->hb_events);
261 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES);
262 spin_lock_init(&mle->spinlock);
263 init_waitqueue_head(&mle->wq);
264 atomic_set(&mle->woken, 0);
265 kref_init(&mle->mle_refs);
266 bitmap_zero(mle->response_map, O2NM_MAX_NODES);
267 mle->master = O2NM_MAX_NODES;
268 mle->new_master = O2NM_MAX_NODES;
269 mle->inuse = 0;
271 BUG_ON(mle->type != DLM_MLE_BLOCK &&
272 mle->type != DLM_MLE_MASTER &&
273 mle->type != DLM_MLE_MIGRATION);
275 if (mle->type == DLM_MLE_MASTER) {
277 mle->mleres = res;
278 memcpy(mle->mname, res->lockname.name, res->lockname.len);
279 mle->mnamelen = res->lockname.len;
280 mle->mnamehash = res->lockname.hash;
283 mle->mleres = NULL;
284 memcpy(mle->mname, name, namelen);
285 mle->mnamelen = namelen;
286 mle->mnamehash = dlm_lockid_hash(name, namelen);
289 atomic_inc(&dlm->mle_tot_count[mle->type]);
290 atomic_inc(&dlm->mle_cur_count[mle->type]);
293 bitmap_copy(mle->node_map, dlm->domain_map, O2NM_MAX_NODES);
294 bitmap_copy(mle->vote_map, dlm->domain_map, O2NM_MAX_NODES);
295 clear_bit(dlm->node_num, mle->vote_map);
296 clear_bit(dlm->node_num, mle->node_map);
298 /* attach the mle to the domain node up/down events */
299 __dlm_mle_attach_hb_events(dlm, mle);
302 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
307 if (!hlist_unhashed(&mle->master_hash_node))
308 hlist_del_init(&mle->master_hash_node);
311 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
317 bucket = dlm_master_hash(dlm, mle->mnamehash);
318 hlist_add_head(&mle->master_hash_node, bucket);
323 struct dlm_master_list_entry **mle,
338 *mle = tmpmle;
346 struct dlm_master_list_entry *mle;
350 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
352 dlm_mle_node_up(dlm, mle, NULL, idx);
354 dlm_mle_node_down(dlm, mle, NULL, idx);
359 struct dlm_master_list_entry *mle,
362 spin_lock(&mle->spinlock);
364 if (!test_bit(idx, mle->node_map))
367 clear_bit(idx, mle->node_map);
369 spin_unlock(&mle->spinlock);
373 struct dlm_master_list_entry *mle,
376 spin_lock(&mle->spinlock);
378 if (test_bit(idx, mle->node_map))
381 set_bit(idx, mle->node_map);
383 spin_unlock(&mle->spinlock);
405 struct dlm_master_list_entry *mle;
408 mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
409 dlm = mle->dlm;
414 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
415 mle->type);
418 __dlm_unlink_mle(dlm, mle);
420 /* detach the mle from the domain node up/down events */
421 __dlm_mle_detach_hb_events(dlm, mle);
423 atomic_dec(&dlm->mle_cur_count[mle->type]);
427 kmem_cache_free(dlm_mle_cache, mle);
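
Lines 405-427 give the release path run when the last kref is dropped. The ordering visible in the fragments matters: the entry is unhashed and detached from heartbeat before the per-type counter is decremented and the memory goes back to the slab. A sketch, under the assumption that the caller of kref_put() holds dlm->spinlock and dlm->master_lock (implied by the put helpers above):

static void dlm_mle_release(struct kref *kref)
{
	struct dlm_master_list_entry *mle;
	struct dlm_ctxt *dlm;

	mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
	dlm = mle->dlm;

	mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen,
	     mle->mname, mle->type);

	/* unhash first so no new lookup can find a dying mle */
	__dlm_unlink_mle(dlm, mle);

	/* detach the mle from the domain node up/down events */
	__dlm_mle_detach_hb_events(dlm, mle);

	atomic_dec(&dlm->mle_cur_count[mle->type]);
	kmem_cache_free(dlm_mle_cache, mle);
}
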
707 struct dlm_master_list_entry *mle = NULL;
814 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
817 if (mle->type == DLM_MLE_MASTER) {
821 mig = (mle->type == DLM_MLE_MIGRATION);
824 * of the MIGRATION mle: either the migrate finished or
825 * one of the nodes died and the mle was cleaned up.
828 * for us in the refmap. detach the mle and drop it.
830 if (mig || mle->master != O2NM_MAX_NODES) {
831 BUG_ON(mig && mle->master == dlm->node_num);
842 dlm_mle_detach_hb_events(dlm, mle);
843 dlm_put_mle(mle);
844 mle = NULL;
846 * the mle or lockres waitqueue here */
853 mle = alloc_mle;
856 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
857 set_bit(dlm->node_num, mle->maybe_map);
858 __dlm_insert_mle(dlm, mle);
862 * considered. these will not appear in the mle nodemap
884 /* get an extra ref on the mle in case this is a BLOCK
888 dlm_get_mle_inuse(mle);
895 * dlm spinlock would be detectable by a change on the mle,
934 dlm_node_iter_init(mle->vote_map, &iter);
936 ret = dlm_do_master_request(res, mle, nodenum);
939 if (mle->master != O2NM_MAX_NODES) {
941 if (mle->master <= nodenum)
949 lockid, nodenum, mle->master);
955 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
967 dlm_print_one_mle(mle);
979 dlm_mle_detach_hb_events(dlm, mle);
980 dlm_put_mle(mle);
982 dlm_put_mle_inuse(mle);
991 /* need to free the unused mle */
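
Lines 707-991 cover the mastery entry point: reuse an existing mle if dlm_find_mle() locates one, otherwise initialize the preallocated entry (alloc_mle, line 853) as DLM_MLE_MASTER, then poll every node in vote_map and wait for the vote to settle. A condensed sketch of that flow with locking and error handling omitted (iter, the loop shape, and dlm_node_iter_next() from dlmcommon.h are assumptions standing in for the elided lines):

	blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
	if (!blocked) {
		mle = alloc_mle;		/* preallocated before taking locks */
		dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
		set_bit(dlm->node_num, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
	}

	/* extra ref in case this is a BLOCK mle: the creator may drop
	 * its last ref in the assert master handler (lines 884-888) */
	dlm_get_mle_inuse(mle);

	dlm_node_iter_init(mle->vote_map, &iter);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = dlm_do_master_request(res, mle, nodenum);
		if (mle->master != O2NM_MAX_NODES)
			break;			/* a master already answered */
	}

	ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
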
1003 struct dlm_master_list_entry *mle,
1024 ret = dlm_do_master_request(res, mle, res->owner);
1037 spin_lock(&mle->spinlock);
1038 m = mle->master;
1039 map_changed = !bitmap_equal(mle->vote_map, mle->node_map,
1041 voting_done = bitmap_equal(mle->vote_map, mle->response_map,
1049 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1050 b = (mle->type == DLM_MLE_BLOCK);
1057 spin_unlock(&mle->spinlock);
1082 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
1087 mle->master = dlm->node_num;
1098 spin_unlock(&mle->spinlock);
1103 atomic_set(&mle->woken, 0);
1104 (void)wait_event_timeout(mle->wq,
1105 (atomic_read(&mle->woken) == 1),
1122 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
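
Lines 1037-1122 show the convergence test inside dlm_wait_for_lock_mastery(): mastery is settled either when some node announces itself (mle->master is set by the assert handler) or when vote_map equals response_map, meaning every polled node has answered; in the latter case the lowest node id left in maybe_map wins. A hypothetical condensation of that check (this helper does not exist in the source; the names are taken from the matched lines):

static int dlm_mastery_decided(struct dlm_ctxt *dlm,
			       struct dlm_master_list_entry *mle)
{
	int decided = 0;

	spin_lock(&mle->spinlock);
	if (mle->master != O2NM_MAX_NODES) {
		decided = 1;		/* a master already asserted itself */
	} else if (bitmap_equal(mle->vote_map, mle->response_map,
				O2NM_MAX_NODES)) {
		/* all votes in: lowest candidate in maybe_map wins */
		int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);

		if (bit >= O2NM_MAX_NODES || bit == dlm->node_num) {
			mle->master = dlm->node_num;
			decided = 1;
		}
	}
	spin_unlock(&mle->spinlock);
	return decided;
}

When the test is not yet satisfied, the real function parks on mle->wq (lines 1103-1105) until an assert wakes it or the timeout forces a re-poll; a failed bitmap_equal(vote_map, node_map) at line 1039 instead triggers dlm_restart_lock_mastery(), because the node set changed mid-vote.
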
1208 struct dlm_master_list_entry *mle,
1219 assert_spin_locked(&mle->spinlock);
1221 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1232 clear_bit(node, mle->response_map);
1233 set_bit(node, mle->vote_map);
1237 int lowest = find_first_bit(mle->maybe_map,
1241 clear_bit(node, mle->maybe_map);
1247 lowest = find_next_bit(mle->maybe_map,
1258 /* mle is an MLE_BLOCK, but
1264 * has already run, so the mle
1272 mle->type = DLM_MLE_MASTER;
1273 mle->mleres = res;
1280 bitmap_zero(mle->maybe_map, O2NM_MAX_NODES);
1281 bitmap_zero(mle->response_map, O2NM_MAX_NODES);
1283 bitmap_copy(mle->vote_map, mle->node_map,
1286 if (mle->type != DLM_MLE_BLOCK)
1287 set_bit(dlm->node_num, mle->maybe_map);
1307 struct dlm_master_list_entry *mle, int to)
1309 struct dlm_ctxt *dlm = mle->dlm;
1316 BUG_ON(mle->type == DLM_MLE_MIGRATION);
1318 request.namelen = (u8)mle->mnamelen;
1319 memcpy(request.name, mle->mname, request.namelen);
1352 spin_lock(&mle->spinlock);
1355 set_bit(to, mle->response_map);
1360 mle->master = to;
1364 set_bit(to, mle->response_map);
1368 set_bit(to, mle->response_map);
1369 set_bit(to, mle->maybe_map);
1380 spin_unlock(&mle->spinlock);
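
Lines 1307-1380 show dlm_do_master_request() recording one node's answer under mle->spinlock. The three set_bit patterns in the fragments correspond to three response codes; a sketch of that dispatch, assuming the DLM_MASTER_RESP_* constants from dlmcommon.h and a response variable filled in by the elided transport code:

	spin_lock(&mle->spinlock);
	switch (response) {
	case DLM_MASTER_RESP_YES:
		set_bit(to, mle->response_map);
		mle->master = to;		/* node 'to' owns the lock */
		break;
	case DLM_MASTER_RESP_NO:
		set_bit(to, mle->response_map);	/* answered, not a candidate */
		break;
	case DLM_MASTER_RESP_MAYBE:
		set_bit(to, mle->response_map);
		set_bit(to, mle->maybe_map);	/* 'to' is also racing to master */
		break;
	}
	spin_unlock(&mle->spinlock);
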
1395 * mle->spinlock
1407 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1458 if (mle)
1459 kmem_cache_free(dlm_mle_cache, mle);
1467 if (mle)
1468 kmem_cache_free(dlm_mle_cache, mle);
1482 if (mle)
1483 kmem_cache_free(dlm_mle_cache, mle);
1500 mlog(ML_ERROR, "no mle found for this lock!\n");
1545 /* keep the mle attached to heartbeat events */
1547 if (mle)
1548 kmem_cache_free(dlm_mle_cache, mle);
1562 // mlog(0, "no mle found\n");
1563 if (!mle) {
1567 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1568 if (!mle) {
1578 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1579 set_bit(request->node_idx, mle->maybe_map);
1580 __dlm_insert_mle(dlm, mle);
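
Lines 1562-1580 are the handler-side counterpart: a master request that finds no lockres and no mle creates a DLM_MLE_BLOCK entry and records the requester as a mastery candidate. A sketch of that branch (the error response value and the send_response label are assumptions):

	if (!mle) {
		mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
		if (!mle) {
			response = DLM_MASTER_RESP_ERROR;	/* assumed */
			goto send_response;			/* label assumed */
		}
		dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
		set_bit(request->node_idx, mle->maybe_map);
		__dlm_insert_mle(dlm, mle);
	}
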
1585 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1591 mlog(0, "migration mle was found (%u->%u)\n",
1604 /* keep the mle attached to heartbeat events */
1675 struct dlm_master_list_entry *mle = NULL;
1706 if (dlm_find_mle(dlm, &mle, (char *)lockname,
1708 dlm_print_one_mle(mle);
1709 __dlm_put_mle(mle);
1754 * mle->spinlock
1763 struct dlm_master_list_entry *mle = NULL;
1792 if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1798 int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
1821 if (mle->type == DLM_MLE_MIGRATION) {
1832 __dlm_put_mle(mle);
1851 if (!mle) {
1861 } else if (mle->type != DLM_MLE_MIGRATION) {
1884 } else /* mle->type == DLM_MLE_MIGRATION */ {
1886 if (assert->node_idx != mle->new_master) {
1890 assert->node_idx, mle->new_master,
1891 mle->master, namelen, name);
1902 if (mle) {
1907 spin_lock(&mle->spinlock);
1908 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1911 /* MASTER mle: if any bits set in the response map
1914 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1922 mle->master = assert->node_idx;
1923 atomic_set(&mle->woken, 1);
1924 wake_up(&mle->wq);
1925 spin_unlock(&mle->spinlock);
1930 if (mle->type == DLM_MLE_MIGRATION) {
1934 dlm->node_num, mle->new_master);
1937 dlm_change_lockres_owner(dlm, res, mle->new_master);
1940 dlm_change_lockres_owner(dlm, res, mle->master);
1950 * on this mle. */
1953 rr = kref_read(&mle->mle_refs);
1954 if (mle->inuse > 0) {
1969 assert->node_idx, rr, extra_ref, mle->inuse);
1970 dlm_print_one_mle(mle);
1972 __dlm_unlink_mle(dlm, mle);
1973 __dlm_mle_detach_hb_events(dlm, mle);
1974 __dlm_put_mle(mle);
1980 __dlm_put_mle(mle);
1986 "owner is %u (%.*s), no mle\n", assert->node_idx,
2007 "mle present here for %s:%.*s, but no lockres!\n",
2026 if (mle)
2027 __dlm_put_mle(mle);
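
Lines 1675-2027 belong to the assert master handler. Its core effect on a waiting mle, condensed from lines 1907-1925: record the asserting node as master and wake whoever is parked in dlm_wait_for_lock_mastery().

	spin_lock(&mle->spinlock);
	mle->master = assert->node_idx;
	atomic_set(&mle->woken, 1);
	wake_up(&mle->wq);
	spin_unlock(&mle->spinlock);

The reference juggling after that (lines 1953-1980) is the delicate part: the handler reads the kref count and, depending on the mle type and the inuse pin, may unlink, detach, and put the entry itself, which is exactly why the mastery path took the extra inuse reference up front.
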
2543 struct dlm_master_list_entry *mle = NULL;
2569 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2570 if (!mle) {
2578 * add the migration mle to the list
2582 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2584 /* get an extra reference on the mle.
2589 dlm_get_mle_inuse(mle);
2624 dlm_mle_detach_hb_events(dlm, mle);
2625 dlm_put_mle(mle);
2626 dlm_put_mle_inuse(mle);
2627 } else if (mle) {
2628 kmem_cache_free(dlm_mle_cache, mle);
2629 mle = NULL;
2635 * at this point, we have a migration target, an mle
2654 /* migration failed, detach and clean up mle */
2655 dlm_mle_detach_hb_events(dlm, mle);
2656 dlm_put_mle(mle);
2657 dlm_put_mle_inuse(mle);
2670 * we had to put an mle in the list to begin the process. this
2676 * mle and sets the master to UNKNOWN. */
2681 ret = wait_event_interruptible_timeout(mle->wq,
2682 (atomic_read(&mle->woken) == 1),
2686 if (atomic_read(&mle->woken) == 1 ||
2700 /* migration failed, detach and clean up mle */
2701 dlm_mle_detach_hb_events(dlm, mle);
2702 dlm_put_mle(mle);
2703 dlm_put_mle_inuse(mle);
2724 dlm_mle_detach_hb_events(dlm, mle);
2725 dlm_put_mle_inuse(mle);
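
Lines 2543-2725 trace the migration path: allocate a migration mle, publish it with dlm_add_migration_mle(), pin it with dlm_get_mle_inuse(), then sleep until the migration target asserts mastery. A condensed sketch with error handling elided (the trailing call arguments, the target variable, the timeout value, and the second half of the wait condition at line 2686 are all reconstructed from context, not from matched lines):

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle)
		return -ENOMEM;

	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
				    namelen, target, dlm->node_num);	/* args assumed */
	if (!ret)
		dlm_get_mle_inuse(mle);		/* hold it across the handoff */

	while (1) {
		ret = wait_event_interruptible_timeout(mle->wq,
				(atomic_read(&mle->woken) == 1),
				msecs_to_jiffies(5000));	/* timeout assumed */
		if (atomic_read(&mle->woken) == 1 ||
		    res->owner == target)	/* condition completed from context */
			break;
		/* otherwise check whether the target died, then retry */
	}

On any failure the fragments show the same three-step unwind every time: dlm_mle_detach_hb_events(), dlm_put_mle(), dlm_put_mle_inuse() (lines 2654-2657 and 2700-2703).
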
3092 /* if there is an existing mle for this lockres, we now know who the master is.
3094 * since the process that put the mle on the list still has a reference to it,
3096 * we will have no mle in the list to start with. now we can add an mle for
3105 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3118 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3120 if (!mle) {
3137 kmem_cache_free(dlm_mle_cache, mle);
3147 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3153 kmem_cache_free(dlm_mle_cache, mle);
3173 * when adding a migration mle, we can clear any other mles
3175 * the master is "master". so we remove any old mle from
3177 * the new migration mle. this way we can hold with the rule
3178 * of having only one mle for a given lock name at all times. */
3181 struct dlm_master_list_entry *mle,
3209 mlog(ML_ERROR, "migration error mle: "
3223 /* remove it so that only one mle will be found */
3230 "for cleared out mle during "
3239 /* now add a migration mle to the tail of the list */
3240 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3241 mle->new_master = new_master;
3244 mle->master = master;
3245 /* do this for consistency with other mle types */
3246 set_bit(new_master, mle->maybe_map);
3247 __dlm_insert_mle(dlm, mle);
3253 * Sets the owner of the lockres, associated with the mle, to UNKNOWN
3256 struct dlm_master_list_entry *mle)
3260 /* Find the lockres associated to the mle and set its owner to UNK */
3261 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3262 mle->mnamehash);
3273 /* about to get rid of mle, detach from heartbeat */
3274 __dlm_mle_detach_hb_events(dlm, mle);
3276 /* dump the mle */
3278 __dlm_put_mle(mle);
3286 struct dlm_master_list_entry *mle)
3288 __dlm_mle_detach_hb_events(dlm, mle);
3290 spin_lock(&mle->spinlock);
3291 __dlm_unlink_mle(dlm, mle);
3292 atomic_set(&mle->woken, 1);
3293 spin_unlock(&mle->spinlock);
3295 wake_up(&mle->wq);
3299 struct dlm_master_list_entry *mle, u8 dead_node)
3303 BUG_ON(mle->type != DLM_MLE_BLOCK);
3305 spin_lock(&mle->spinlock);
3306 bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
3308 mlog(0, "mle found, but dead node %u would not have been "
3310 spin_unlock(&mle->spinlock);
3313 * never arrive. This may result in the mle being unlinked and
3317 atomic_set(&mle->woken, 1);
3318 spin_unlock(&mle->spinlock);
3319 wake_up(&mle->wq);
3322 __dlm_mle_detach_hb_events(dlm, mle);
3323 __dlm_put_mle(mle);
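
Lines 3299-3323 show dlm_clean_block_mle(): when a node dies, a BLOCK mle only matters if the dead node was the expected master, i.e. the lowest bit in maybe_map. A sketch assembled from the matched lines (the if/else structure is an assumption):

static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
				struct dlm_master_list_entry *mle, u8 dead_node)
{
	int bit;

	BUG_ON(mle->type != DLM_MLE_BLOCK);

	spin_lock(&mle->spinlock);
	bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
	if (bit != dead_node) {
		/* dead node would not have been master: nothing to do */
		spin_unlock(&mle->spinlock);
	} else {
		/* the expected assert_master will never arrive; waking
		 * the waiter may unlink and free the mle (line 3313) */
		atomic_set(&mle->woken, 1);
		spin_unlock(&mle->spinlock);
		wake_up(&mle->wq);

		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);
	}
}
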
3329 struct dlm_master_list_entry *mle;
3343 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3344 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3345 mle->type != DLM_MLE_MASTER &&
3346 mle->type != DLM_MLE_MIGRATION);
3351 if (mle->type == DLM_MLE_MASTER)
3357 if (mle->type == DLM_MLE_BLOCK) {
3358 dlm_clean_block_mle(dlm, mle, dead_node);
3362 /* Everything else is a MIGRATION mle */
3373 if (mle->master != dead_node &&
3374 mle->new_master != dead_node)
3377 if (mle->new_master == dead_node && mle->inuse) {
3382 mle->master);
3386 /* If we have reached this point, this mle needs to be
3388 dlm_clean_migration_mle(dlm, mle);
3391 "%u to %u!\n", dlm->name, dead_node, mle->master,
3392 mle->new_master);
3394 /* If we find a lockres associated with the mle, we've
3399 res = dlm_reset_mleres_owner(dlm, mle);
3405 __dlm_put_mle(mle);
3532 struct dlm_master_list_entry *mle;
3549 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3550 if (mle->type != DLM_MLE_BLOCK) {
3551 mlog(ML_ERROR, "bad mle: %p\n", mle);
3552 dlm_print_one_mle(mle);
3554 atomic_set(&mle->woken, 1);
3555 wake_up(&mle->wq);
3557 __dlm_unlink_mle(dlm, mle);
3558 __dlm_mle_detach_hb_events(dlm, mle);
3559 __dlm_put_mle(mle);
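
Lines 3532-3559 are the teardown sweep: by this point only DLM_MLE_BLOCK entries should remain, so anything else is logged as a bad mle before every entry is woken, unhashed, detached from heartbeat, and dropped. A sketch of the loop body assembled from the matched lines:

	hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
		if (mle->type != DLM_MLE_BLOCK) {
			mlog(ML_ERROR, "bad mle: %p\n", mle);
			dlm_print_one_mle(mle);
		}

		atomic_set(&mle->woken, 1);	/* release any blocked waiter */
		wake_up(&mle->wq);

		__dlm_unlink_mle(dlm, mle);
		__dlm_mle_detach_hb_events(dlm, mle);
		__dlm_put_mle(mle);		/* drop the hash list reference */
	}
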