Lines Matching refs:mle

41 			      struct dlm_master_list_entry *mle,
45 struct dlm_master_list_entry *mle,
56 struct dlm_master_list_entry *mle,
60 if (dlm != mle->dlm)
63 if (namelen != mle->mnamelen ||
64 memcmp(name, mle->mname, namelen) != 0)
75 static void dlm_init_mle(struct dlm_master_list_entry *mle,
81 static void dlm_put_mle(struct dlm_master_list_entry *mle);
82 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
84 struct dlm_master_list_entry **mle,
88 struct dlm_master_list_entry *mle, int to);
93 struct dlm_master_list_entry *mle,
97 struct dlm_master_list_entry *mle,
101 struct dlm_master_list_entry *mle,
152 * dlm's established heartbeat callbacks. the mle is attached
155 * by the mle. the mle needs to be detached from the
157 * longer useful to the mle, and before the mle is freed.
160 * the mle once an "answer" regarding the lock master has been
164 struct dlm_master_list_entry *mle)
168 list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
173 struct dlm_master_list_entry *mle)
175 if (!list_empty(&mle->hb_events))
176 list_del_init(&mle->hb_events);
181 struct dlm_master_list_entry *mle)
184 __dlm_mle_detach_hb_events(dlm, mle);
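The comment at 152-160 describes the mle's tie-in to the domain heartbeat callbacks: an mle is attached to dlm->mle_hb_events when it is set up and must be detached again before the last reference is dropped and the entry is freed. A minimal sketch of that attach/use/detach/put sequence, assembled only from names that appear in these matches; the dlm->spinlock/dlm->master_lock pairing around the insert is an assumption about the caller, and error handling is omitted:

static void mle_lifecycle_sketch(struct dlm_ctxt *dlm,
				 struct dlm_lock_resource *res)
{
	struct dlm_master_list_entry *mle;

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle)
		return;

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);	/* assumed caller locking, simplified */
	/* dlm_init_mle() takes the initial kref and attaches the mle to the
	 * heartbeat event list via __dlm_mle_attach_hb_events() (cf. 300-301) */
	dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
	__dlm_insert_mle(dlm, mle);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	/* ... mastery traffic runs here ... */

	/* detach from heartbeat events before the final put can free the mle */
	dlm_mle_detach_hb_events(dlm, mle);
	dlm_put_mle(mle);
}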
188 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
191 dlm = mle->dlm;
195 mle->inuse++;
196 kref_get(&mle->mle_refs);
199 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
202 dlm = mle->dlm;
206 mle->inuse--;
207 __dlm_put_mle(mle);
214 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
217 dlm = mle->dlm;
221 if (!kref_read(&mle->mle_refs)) {
224 mlog(ML_ERROR, "bad mle: %p\n", mle);
225 dlm_print_one_mle(mle);
228 kref_put(&mle->mle_refs, dlm_mle_release);
233 static void dlm_put_mle(struct dlm_master_list_entry *mle)
236 dlm = mle->dlm;
240 __dlm_put_mle(mle);
245 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
247 kref_get(&mle->mle_refs);
250 static void dlm_init_mle(struct dlm_master_list_entry *mle,
259 mle->dlm = dlm;
260 mle->type = type;
261 INIT_HLIST_NODE(&mle->master_hash_node);
262 INIT_LIST_HEAD(&mle->hb_events);
263 memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
264 spin_lock_init(&mle->spinlock);
265 init_waitqueue_head(&mle->wq);
266 atomic_set(&mle->woken, 0);
267 kref_init(&mle->mle_refs);
268 memset(mle->response_map, 0, sizeof(mle->response_map));
269 mle->master = O2NM_MAX_NODES;
270 mle->new_master = O2NM_MAX_NODES;
271 mle->inuse = 0;
273 BUG_ON(mle->type != DLM_MLE_BLOCK &&
274 mle->type != DLM_MLE_MASTER &&
275 mle->type != DLM_MLE_MIGRATION);
277 if (mle->type == DLM_MLE_MASTER) {
279 mle->mleres = res;
280 memcpy(mle->mname, res->lockname.name, res->lockname.len);
281 mle->mnamelen = res->lockname.len;
282 mle->mnamehash = res->lockname.hash;
285 mle->mleres = NULL;
286 memcpy(mle->mname, name, namelen);
287 mle->mnamelen = namelen;
288 mle->mnamehash = dlm_lockid_hash(name, namelen);
291 atomic_inc(&dlm->mle_tot_count[mle->type]);
292 atomic_inc(&dlm->mle_cur_count[mle->type]);
295 memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
296 memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
297 clear_bit(dlm->node_num, mle->vote_map);
298 clear_bit(dlm->node_num, mle->node_map);
300 /* attach the mle to the domain node up/down events */
301 __dlm_mle_attach_hb_events(dlm, mle);
304 void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
309 if (!hlist_unhashed(&mle->master_hash_node))
310 hlist_del_init(&mle->master_hash_node);
313 void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
319 bucket = dlm_master_hash(dlm, mle->mnamehash);
320 hlist_add_head(&mle->master_hash_node, bucket);
325 struct dlm_master_list_entry **mle,
340 *mle = tmpmle;
348 struct dlm_master_list_entry *mle;
352 list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
354 dlm_mle_node_up(dlm, mle, NULL, idx);
356 dlm_mle_node_down(dlm, mle, NULL, idx);
361 struct dlm_master_list_entry *mle,
364 spin_lock(&mle->spinlock);
366 if (!test_bit(idx, mle->node_map))
369 clear_bit(idx, mle->node_map);
371 spin_unlock(&mle->spinlock);
375 struct dlm_master_list_entry *mle,
378 spin_lock(&mle->spinlock);
380 if (test_bit(idx, mle->node_map))
383 set_bit(idx, mle->node_map);
385 spin_unlock(&mle->spinlock);
407 struct dlm_master_list_entry *mle;
410 mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
411 dlm = mle->dlm;
416 mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
417 mle->type);
420 __dlm_unlink_mle(dlm, mle);
422 /* detach the mle from the domain node up/down events */
423 __dlm_mle_detach_hb_events(dlm, mle);
425 atomic_dec(&dlm->mle_cur_count[mle->type]);
429 kmem_cache_free(dlm_mle_cache, mle);
709 struct dlm_master_list_entry *mle = NULL;
816 blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
819 if (mle->type == DLM_MLE_MASTER) {
823 mig = (mle->type == DLM_MLE_MIGRATION);
826 * of the MIGRATION mle: either the migrate finished or
827 * one of the nodes died and the mle was cleaned up.
830 * for us in the refmap. detach the mle and drop it.
832 if (mig || mle->master != O2NM_MAX_NODES) {
833 BUG_ON(mig && mle->master == dlm->node_num);
844 dlm_mle_detach_hb_events(dlm, mle);
845 dlm_put_mle(mle);
846 mle = NULL;
848 * the mle or lockres waitqueue here */
855 mle = alloc_mle;
858 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
859 set_bit(dlm->node_num, mle->maybe_map);
860 __dlm_insert_mle(dlm, mle);
864 * considered. these will not appear in the mle nodemap
886 /* get an extra ref on the mle in case this is a BLOCK
890 dlm_get_mle_inuse(mle);
897 * dlm spinlock would be detectable by a change on the mle,
936 dlm_node_iter_init(mle->vote_map, &iter);
938 ret = dlm_do_master_request(res, mle, nodenum);
941 if (mle->master != O2NM_MAX_NODES) {
943 if (mle->master <= nodenum)
951 lockid, nodenum, mle->master);
957 ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
969 dlm_print_one_mle(mle);
981 dlm_mle_detach_hb_events(dlm, mle);
982 dlm_put_mle(mle);
984 dlm_put_mle_inuse(mle);
993 /* need to free the unused mle */
1005 struct dlm_master_list_entry *mle,
1026 ret = dlm_do_master_request(res, mle, res->owner);
1039 spin_lock(&mle->spinlock);
1040 m = mle->master;
1041 map_changed = (memcmp(mle->vote_map, mle->node_map,
1042 sizeof(mle->vote_map)) != 0);
1043 voting_done = (memcmp(mle->vote_map, mle->response_map,
1044 sizeof(mle->vote_map)) == 0);
1051 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1052 b = (mle->type == DLM_MLE_BLOCK);
1059 spin_unlock(&mle->spinlock);
1084 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1089 mle->master = dlm->node_num;
1100 spin_unlock(&mle->spinlock);
1105 atomic_set(&mle->woken, 0);
1106 (void)wait_event_timeout(mle->wq,
1107 (atomic_read(&mle->woken) == 1),
1124 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1210 struct dlm_master_list_entry *mle,
1221 assert_spin_locked(&mle->spinlock);
1223 dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1234 clear_bit(node, mle->response_map);
1235 set_bit(node, mle->vote_map);
1239 int lowest = find_next_bit(mle->maybe_map,
1243 clear_bit(node, mle->maybe_map);
1249 lowest = find_next_bit(mle->maybe_map,
1260 /* mle is an MLE_BLOCK, but
1266 * has already run, so the mle
1274 mle->type = DLM_MLE_MASTER;
1275 mle->mleres = res;
1282 memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1283 memset(mle->response_map, 0, sizeof(mle->response_map));
1285 memcpy(mle->vote_map, mle->node_map,
1286 sizeof(mle->node_map));
1288 if (mle->type != DLM_MLE_BLOCK)
1289 set_bit(dlm->node_num, mle->maybe_map);
1309 struct dlm_master_list_entry *mle, int to)
1311 struct dlm_ctxt *dlm = mle->dlm;
1318 BUG_ON(mle->type == DLM_MLE_MIGRATION);
1320 request.namelen = (u8)mle->mnamelen;
1321 memcpy(request.name, mle->mname, request.namelen);
1354 spin_lock(&mle->spinlock);
1357 set_bit(to, mle->response_map);
1362 mle->master = to;
1366 set_bit(to, mle->response_map);
1370 set_bit(to, mle->response_map);
1371 set_bit(to, mle->maybe_map);
1382 spin_unlock(&mle->spinlock);
1397 * mle->spinlock
1409 struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1460 if (mle)
1461 kmem_cache_free(dlm_mle_cache, mle);
1469 if (mle)
1470 kmem_cache_free(dlm_mle_cache, mle);
1484 if (mle)
1485 kmem_cache_free(dlm_mle_cache, mle);
1502 mlog(ML_ERROR, "no mle found for this lock!\n");
1547 /* keep the mle attached to heartbeat events */
1549 if (mle)
1550 kmem_cache_free(dlm_mle_cache, mle);
1564 // mlog(0, "no mle found\n");
1565 if (!mle) {
1569 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1570 if (!mle) {
1580 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1581 set_bit(request->node_idx, mle->maybe_map);
1582 __dlm_insert_mle(dlm, mle);
1587 mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1593 mlog(0, "migration mle was found (%u->%u)\n",
1606 /* keep the mle attached to heartbeat events */
1677 struct dlm_master_list_entry *mle = NULL;
1708 if (dlm_find_mle(dlm, &mle, (char *)lockname,
1710 dlm_print_one_mle(mle);
1711 __dlm_put_mle(mle);
1756 * mle->spinlock
1765 struct dlm_master_list_entry *mle = NULL;
1794 if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1800 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1823 if (mle->type == DLM_MLE_MIGRATION) {
1834 __dlm_put_mle(mle);
1853 if (!mle) {
1863 } else if (mle->type != DLM_MLE_MIGRATION) {
1886 } else /* mle->type == DLM_MLE_MIGRATION */ {
1888 if (assert->node_idx != mle->new_master) {
1892 assert->node_idx, mle->new_master,
1893 mle->master, namelen, name);
1904 if (mle) {
1909 spin_lock(&mle->spinlock);
1910 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1913 /* MASTER mle: if any bits set in the response map
1916 while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
1924 mle->master = assert->node_idx;
1925 atomic_set(&mle->woken, 1);
1926 wake_up(&mle->wq);
1927 spin_unlock(&mle->spinlock);
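The matches at 1105-1107 (the waiting requester) and 1924-1927 just above (the assert-master handler) are the two halves of a simple handshake on mle->wq: the waiter clears mle->woken and sleeps in wait_event_timeout(); the node that learns the master records it, sets woken and wakes the queue. Condensed sketch of the pattern; the timeout value is left symbolic since it is not part of the matches:

	/* waiter side (cf. 1105-1107) */
	atomic_set(&mle->woken, 0);
	(void)wait_event_timeout(mle->wq,
				 (atomic_read(&mle->woken) == 1),
				 timeo);	/* timeout value not shown in the matches */

	/* waker side (cf. 1924-1927), under mle->spinlock */
	spin_lock(&mle->spinlock);
	mle->master = assert->node_idx;
	atomic_set(&mle->woken, 1);
	wake_up(&mle->wq);
	spin_unlock(&mle->spinlock);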
1932 if (mle->type == DLM_MLE_MIGRATION) {
1936 dlm->node_num, mle->new_master);
1939 dlm_change_lockres_owner(dlm, res, mle->new_master);
1942 dlm_change_lockres_owner(dlm, res, mle->master);
1952 * on this mle. */
1955 rr = kref_read(&mle->mle_refs);
1956 if (mle->inuse > 0) {
1971 assert->node_idx, rr, extra_ref, mle->inuse);
1972 dlm_print_one_mle(mle);
1974 __dlm_unlink_mle(dlm, mle);
1975 __dlm_mle_detach_hb_events(dlm, mle);
1976 __dlm_put_mle(mle);
1982 __dlm_put_mle(mle);
1988 "owner is %u (%.*s), no mle\n", assert->node_idx,
2009 "mle present here for %s:%.*s, but no lockres!\n",
2028 if (mle)
2029 __dlm_put_mle(mle);
2545 struct dlm_master_list_entry *mle = NULL;
2571 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
2572 if (!mle) {
2580 * add the migration mle to the list
2584 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2586 /* get an extra reference on the mle.
2591 dlm_get_mle_inuse(mle);
2626 dlm_mle_detach_hb_events(dlm, mle);
2627 dlm_put_mle(mle);
2628 dlm_put_mle_inuse(mle);
2629 } else if (mle) {
2630 kmem_cache_free(dlm_mle_cache, mle);
2631 mle = NULL;
2637 * at this point, we have a migration target, an mle
2656 /* migration failed, detach and clean up mle */
2657 dlm_mle_detach_hb_events(dlm, mle);
2658 dlm_put_mle(mle);
2659 dlm_put_mle_inuse(mle);
2672 * we had to put an mle in the list to begin the process. this
2678 * mle and sets the master to UNKNOWN. */
2683 ret = wait_event_interruptible_timeout(mle->wq,
2684 (atomic_read(&mle->woken) == 1),
2688 if (atomic_read(&mle->woken) == 1 ||
2702 /* migration failed, detach and clean up mle */
2703 dlm_mle_detach_hb_events(dlm, mle);
2704 dlm_put_mle(mle);
2705 dlm_put_mle_inuse(mle);
2726 dlm_mle_detach_hb_events(dlm, mle);
2727 dlm_put_mle_inuse(mle);
3094 /* if there is an existing mle for this lockres, we now know who the master is.
3096 * since the process that put the mle on the list still has a reference to it,
3098 * we will have no mle in the list to start with. now we can add an mle for
3107 struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3120 mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
3122 if (!mle) {
3139 kmem_cache_free(dlm_mle_cache, mle);
3149 ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3155 kmem_cache_free(dlm_mle_cache, mle);
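Per the comment at 3094-3098, the migrate-request path pre-allocates its mle (3120) and then tries to add it as a migration entry (3149); if the add fails or the entry is not consumed, it goes straight back to dlm_mle_cache (3139, 3155). A sketch of that allocate/add/free-on-failure shape; the spinlock pairing and the argument list after &oldmle follow the call at 2584-2586 and are partly assumptions:

	struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
	int ret;

	mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
	if (!mle)
		return -ENOMEM;		/* error value is an assumption */

	spin_lock(&dlm->spinlock);
	spin_lock(&dlm->master_lock);	/* assumed locking, simplified */
	ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, namelen,
				    new_master, master);
	spin_unlock(&dlm->master_lock);
	spin_unlock(&dlm->spinlock);

	if (ret < 0)
		kmem_cache_free(dlm_mle_cache, mle);	/* cf. 3155 */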
3175 * when adding a migration mle, we can clear any other mles
3177 * the master is "master". so we remove any old mle from
3179 * the new migration mle. this way we can hold with the rule
3180 * of having only one mle for a given lock name at all times. */
3183 struct dlm_master_list_entry *mle,
3211 mlog(ML_ERROR, "migration error mle: "
3225 /* remove it so that only one mle will be found */
3232 "for cleared out mle during "
3241 /* now add a migration mle to the tail of the list */
3242 dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3243 mle->new_master = new_master;
3246 mle->master = master;
3247 /* do this for consistency with other mle types */
3248 set_bit(new_master, mle->maybe_map);
3249 __dlm_insert_mle(dlm, mle);
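The comment at 3175-3180 states the invariant that 3183-3249 enforce: at most one mle per lock name, so adding a migration mle first evicts any existing entry and only then inserts the new one. Condensed sketch of that shape, based only on these matches; the error path for a pre-existing migration mle and the hand-off of the old entry through *oldmle are simplified, and the -EEXIST value is an assumption:

static int add_migration_mle_sketch(struct dlm_ctxt *dlm,
				    struct dlm_lock_resource *res,
				    struct dlm_master_list_entry *mle,
				    struct dlm_master_list_entry **oldmle,
				    const char *name, unsigned int namelen,
				    u8 new_master, u8 master)
{
	struct dlm_master_list_entry *tmp;

	/* at most one mle per lock name: clear out any existing entry first */
	if (dlm_find_mle(dlm, &tmp, (char *)name, namelen)) {
		if (tmp->type == DLM_MLE_MIGRATION)
			return -EEXIST;	/* "migration error mle" case (3211) */
		/* remove it so that only one mle will be found (3225) */
		__dlm_unlink_mle(dlm, tmp);
		__dlm_mle_detach_hb_events(dlm, tmp);
		*oldmle = tmp;
	}

	/* now add a migration mle to the tail of the list (3241-3249) */
	dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
	mle->new_master = new_master;
	mle->master = master;
	/* do this for consistency with other mle types */
	set_bit(new_master, mle->maybe_map);
	__dlm_insert_mle(dlm, mle);

	return 0;
}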
3255 * Sets the owner of the lockres, associated to the mle, to UNKNOWN
3258 struct dlm_master_list_entry *mle)
3262 /* Find the lockres associated to the mle and set its owner to UNK */
3263 res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen,
3264 mle->mnamehash);
3275 /* about to get rid of mle, detach from heartbeat */
3276 __dlm_mle_detach_hb_events(dlm, mle);
3278 /* dump the mle */
3280 __dlm_put_mle(mle);
3288 struct dlm_master_list_entry *mle)
3290 __dlm_mle_detach_hb_events(dlm, mle);
3292 spin_lock(&mle->spinlock);
3293 __dlm_unlink_mle(dlm, mle);
3294 atomic_set(&mle->woken, 1);
3295 spin_unlock(&mle->spinlock);
3297 wake_up(&mle->wq);
3301 struct dlm_master_list_entry *mle, u8 dead_node)
3305 BUG_ON(mle->type != DLM_MLE_BLOCK);
3307 spin_lock(&mle->spinlock);
3308 bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3310 mlog(0, "mle found, but dead node %u would not have been "
3312 spin_unlock(&mle->spinlock);
3315 * never arrive. This may result in the mle being unlinked and
3319 atomic_set(&mle->woken, 1);
3320 spin_unlock(&mle->spinlock);
3321 wake_up(&mle->wq);
3324 __dlm_mle_detach_hb_events(dlm, mle);
3325 __dlm_put_mle(mle);
3331 struct dlm_master_list_entry *mle;
3345 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3346 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3347 mle->type != DLM_MLE_MASTER &&
3348 mle->type != DLM_MLE_MIGRATION);
3353 if (mle->type == DLM_MLE_MASTER)
3359 if (mle->type == DLM_MLE_BLOCK) {
3360 dlm_clean_block_mle(dlm, mle, dead_node);
3364 /* Everything else is a MIGRATION mle */
3375 if (mle->master != dead_node &&
3376 mle->new_master != dead_node)
3379 if (mle->new_master == dead_node && mle->inuse) {
3384 mle->master);
3388 /* If we have reached this point, this mle needs to be
3390 dlm_clean_migration_mle(dlm, mle);
3393 "%u to %u!\n", dlm->name, dead_node, mle->master,
3394 mle->new_master);
3396 /* If we find a lockres associated with the mle, we've
3401 res = dlm_reset_mleres_owner(dlm, mle);
3407 __dlm_put_mle(mle);
3534 struct dlm_master_list_entry *mle;
3551 hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) {
3552 if (mle->type != DLM_MLE_BLOCK) {
3553 mlog(ML_ERROR, "bad mle: %p\n", mle);
3554 dlm_print_one_mle(mle);
3556 atomic_set(&mle->woken, 1);
3557 wake_up(&mle->wq);
3559 __dlm_unlink_mle(dlm, mle);
3560 __dlm_mle_detach_hb_events(dlm, mle);
3561 __dlm_put_mle(mle);