Lines matching refs: memb
29 struct dlm_member *memb)
36 memb->slot = le16_to_cpu(rf->rf_our_slot);
37 memb->generation = le32_to_cpu(rf->rf_generation);
99 struct dlm_member *memb;
125 list_for_each_entry(memb, &ls->ls_nodes, list) {
127 if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)
129 memb->slot = le16_to_cpu(ro->ro_slot);
130 memb->slot_prev = memb->slot;
134 if (memb->nodeid == our_nodeid) {
135 if (ls->ls_slot && ls->ls_slot != memb->slot) {
138 memb->slot);
143 ls->ls_slot = memb->slot;
146 if (!memb->slot) {
148 memb->nodeid);
156 /* for any nodes that do not support slots, we will not have set memb->slot
157 in wait_status_all(), so memb->slot will remain -1, and we will not
163 struct dlm_member *memb;
172 /* our own memb struct will have slot -1 gen 0 */
174 list_for_each_entry(memb, &ls->ls_nodes, list) {
175 if (memb->nodeid == our_nodeid) {
176 memb->slot = ls->ls_slot;
177 memb->generation = ls->ls_generation;
182 list_for_each_entry(memb, &ls->ls_nodes, list) {
183 if (memb->generation > gen)
184 gen = memb->generation;
188 if (memb->slot == -1)
193 if (!memb->slot)
200 if (!max || max < memb->slot)
201 max = memb->slot;
205 if (memb->slot_prev && memb->slot && memb->slot_prev != memb->slot) {
207 memb->nodeid, memb->slot_prev, memb->slot);
210 memb->slot_prev = memb->slot;
222 list_for_each_entry(memb, &ls->ls_nodes, list) {
223 if (!memb->slot)
226 if (memb->slot > array_size) {
227 log_error(ls, "invalid slot number %d", memb->slot);
232 array[memb->slot - 1].nodeid = memb->nodeid;
233 array[memb->slot - 1].slot = memb->slot;
239 list_for_each_entry(memb, &ls->ls_nodes, list) {
240 if (memb->slot)
247 memb->slot = i + 1;
248 memb->slot_prev = memb->slot;
249 array[i].nodeid = memb->nodeid;
250 array[i].slot = memb->slot;
253 if (!ls->ls_slot && memb->nodeid == our_nodeid)
254 ls->ls_slot = memb->slot;
258 if (!memb->slot) {
288 struct dlm_member *memb = NULL;
294 memb = list_entry(tmp, struct dlm_member, list);
295 if (new->nodeid < memb->nodeid)
299 if (!memb)
327 struct dlm_member *memb;
330 memb = kzalloc(sizeof(*memb), GFP_NOFS);
331 if (!memb)
334 memb->nodeid = node->nodeid;
335 memb->weight = node->weight;
336 memb->comm_seq = node->comm_seq;
340 kfree(memb);
344 add_ordered_member(ls, memb);
351 struct dlm_member *memb;
353 list_for_each_entry(memb, head, list) {
354 if (memb->nodeid == nodeid)
355 return memb;
377 struct dlm_member *memb;
380 memb = list_entry(head->next, struct dlm_member, list);
381 list_del(&memb->list);
383 after_del(memb->nodeid);
384 kfree(memb);
409 struct dlm_member *memb;
415 list_for_each_entry(memb, &ls->ls_nodes, list) {
416 if (memb->weight)
417 total += memb->weight;
432 list_for_each_entry(memb, &ls->ls_nodes, list) {
433 if (!all_zero && !memb->weight)
439 w = memb->weight;
444 array[x++] = memb->nodeid;
454 struct dlm_member *memb;
457 list_for_each_entry(memb, &ls->ls_nodes, list) {
462 error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
479 static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
494 error = dlm_comm_seq(memb->nodeid, &seq);
496 if (!error && seq == memb->comm_seq)
499 slot.nodeid = memb->nodeid;
500 slot.slot = memb->slot;
507 struct dlm_member *memb;
520 list_for_each_entry(memb, &ls->ls_nodes, list) {
525 slots[i].nodeid = memb->nodeid;
526 slots[i].slot = memb->slot;
550 struct dlm_member *memb, *safe;
561 list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
562 log_rinfo(ls, "prev removed member %d", memb->nodeid);
568 list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
569 node = find_config_node(rv, memb->nodeid);
574 log_rinfo(ls, "remove member %d", memb->nodeid);
578 memb->nodeid, memb->comm_seq, node->comm_seq);
582 list_move(&memb->list, &ls->ls_nodes_gone);
583 remove_remote_member(memb->nodeid);
585 dlm_lsop_recover_slot(ls, memb);
601 list_for_each_entry(memb, &ls->ls_nodes, list) {
602 if (low == -1 || memb->nodeid < low)
603 low = memb->nodeid;
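
The densest run of matches (source lines 99-258) is the slot bookkeeping: memb->slot is -1 for a node that does not support slots, 0 for a node that still needs a slot, and otherwise a 1-based offset that must not change once assigned (hence the slot_prev check). The matches appear to come from the DLM member/slot code, fs/dlm/member.c in the Linux kernel. The userspace sketch below is illustrative only, not the kernel implementation: struct node, struct slot_entry and assign_slots() are hypothetical stand-ins for struct dlm_member and the slot array the kernel builds, and it mirrors just the two-pass assignment visible around source lines 222-258 (record the slots already held, then hand each unslotted member the lowest free offset).

/* Illustrative userspace sketch, not the kernel's implementation.
 * struct node, struct slot_entry and assign_slots() are stand-ins for
 * the kernel's struct dlm_member and the slot array it builds. */
#include <stdio.h>
#include <string.h>

struct node {
	int nodeid;
	int slot;	/* 0 = needs a slot, -1 = slots unsupported, else 1-based */
	int slot_prev;	/* once assigned, a slot must not change */
};

struct slot_entry {
	int nodeid;
	int slot;
};

static int assign_slots(struct node *nodes, int n,
			struct slot_entry *array, int array_size)
{
	int i, j;

	memset(array, 0, array_size * sizeof(*array));

	/* a member that doesn't support slots blocks assignment entirely
	 * (cf. source line 188) */
	for (i = 0; i < n; i++)
		if (nodes[i].slot == -1)
			return -1;

	/* pass 1: record the slots members already hold (cf. lines 222-233) */
	for (i = 0; i < n; i++) {
		if (!nodes[i].slot)
			continue;
		if (nodes[i].slot > array_size)
			return -1;		/* "invalid slot number" */
		array[nodes[i].slot - 1].nodeid = nodes[i].nodeid;
		array[nodes[i].slot - 1].slot = nodes[i].slot;
	}

	/* pass 2: give each unslotted member the lowest unused offset
	 * (cf. lines 239-258) */
	for (i = 0; i < n; i++) {
		if (nodes[i].slot)
			continue;
		for (j = 0; j < array_size; j++) {
			if (array[j].nodeid)
				continue;
			nodes[i].slot = j + 1;
			nodes[i].slot_prev = nodes[i].slot;
			array[j].nodeid = nodes[i].nodeid;
			array[j].slot = nodes[i].slot;
			break;
		}
		if (!nodes[i].slot)
			return -1;		/* "no free slot found" */
	}
	return 0;
}

int main(void)
{
	/* node 1 already holds slot 2; nodes 2 and 3 still need slots */
	struct node nodes[] = { {1, 2, 2}, {2, 0, 0}, {3, 0, 0} };
	struct slot_entry array[4];
	int i;

	if (!assign_slots(nodes, 3, array, 4))
		for (i = 0; i < 3; i++)
			printf("nodeid %d -> slot %d\n",
			       nodes[i].nodeid, nodes[i].slot);
	return 0;
}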
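
A second group of matches (source lines 409-444) expands node weights into a flat array: each member contributes weight copies of its nodeid, members with weight 0 are skipped, and if every weight is 0 all members count once. Again a hedged userspace sketch, with stand-in names (struct weighted_node, make_node_array()) rather than the kernel's own types and function:

/* Illustrative userspace sketch of the weight expansion visible around
 * source lines 409-444: each member contributes `weight` copies of its
 * nodeid, and if every weight is zero each member counts once.
 * struct weighted_node and make_node_array() are stand-in names. */
#include <stdio.h>
#include <stdlib.h>

struct weighted_node { int nodeid; int weight; };

static int *make_node_array(const struct weighted_node *nodes, int n,
			    int *out_len)
{
	int total = 0, all_zero = 0, w, i, x = 0;
	int *array;

	for (i = 0; i < n; i++)
		total += nodes[i].weight;

	/* all nodes revert to weight 1 if every weight is zero */
	if (!total) {
		total = n;
		all_zero = 1;
	}

	array = calloc(total, sizeof(*array));
	if (!array)
		return NULL;

	for (i = 0; i < n; i++) {
		if (!all_zero && !nodes[i].weight)
			continue;
		w = all_zero ? 1 : nodes[i].weight;
		while (w--)
			array[x++] = nodes[i].nodeid;
	}

	*out_len = x;
	return array;
}

int main(void)
{
	struct weighted_node nodes[] = { {1, 2}, {2, 0}, {3, 1} };
	int *array, len, i;

	array = make_node_array(nodes, 3, &len);
	if (!array)
		return 1;

	/* expected output: 1 1 3 (node 2 has weight 0 and is skipped) */
	for (i = 0; i < len; i++)
		printf("%d ", array[i]);
	printf("\n");
	free(array);
	return 0;
}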