Lines matching references to `ls` (cross-reference listing; each entry shows the original source line number followed by the matching line)

35 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
43 ls = dlm_find_lockspace_local(ls->ls_local_handle);
44 if (!ls)
49 dlm_ls_stop(ls);
52 dlm_ls_start(ls);
57 dlm_put_lockspace(ls);
61 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
63 int rc = kstrtoint(buf, 0, &ls->ls_uevent_result);
67 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
68 wake_up(&ls->ls_uevent_wait);
72 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
74 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
77 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
79 int rc = kstrtouint(buf, 0, &ls->ls_global_id);
86 static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf)
88 return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls));
91 static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len)
99 set_bit(LSFL_NODIR, &ls->ls_flags);
103 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
105 uint32_t status = dlm_recover_status(ls);
109 static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
111 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
166 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
168 return a->show ? a->show(ls, buf) : 0;
174 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
176 return a->store ? a->store(ls, buf, len) : len;
181 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
182 kfree(ls);
198 static int do_uevent(struct dlm_ls *ls, int in)
201 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
203 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
205 log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");
210 wait_event(ls->ls_uevent_wait,
211 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
213 log_rinfo(ls, "group event done %d", ls->ls_uevent_result);
215 return ls->ls_uevent_result;
220 const struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
222 add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
252 struct dlm_ls *ls;
255 list_for_each_entry(ls, &lslist, ls_list) {
256 if (time_after_eq(jiffies, ls->ls_scan_time +
259 return ls;
268 struct dlm_ls *ls;
271 ls = find_ls_to_scan();
272 if (ls) {
273 if (dlm_lock_recovery_try(ls)) {
274 ls->ls_scan_time = jiffies;
275 dlm_scan_rsbs(ls);
276 dlm_unlock_recovery(ls);
278 ls->ls_scan_time += HZ;
307 struct dlm_ls *ls;
311 list_for_each_entry(ls, &lslist, ls_list) {
312 if (ls->ls_global_id == id) {
313 atomic_inc(&ls->ls_count);
317 ls = NULL;
320 return ls;
325 struct dlm_ls *ls;
328 list_for_each_entry(ls, &lslist, ls_list) {
329 if (ls->ls_local_handle == lockspace) {
330 atomic_inc(&ls->ls_count);
334 ls = NULL;
337 return ls;
342 struct dlm_ls *ls;
345 list_for_each_entry(ls, &lslist, ls_list) {
346 if (ls->ls_device.minor == minor) {
347 atomic_inc(&ls->ls_count);
351 ls = NULL;
354 return ls;
357 void dlm_put_lockspace(struct dlm_ls *ls)
359 if (atomic_dec_and_test(&ls->ls_count))
360 wake_up(&ls->ls_count_wait);
363 static void remove_lockspace(struct dlm_ls *ls)
366 wait_event(ls->ls_count_wait, atomic_read(&ls->ls_count) == 0);
369 if (atomic_read(&ls->ls_count) != 0) {
374 WARN_ON(ls->ls_create_count != 0);
375 list_del(&ls->ls_list);
409 struct dlm_ls *ls;
452 list_for_each_entry(ls, &lslist, ls_list) {
453 WARN_ON(ls->ls_create_count <= 0);
454 if (ls->ls_namelen != namelen)
456 if (memcmp(ls->ls_name, name, namelen))
462 ls->ls_create_count++;
463 *lockspace = ls;
474 ls = kzalloc(sizeof(*ls), GFP_NOFS);
475 if (!ls)
477 memcpy(ls->ls_name, name, namelen);
478 ls->ls_namelen = namelen;
479 ls->ls_lvblen = lvblen;
480 atomic_set(&ls->ls_count, 0);
481 init_waitqueue_head(&ls->ls_count_wait);
482 ls->ls_flags = 0;
483 ls->ls_scan_time = jiffies;
486 ls->ls_ops = ops;
487 ls->ls_ops_arg = ops_arg;
493 ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
496 ls->ls_rsbtbl_size = size;
498 ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
499 if (!ls->ls_rsbtbl)
502 ls->ls_rsbtbl[i].keep.rb_node = NULL;
503 ls->ls_rsbtbl[i].toss.rb_node = NULL;
504 spin_lock_init(&ls->ls_rsbtbl[i].lock);
508 ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
510 if (!ls->ls_remove_names[i])
514 idr_init(&ls->ls_lkbidr);
515 spin_lock_init(&ls->ls_lkbidr_spin);
517 INIT_LIST_HEAD(&ls->ls_waiters);
518 mutex_init(&ls->ls_waiters_mutex);
519 INIT_LIST_HEAD(&ls->ls_orphans);
520 mutex_init(&ls->ls_orphans_mutex);
522 INIT_LIST_HEAD(&ls->ls_new_rsb);
523 spin_lock_init(&ls->ls_new_rsb_spin);
525 INIT_LIST_HEAD(&ls->ls_nodes);
526 INIT_LIST_HEAD(&ls->ls_nodes_gone);
527 ls->ls_num_nodes = 0;
528 ls->ls_low_nodeid = 0;
529 ls->ls_total_weight = 0;
530 ls->ls_node_array = NULL;
532 memset(&ls->ls_local_rsb, 0, sizeof(struct dlm_rsb));
533 ls->ls_local_rsb.res_ls = ls;
535 ls->ls_debug_rsb_dentry = NULL;
536 ls->ls_debug_waiters_dentry = NULL;
538 init_waitqueue_head(&ls->ls_uevent_wait);
539 ls->ls_uevent_result = 0;
540 init_completion(&ls->ls_recovery_done);
541 ls->ls_recovery_result = -1;
543 spin_lock_init(&ls->ls_cb_lock);
544 INIT_LIST_HEAD(&ls->ls_cb_delay);
546 ls->ls_recoverd_task = NULL;
547 mutex_init(&ls->ls_recoverd_active);
548 spin_lock_init(&ls->ls_recover_lock);
549 spin_lock_init(&ls->ls_rcom_spin);
550 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
551 ls->ls_recover_status = 0;
552 ls->ls_recover_seq = get_random_u64();
553 ls->ls_recover_args = NULL;
554 init_rwsem(&ls->ls_in_recovery);
555 init_rwsem(&ls->ls_recv_active);
556 INIT_LIST_HEAD(&ls->ls_requestqueue);
557 atomic_set(&ls->ls_requestqueue_cnt, 0);
558 init_waitqueue_head(&ls->ls_requestqueue_wait);
559 mutex_init(&ls->ls_requestqueue_mutex);
560 spin_lock_init(&ls->ls_clear_proc_locks);
567 ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
568 if (!ls->ls_recover_buf)
571 ls->ls_slot = 0;
572 ls->ls_num_slots = 0;
573 ls->ls_slots_size = 0;
574 ls->ls_slots = NULL;
576 INIT_LIST_HEAD(&ls->ls_recover_list);
577 spin_lock_init(&ls->ls_recover_list_lock);
578 idr_init(&ls->ls_recover_idr);
579 spin_lock_init(&ls->ls_recover_idr_lock);
580 ls->ls_recover_list_count = 0;
581 ls->ls_local_handle = ls;
582 init_waitqueue_head(&ls->ls_wait_general);
583 INIT_LIST_HEAD(&ls->ls_root_list);
584 init_rwsem(&ls->ls_root_sem);
587 ls->ls_create_count = 1;
588 list_add(&ls->ls_list, &lslist);
592 error = dlm_callback_start(ls);
594 log_error(ls, "can't start dlm_callback %d", error);
599 init_waitqueue_head(&ls->ls_recover_lock_wait);
602 * Once started, dlm_recoverd first looks for ls in lslist, then
608 error = dlm_recoverd_start(ls);
610 log_error(ls, "can't start dlm_recoverd %d", error);
614 wait_event(ls->ls_recover_lock_wait,
615 test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
617 /* let kobject handle freeing of ls if there's an error */
620 ls->ls_kobj.kset = dlm_kset;
621 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
622 "%s", ls->ls_name);
625 kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
633 error = do_uevent(ls, 1);
638 wait_for_completion(&ls->ls_recovery_done);
639 error = ls->ls_recovery_result;
643 dlm_create_debug_file(ls);
645 log_rinfo(ls, "join complete");
646 *lockspace = ls;
650 do_uevent(ls, 0);
651 dlm_clear_members(ls);
652 kfree(ls->ls_node_array);
654 dlm_recoverd_stop(ls);
656 dlm_callback_stop(ls);
659 list_del(&ls->ls_list);
661 idr_destroy(&ls->ls_recover_idr);
662 kfree(ls->ls_recover_buf);
664 idr_destroy(&ls->ls_lkbidr);
667 kfree(ls->ls_remove_names[i]);
668 vfree(ls->ls_rsbtbl);
671 kobject_put(&ls->ls_kobj);
673 kfree(ls);
755 static int lockspace_busy(struct dlm_ls *ls, int force)
759 spin_lock(&ls->ls_lkbidr_spin);
761 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
763 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
767 spin_unlock(&ls->ls_lkbidr_spin);
771 static int release_lockspace(struct dlm_ls *ls, int force)
777 busy = lockspace_busy(ls, force);
780 if (ls->ls_create_count == 1) {
784 /* remove_lockspace takes ls off lslist */
785 ls->ls_create_count = 0;
788 } else if (ls->ls_create_count > 1) {
789 rv = --ls->ls_create_count;
796 log_debug(ls, "release_lockspace no remove %d", rv);
803 dlm_device_deregister(ls);
806 do_uevent(ls, 0);
808 dlm_recoverd_stop(ls);
812 dlm_clear_members(ls);
816 dlm_callback_stop(ls);
818 remove_lockspace(ls);
820 dlm_delete_debug_file(ls);
822 idr_destroy(&ls->ls_recover_idr);
823 kfree(ls->ls_recover_buf);
829 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
830 idr_destroy(&ls->ls_lkbidr);
836 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
837 while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
839 rb_erase(n, &ls->ls_rsbtbl[i].keep);
843 while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
845 rb_erase(n, &ls->ls_rsbtbl[i].toss);
850 vfree(ls->ls_rsbtbl);
853 kfree(ls->ls_remove_names[i]);
855 while (!list_empty(&ls->ls_new_rsb)) {
856 rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
866 dlm_purge_requestqueue(ls);
867 kfree(ls->ls_recover_args);
868 dlm_clear_members(ls);
869 dlm_clear_members_gone(ls);
870 kfree(ls->ls_node_array);
871 log_rinfo(ls, "release_lockspace final free");
872 kobject_put(&ls->ls_kobj);
873 /* The ls structure will be freed when the kobject is done with */
895 struct dlm_ls *ls;
898 ls = dlm_find_lockspace_local(lockspace);
899 if (!ls)
901 dlm_put_lockspace(ls);
904 error = release_lockspace(ls, force);
916 struct dlm_ls *ls;
922 list_for_each_entry(ls, &lslist, ls_list) {
923 if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
928 log_error(ls, "no userland control daemon, stopping lockspace");
929 dlm_ls_stop(ls);