Lines matching references to ls

35 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
43 ls = dlm_find_lockspace_local(ls->ls_local_handle);
44 if (!ls)
49 dlm_ls_stop(ls);
52 dlm_ls_start(ls);
57 dlm_put_lockspace(ls);
61 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
63 int rc = kstrtoint(buf, 0, &ls->ls_uevent_result);
67 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
68 wake_up(&ls->ls_uevent_wait);
72 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
74 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
77 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
79 int rc = kstrtouint(buf, 0, &ls->ls_global_id);
86 static ssize_t dlm_nodir_show(struct dlm_ls *ls, char *buf)
88 return snprintf(buf, PAGE_SIZE, "%u\n", dlm_no_directory(ls));
91 static ssize_t dlm_nodir_store(struct dlm_ls *ls, const char *buf, size_t len)
99 set_bit(LSFL_NODIR, &ls->ls_flags);
103 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
105 uint32_t status = dlm_recover_status(ls);
109 static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
111 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
166 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
168 return a->show ? a->show(ls, buf) : 0;
174 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
176 return a->store ? a->store(ls, buf, len) : len;
181 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
182 kfree(ls);
198 static int do_uevent(struct dlm_ls *ls, int in)
201 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
203 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
205 log_rinfo(ls, "%s the lockspace group...", in ? "joining" : "leaving");
210 wait_event(ls->ls_uevent_wait,
211 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
213 log_rinfo(ls, "group event done %d", ls->ls_uevent_result);
215 return ls->ls_uevent_result;
221 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
223 add_uevent_var(env, "LOCKSPACE=%s", ls->ls_name);
253 struct dlm_ls *ls;
256 list_for_each_entry(ls, &lslist, ls_list) {
257 if (time_after_eq(jiffies, ls->ls_scan_time +
260 return ls;
269 struct dlm_ls *ls;
272 ls = find_ls_to_scan();
273 if (ls) {
274 if (dlm_lock_recovery_try(ls)) {
275 ls->ls_scan_time = jiffies;
276 dlm_scan_rsbs(ls);
277 dlm_scan_timeout(ls);
278 dlm_scan_waiters(ls);
279 dlm_unlock_recovery(ls);
281 ls->ls_scan_time += HZ;
310 struct dlm_ls *ls;
314 list_for_each_entry(ls, &lslist, ls_list) {
315 if (ls->ls_global_id == id) {
316 ls->ls_count++;
320 ls = NULL;
323 return ls;
328 struct dlm_ls *ls;
331 list_for_each_entry(ls, &lslist, ls_list) {
332 if (ls->ls_local_handle == lockspace) {
333 ls->ls_count++;
337 ls = NULL;
340 return ls;
345 struct dlm_ls *ls;
348 list_for_each_entry(ls, &lslist, ls_list) {
349 if (ls->ls_device.minor == minor) {
350 ls->ls_count++;
354 ls = NULL;
357 return ls;
360 void dlm_put_lockspace(struct dlm_ls *ls)
363 ls->ls_count--;
367 static void remove_lockspace(struct dlm_ls *ls)
371 if (ls->ls_count == 0) {
372 WARN_ON(ls->ls_create_count != 0);
373 list_del(&ls->ls_list);
418 struct dlm_ls *ls;
461 list_for_each_entry(ls, &lslist, ls_list) {
462 WARN_ON(ls->ls_create_count <= 0);
463 if (ls->ls_namelen != namelen)
465 if (memcmp(ls->ls_name, name, namelen))
471 ls->ls_create_count++;
472 *lockspace = ls;
483 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
484 if (!ls)
486 memcpy(ls->ls_name, name, namelen);
487 ls->ls_namelen = namelen;
488 ls->ls_lvblen = lvblen;
489 ls->ls_count = 0;
490 ls->ls_flags = 0;
491 ls->ls_scan_time = jiffies;
494 ls->ls_ops = ops;
495 ls->ls_ops_arg = ops_arg;
499 set_bit(LSFL_TIMEWARN, &ls->ls_flags);
503 ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
507 ls->ls_rsbtbl_size = size;
509 ls->ls_rsbtbl = vmalloc(array_size(size, sizeof(struct dlm_rsbtable)));
510 if (!ls->ls_rsbtbl)
513 ls->ls_rsbtbl[i].keep.rb_node = NULL;
514 ls->ls_rsbtbl[i].toss.rb_node = NULL;
515 spin_lock_init(&ls->ls_rsbtbl[i].lock);
518 spin_lock_init(&ls->ls_remove_spin);
521 ls->ls_remove_names[i] = kzalloc(DLM_RESNAME_MAXLEN+1,
523 if (!ls->ls_remove_names[i])
527 idr_init(&ls->ls_lkbidr);
528 spin_lock_init(&ls->ls_lkbidr_spin);
530 INIT_LIST_HEAD(&ls->ls_waiters);
531 mutex_init(&ls->ls_waiters_mutex);
532 INIT_LIST_HEAD(&ls->ls_orphans);
533 mutex_init(&ls->ls_orphans_mutex);
534 INIT_LIST_HEAD(&ls->ls_timeout);
535 mutex_init(&ls->ls_timeout_mutex);
537 INIT_LIST_HEAD(&ls->ls_new_rsb);
538 spin_lock_init(&ls->ls_new_rsb_spin);
540 INIT_LIST_HEAD(&ls->ls_nodes);
541 INIT_LIST_HEAD(&ls->ls_nodes_gone);
542 ls->ls_num_nodes = 0;
543 ls->ls_low_nodeid = 0;
544 ls->ls_total_weight = 0;
545 ls->ls_node_array = NULL;
547 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
548 ls->ls_stub_rsb.res_ls = ls;
550 ls->ls_debug_rsb_dentry = NULL;
551 ls->ls_debug_waiters_dentry = NULL;
553 init_waitqueue_head(&ls->ls_uevent_wait);
554 ls->ls_uevent_result = 0;
555 init_completion(&ls->ls_members_done);
556 ls->ls_members_result = -1;
558 mutex_init(&ls->ls_cb_mutex);
559 INIT_LIST_HEAD(&ls->ls_cb_delay);
561 ls->ls_recoverd_task = NULL;
562 mutex_init(&ls->ls_recoverd_active);
563 spin_lock_init(&ls->ls_recover_lock);
564 spin_lock_init(&ls->ls_rcom_spin);
565 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
566 ls->ls_recover_status = 0;
567 ls->ls_recover_seq = 0;
568 ls->ls_recover_args = NULL;
569 init_rwsem(&ls->ls_in_recovery);
570 init_rwsem(&ls->ls_recv_active);
571 INIT_LIST_HEAD(&ls->ls_requestqueue);
572 mutex_init(&ls->ls_requestqueue_mutex);
573 mutex_init(&ls->ls_clear_proc_locks);
575 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
576 if (!ls->ls_recover_buf)
579 ls->ls_slot = 0;
580 ls->ls_num_slots = 0;
581 ls->ls_slots_size = 0;
582 ls->ls_slots = NULL;
584 INIT_LIST_HEAD(&ls->ls_recover_list);
585 spin_lock_init(&ls->ls_recover_list_lock);
586 idr_init(&ls->ls_recover_idr);
587 spin_lock_init(&ls->ls_recover_idr_lock);
588 ls->ls_recover_list_count = 0;
589 ls->ls_local_handle = ls;
590 init_waitqueue_head(&ls->ls_wait_general);
591 INIT_LIST_HEAD(&ls->ls_root_list);
592 init_rwsem(&ls->ls_root_sem);
595 ls->ls_create_count = 1;
596 list_add(&ls->ls_list, &lslist);
600 error = dlm_callback_start(ls);
602 log_error(ls, "can't start dlm_callback %d", error);
607 init_waitqueue_head(&ls->ls_recover_lock_wait);
610 * Once started, dlm_recoverd first looks for ls in lslist, then
616 error = dlm_recoverd_start(ls);
618 log_error(ls, "can't start dlm_recoverd %d", error);
622 wait_event(ls->ls_recover_lock_wait,
623 test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
625 /* let kobject handle freeing of ls if there's an error */
628 ls->ls_kobj.kset = dlm_kset;
629 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
630 "%s", ls->ls_name);
633 kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
641 error = do_uevent(ls, 1);
645 wait_for_completion(&ls->ls_members_done);
646 error = ls->ls_members_result;
650 dlm_create_debug_file(ls);
652 log_rinfo(ls, "join complete");
653 *lockspace = ls;
657 do_uevent(ls, 0);
658 dlm_clear_members(ls);
659 kfree(ls->ls_node_array);
661 dlm_recoverd_stop(ls);
663 dlm_callback_stop(ls);
666 list_del(&ls->ls_list);
668 idr_destroy(&ls->ls_recover_idr);
669 kfree(ls->ls_recover_buf);
671 idr_destroy(&ls->ls_lkbidr);
674 kfree(ls->ls_remove_names[i]);
675 vfree(ls->ls_rsbtbl);
678 kobject_put(&ls->ls_kobj);
680 kfree(ls);
739 static int lockspace_busy(struct dlm_ls *ls, int force)
743 spin_lock(&ls->ls_lkbidr_spin);
745 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
747 rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
751 spin_unlock(&ls->ls_lkbidr_spin);
755 static int release_lockspace(struct dlm_ls *ls, int force)
761 busy = lockspace_busy(ls, force);
764 if (ls->ls_create_count == 1) {
768 /* remove_lockspace takes ls off lslist */
769 ls->ls_create_count = 0;
772 } else if (ls->ls_create_count > 1) {
773 rv = --ls->ls_create_count;
780 log_debug(ls, "release_lockspace no remove %d", rv);
784 dlm_device_deregister(ls);
787 do_uevent(ls, 0);
789 dlm_recoverd_stop(ls);
791 dlm_callback_stop(ls);
793 remove_lockspace(ls);
795 dlm_delete_debug_file(ls);
797 idr_destroy(&ls->ls_recover_idr);
798 kfree(ls->ls_recover_buf);
804 idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
805 idr_destroy(&ls->ls_lkbidr);
811 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
812 while ((n = rb_first(&ls->ls_rsbtbl[i].keep))) {
814 rb_erase(n, &ls->ls_rsbtbl[i].keep);
818 while ((n = rb_first(&ls->ls_rsbtbl[i].toss))) {
820 rb_erase(n, &ls->ls_rsbtbl[i].toss);
825 vfree(ls->ls_rsbtbl);
828 kfree(ls->ls_remove_names[i]);
830 while (!list_empty(&ls->ls_new_rsb)) {
831 rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
841 dlm_purge_requestqueue(ls);
842 kfree(ls->ls_recover_args);
843 dlm_clear_members(ls);
844 dlm_clear_members_gone(ls);
845 kfree(ls->ls_node_array);
846 log_rinfo(ls, "release_lockspace final free");
847 kobject_put(&ls->ls_kobj);
848 /* The ls structure will be freed when the kobject is done with it */
870 struct dlm_ls *ls;
873 ls = dlm_find_lockspace_local(lockspace);
874 if (!ls)
876 dlm_put_lockspace(ls);
879 error = release_lockspace(ls, force);
891 struct dlm_ls *ls;
897 list_for_each_entry(ls, &lslist, ls_list) {
898 if (!test_bit(LSFL_RUNNING, &ls->ls_flags)) {
903 log_error(ls, "no userland control daemon, stopping lockspace");
904 dlm_ls_stop(ls);
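
The matched lines are evidently the DLM lockspace code (fs/dlm/lockspace.c in the Linux kernel). The show/store wrappers matched at source lines 166-182 follow the standard kobject attribute pattern: sysfs passes back the embedded kobject, container_of() recovers the enclosing struct dlm_ls, and the wrapper forwards to the attribute's optional show/store callback, while the release hook frees the containing structure. A minimal sketch of that pattern, with hypothetical demo_* names standing in for the dlm ones (an illustration of the idiom, not the file's code):

#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

/* Hypothetical container object; struct dlm_ls plays this role above. */
struct demo_obj {
        int value;
        struct kobject kobj;
};

/* Per-attribute descriptor with optional show/store callbacks. */
struct demo_attr {
        struct attribute attr;
        ssize_t (*show)(struct demo_obj *o, char *buf);
        ssize_t (*store)(struct demo_obj *o, const char *buf, size_t len);
};

static ssize_t demo_attr_show(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
        struct demo_obj *o = container_of(kobj, struct demo_obj, kobj);
        struct demo_attr *a = container_of(attr, struct demo_attr, attr);

        /* Same shape as the show wrapper at line 168: 0 if no callback. */
        return a->show ? a->show(o, buf) : 0;
}

static ssize_t demo_attr_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t len)
{
        struct demo_obj *o = container_of(kobj, struct demo_obj, kobj);
        struct demo_attr *a = container_of(attr, struct demo_attr, attr);

        /* Same shape as the store wrapper at line 176: accept the write. */
        return a->store ? a->store(o, buf, len) : len;
}

static void demo_release(struct kobject *kobj)
{
        /* Like the release at lines 181-182: free the containing object. */
        kfree(container_of(kobj, struct demo_obj, kobj));
}

static const struct sysfs_ops demo_sysfs_ops = {
        .show  = demo_attr_show,
        .store = demo_attr_store,
};

static struct kobj_type demo_ktype = {
        .release   = demo_release,
        .sysfs_ops = &demo_sysfs_ops,
};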
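
The same sketch can show the lifetime rule behind the comment at source line 625 ("let kobject handle freeing of ls if there's an error") and the error path at lines 678-680: once kobject_init_and_add() has run, the object must be released through kobject_put(), which ends in the release hook, rather than by a direct kfree(). Continuing the hypothetical demo_* names from above, with demo_kset assumed to have been created elsewhere:

static struct kset *demo_kset;  /* assumed created with kset_create_and_add() */

static int demo_register(struct demo_obj *o, const char *name)
{
        int error;

        o->kobj.kset = demo_kset;
        error = kobject_init_and_add(&o->kobj, &demo_ktype, NULL, "%s", name);
        if (error) {
                /*
                 * kobject_init_and_add() initialises the kobject even when it
                 * fails, so freeing must go through kobject_put() and the
                 * release hook, never a direct kfree() of the container.
                 */
                kobject_put(&o->kobj);
                return error;
        }

        kobject_uevent(&o->kobj, KOBJ_ADD);     /* as at line 633 */
        return 0;
}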
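
Most other matches show the lockspace reference-counting discipline: each dlm_find_lockspace_global/_local/_device lookup (source lines 310-357) bumps ls_count and must be paired with dlm_put_lockspace() (line 363), and remove_lockspace() only takes the lockspace off lslist once ls_count has dropped to zero (line 371). A hedged caller-side sketch of that pairing, assuming it sits inside fs/dlm/ next to the module's own headers; only the find/put helpers are taken from the listing, the handler around them is made up:

#include "dlm_internal.h"       /* struct dlm_ls, as included by the other fs/dlm files */
#include "lockspace.h"          /* dlm_find_lockspace_global(), dlm_put_lockspace() */

/* Hypothetical handler for an event carrying a global lockspace id. */
static int demo_handle_lockspace_event(uint32_t global_id)
{
        struct dlm_ls *ls;

        ls = dlm_find_lockspace_global(global_id);      /* takes an ls_count reference */
        if (!ls)
                return -ENOENT;                         /* no such lockspace, or already going away */

        /* ... work on the lockspace; the reference keeps it on lslist ... */

        dlm_put_lockspace(ls);                          /* drops the reference */
        return 0;
}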
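
Finally, lines 61-68 and 198-215 are the two halves of a kernel/userspace handshake: do_uevent() emits KOBJ_ONLINE or KOBJ_OFFLINE, then sleeps in wait_event() until the control daemon writes a result into the event sysfs file, whose store handler records it, sets LSFL_UEVENT_WAIT and wakes the waiter. The same wait/wake idiom in isolation, with hypothetical demo_* names (a sketch of the pattern, not the dlm code):

#include <linux/bitops.h>
#include <linux/kernel.h>       /* kstrtoint() on most kernel versions */
#include <linux/types.h>
#include <linux/wait.h>

#define DEMO_WAIT_BIT   0

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static unsigned long demo_flags;
static int demo_result;

/* Waiter side, as in do_uevent() at lines 210-211: sleep until woken. */
static int demo_wait_for_daemon(void)
{
        wait_event(demo_waitq,
                   test_and_clear_bit(DEMO_WAIT_BIT, &demo_flags));
        return demo_result;
}

/* Waker side, as in dlm_event_store() at lines 61-68: record and wake. */
static ssize_t demo_event_store(const char *buf, size_t len)
{
        int rc = kstrtoint(buf, 0, &demo_result);

        if (rc)
                return rc;
        set_bit(DEMO_WAIT_BIT, &demo_flags);
        wake_up(&demo_waitq);
        return len;
}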