Lines matching refs: cinfo (drivers/md/md-cluster.c)

188 struct md_cluster_info *cinfo = mddev->cluster_info;
195 res->ls = cinfo->lockspace;
272 struct md_cluster_info *cinfo = mddev->cluster_info;
278 cinfo->suspend_hi = le64_to_cpu(ri.hi);
279 cinfo->suspend_lo = le64_to_cpu(ri.lo);
289 struct md_cluster_info *cinfo = mddev->cluster_info;
295 while (cinfo->recovery_map) {
296 slot = fls64((u64)cinfo->recovery_map) - 1;
318 spin_lock_irq(&cinfo->suspend_lock);
319 cinfo->suspend_hi = 0;
320 cinfo->suspend_lo = 0;
321 cinfo->suspend_from = -1;
322 spin_unlock_irq(&cinfo->suspend_lock);
348 clear_bit(slot, &cinfo->recovery_map);
355 struct md_cluster_info *cinfo = mddev->cluster_info;
356 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
361 struct md_cluster_info *cinfo = mddev->cluster_info;
363 set_bit(slot, &cinfo->recovery_map);
364 if (!cinfo->recovery_thread) {
365 cinfo->recovery_thread = md_register_thread(recover_bitmaps,
367 if (!cinfo->recovery_thread) {
372 md_wakeup_thread(cinfo->recovery_thread);
378 struct md_cluster_info *cinfo = mddev->cluster_info;
383 cinfo->slot_number);
394 struct md_cluster_info *cinfo = mddev->cluster_info;
396 cinfo->slot_number = our_slot;
399 if (test_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state)) {
400 complete(&cinfo->completion);
401 clear_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
403 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
422 struct md_cluster_info *cinfo = res->mddev->cluster_info;
425 if (test_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state))
426 md_wakeup_thread(cinfo->recv_thread);
428 set_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state);
434 struct md_cluster_info *cinfo = mddev->cluster_info;
436 spin_lock_irq(&cinfo->suspend_lock);
437 cinfo->suspend_hi = 0;
438 cinfo->suspend_lo = 0;
439 spin_unlock_irq(&cinfo->suspend_lock);
446 struct md_cluster_info *cinfo = mddev->cluster_info;
490 md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
491 cinfo->sync_hi, lo, hi);
492 cinfo->sync_low = lo;
493 cinfo->sync_hi = hi;
496 spin_lock_irq(&cinfo->suspend_lock);
497 cinfo->suspend_from = slot;
498 cinfo->suspend_lo = lo;
499 cinfo->suspend_hi = hi;
500 spin_unlock_irq(&cinfo->suspend_lock);
507 struct md_cluster_info *cinfo = mddev->cluster_info;
517 init_completion(&cinfo->newdisk_completion);
518 set_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
520 wait_for_completion_timeout(&cinfo->newdisk_completion,
522 clear_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state);
529 struct md_cluster_info *cinfo = mddev->cluster_info;
532 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
535 test_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state));
623 struct md_cluster_info *cinfo = thread->mddev->cluster_info;
624 struct dlm_lock_resource *ack_lockres = cinfo->ack_lockres;
625 struct dlm_lock_resource *message_lockres = cinfo->message_lockres;
629 mutex_lock(&cinfo->recv_mutex);
633 mutex_unlock(&cinfo->recv_mutex);
660 mutex_unlock(&cinfo->recv_mutex);
667 static int lock_token(struct md_cluster_info *cinfo)
671 error = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
677 mutex_lock(&cinfo->recv_mutex);
685 static int lock_comm(struct md_cluster_info *cinfo, bool mddev_locked)
688 struct mddev *mddev = cinfo->mddev;
697 &cinfo->state)) {
699 &cinfo->state);
705 wait_event(cinfo->wait,
706 !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state));
707 rv = lock_token(cinfo);
709 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
713 static void unlock_comm(struct md_cluster_info *cinfo)
715 WARN_ON(cinfo->token_lockres->mode != DLM_LOCK_EX);
716 mutex_unlock(&cinfo->recv_mutex);
717 dlm_unlock_sync(cinfo->token_lockres);
718 clear_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state);
719 wake_up(&cinfo->wait);
734 static int __sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg)
737 int slot = cinfo->slot_number - 1;
741 error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_EX);
747 memcpy(cinfo->message_lockres->lksb.sb_lvbptr, (void *)cmsg,
750 error = dlm_lock_sync(cinfo->message_lockres, DLM_LOCK_CW);
758 error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_EX);
766 error = dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR);
774 error = dlm_unlock_sync(cinfo->message_lockres);
785 static int sendmsg(struct md_cluster_info *cinfo, struct cluster_msg *cmsg,
790 ret = lock_comm(cinfo, mddev_locked);
792 ret = __sendmsg(cinfo, cmsg);
793 unlock_comm(cinfo);
800 struct md_cluster_info *cinfo = mddev->cluster_info;
813 if (i == (cinfo->slot_number - 1)) {
824 (unsigned long long) cinfo->suspend_lo,
825 (unsigned long long) cinfo->suspend_hi,
827 cinfo->suspend_from = i;
859 struct md_cluster_info *cinfo;
863 cinfo = kzalloc(sizeof(struct md_cluster_info), GFP_KERNEL);
864 if (!cinfo)
867 INIT_LIST_HEAD(&cinfo->suspend_list);
868 spin_lock_init(&cinfo->suspend_lock);
869 init_completion(&cinfo->completion);
870 set_bit(MD_CLUSTER_BEGIN_JOIN_CLUSTER, &cinfo->state);
871 init_waitqueue_head(&cinfo->wait);
872 mutex_init(&cinfo->recv_mutex);
874 mddev->cluster_info = cinfo;
875 cinfo->mddev = mddev;
881 &md_ls_ops, mddev, &ops_rv, &cinfo->lockspace);
884 wait_for_completion(&cinfo->completion);
885 if (nodes < cinfo->slot_number) {
887 cinfo->slot_number, nodes);
893 cinfo->recv_thread = md_register_thread(recv_daemon, mddev, "cluster_recv");
894 if (!cinfo->recv_thread) {
898 cinfo->message_lockres = lockres_init(mddev, "message", NULL, 1);
899 if (!cinfo->message_lockres)
901 cinfo->token_lockres = lockres_init(mddev, "token", NULL, 0);
902 if (!cinfo->token_lockres)
904 cinfo->no_new_dev_lockres = lockres_init(mddev, "no-new-dev", NULL, 0);
905 if (!cinfo->no_new_dev_lockres)
908 ret = dlm_lock_sync(cinfo->token_lockres, DLM_LOCK_EX);
914 cinfo->ack_lockres = lockres_init(mddev, "ack", ack_bast, 0);
915 if (!cinfo->ack_lockres) {
920 if (dlm_lock_sync(cinfo->ack_lockres, DLM_LOCK_CR))
923 dlm_unlock_sync(cinfo->token_lockres);
925 if (dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR))
929 pr_info("md-cluster: Joined cluster %s slot %d\n", str, cinfo->slot_number);
930 snprintf(str, 64, "bitmap%04d", cinfo->slot_number - 1);
931 cinfo->bitmap_lockres = lockres_init(mddev, str, NULL, 1);
932 if (!cinfo->bitmap_lockres) {
936 if (dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW)) {
942 cinfo->resync_lockres = lockres_init(mddev, "resync", NULL, 0);
943 if (!cinfo->resync_lockres) {
950 set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
951 md_unregister_thread(&cinfo->recovery_thread);
952 md_unregister_thread(&cinfo->recv_thread);
953 lockres_free(cinfo->message_lockres);
954 lockres_free(cinfo->token_lockres);
955 lockres_free(cinfo->ack_lockres);
956 lockres_free(cinfo->no_new_dev_lockres);
957 lockres_free(cinfo->resync_lockres);
958 lockres_free(cinfo->bitmap_lockres);
959 if (cinfo->lockspace)
960 dlm_release_lockspace(cinfo->lockspace, 2);
962 kfree(cinfo);
968 struct md_cluster_info *cinfo = mddev->cluster_info;
973 set_bit(MD_CLUSTER_ALREADY_IN_CLUSTER, &cinfo->state);
975 if (test_and_clear_bit(MD_CLUSTER_PENDING_RECV_EVENT, &cinfo->state))
976 md_wakeup_thread(cinfo->recv_thread);
981 struct md_cluster_info *cinfo = mddev->cluster_info;
986 err = sendmsg(cinfo, &cmsg, 1);
995 struct md_cluster_info *cinfo = mddev->cluster_info;
997 if (!cinfo)
1008 if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
1013 set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
1014 md_unregister_thread(&cinfo->recovery_thread);
1015 md_unregister_thread(&cinfo->recv_thread);
1016 lockres_free(cinfo->message_lockres);
1017 lockres_free(cinfo->token_lockres);
1018 lockres_free(cinfo->ack_lockres);
1019 lockres_free(cinfo->no_new_dev_lockres);
1020 lockres_free(cinfo->resync_lockres);
1021 lockres_free(cinfo->bitmap_lockres);
1023 dlm_release_lockspace(cinfo->lockspace, 2);
1024 kfree(cinfo);
1034 struct md_cluster_info *cinfo = mddev->cluster_info;
1036 return cinfo->slot_number - 1;
1047 struct md_cluster_info *cinfo = mddev->cluster_info;
1055 &cinfo->state);
1059 wait_event(cinfo->wait,
1060 !test_and_set_bit(MD_CLUSTER_SEND_LOCK, &cinfo->state) ||
1061 test_and_clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state));
1064 if (cinfo->token_lockres->mode == DLM_LOCK_EX) {
1065 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
1069 ret = lock_token(cinfo);
1070 clear_bit_unlock(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
1076 struct md_cluster_info *cinfo = mddev->cluster_info;
1093 ret = __sendmsg(cinfo, &cmsg);
1096 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
1097 unlock_comm(cinfo);
1103 struct md_cluster_info *cinfo = mddev->cluster_info;
1104 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
1105 unlock_comm(cinfo);
1110 struct md_cluster_info *cinfo = mddev->cluster_info;
1116 ret = sendmsg(cinfo, &cmsg, 0);
1256 struct md_cluster_info *cinfo = mddev->cluster_info;
1263 if (lock_comm(cinfo, 1)) {
1282 ret = __sendmsg(cinfo, &cmsg);
1286 unlock_comm(cinfo);
1291 unlock_comm(cinfo);
1303 ret = __sendmsg(cinfo, &cmsg);
1314 ret = __sendmsg(cinfo, &cmsg);
1319 unlock_comm(cinfo);
1324 struct md_cluster_info *cinfo = mddev->cluster_info;
1325 return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
1330 struct md_cluster_info *cinfo = mddev->cluster_info;
1332 spin_lock_irq(&cinfo->suspend_lock);
1333 *lo = cinfo->suspend_lo;
1334 *hi = cinfo->suspend_hi;
1335 spin_unlock_irq(&cinfo->suspend_lock);
1340 struct md_cluster_info *cinfo = mddev->cluster_info;
1346 memcpy(&ri, cinfo->bitmap_lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
1351 add_resync_info(cinfo->bitmap_lockres, lo, hi);
1353 dlm_lock_sync(cinfo->bitmap_lockres, DLM_LOCK_PW);
1363 return sendmsg(cinfo, &cmsg, 1);
1365 return sendmsg(cinfo, &cmsg, 0);
1370 struct md_cluster_info *cinfo = mddev->cluster_info;
1381 dlm_unlock_sync(cinfo->resync_lockres);
1388 struct md_cluster_info *cinfo = mddev->cluster_info;
1392 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
1395 spin_lock_irq(&cinfo->suspend_lock);
1396 if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi)
1398 spin_unlock_irq(&cinfo->suspend_lock);
1408 struct md_cluster_info *cinfo = mddev->cluster_info;
1418 if (lock_comm(cinfo, 1))
1420 ret = __sendmsg(cinfo, &cmsg);
1422 unlock_comm(cinfo);
1425 cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE;
1426 ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX);
1427 cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE;
1432 unlock_comm(cinfo);
1434 dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_CR);
1447 set_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
1448 wake_up(&cinfo->wait);
1455 struct md_cluster_info *cinfo = mddev->cluster_info;
1456 clear_bit(MD_CLUSTER_SEND_LOCKED_ALREADY, &cinfo->state);
1457 unlock_comm(cinfo);
1462 struct md_cluster_info *cinfo = mddev->cluster_info;
1464 if (!test_bit(MD_CLUSTER_WAITING_FOR_NEWDISK, &cinfo->state)) {
1470 dlm_unlock_sync(cinfo->no_new_dev_lockres);
1471 complete(&cinfo->newdisk_completion);
1478 struct md_cluster_info *cinfo = mddev->cluster_info;
1481 return sendmsg(cinfo, &cmsg, 1);
1488 struct md_cluster_info *cinfo = mddev->cluster_info;
1490 cinfo->other_bitmap_lockres =
1493 if (!cinfo->other_bitmap_lockres) {
1505 cinfo->other_bitmap_lockres[i] = lockres_init(mddev, str, NULL, 1);
1506 if (!cinfo->other_bitmap_lockres[i])
1509 cinfo->other_bitmap_lockres[i]->flags |= DLM_LKF_NOQUEUE;
1510 ret = dlm_lock_sync(cinfo->other_bitmap_lockres[i], DLM_LOCK_PW);
1521 struct md_cluster_info *cinfo = mddev->cluster_info;
1525 if (cinfo->other_bitmap_lockres) {
1527 if (cinfo->other_bitmap_lockres[i]) {
1528 lockres_free(cinfo->other_bitmap_lockres[i]);
1531 kfree(cinfo->other_bitmap_lockres);
1532 cinfo->other_bitmap_lockres = NULL;
1542 struct md_cluster_info *cinfo = mddev->cluster_info;
1546 err = sendmsg(cinfo, &cmsg, 1);
1551 if (sn == (cinfo->slot_number - 1))
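
Taken together, the references above outline the shape of struct md_cluster_info, the per-mddev cluster state that mddev->cluster_info points to. The sketch below is a reconstruction from the field accesses in this listing only, not a copy of the definition in md-cluster.c: member types are inferred from how each field is used (set_bit/fls64 suggest an unsigned long bitmap, init_completion a struct completion, and so on), the ordering is arbitrary, and any members this listing never touches are omitted.

	/*
	 * Hedged sketch of struct md_cluster_info, inferred from the cinfo
	 * references listed above. The authoritative definition lives near the
	 * top of drivers/md/md-cluster.c; types come from <linux/dlm.h>,
	 * <linux/wait.h>, <linux/completion.h> and drivers/md/md.h.
	 */
	struct md_cluster_info {
		struct mddev *mddev;               /* back-pointer, set when joining */
		dlm_lockspace_t *lockspace;        /* handle returned by dlm_new_lockspace() */
		int slot_number;                   /* our DLM slot, 1-based */
		struct completion completion;      /* signalled once slot assignment arrives */
		struct mutex recv_mutex;           /* serializes recv_daemon against senders */

		/* communication lock resources */
		struct dlm_lock_resource *token_lockres;      /* cluster-wide send token */
		struct dlm_lock_resource *message_lockres;     /* message payload in the LVB */
		struct dlm_lock_resource *ack_lockres;          /* receivers acknowledge via bast */
		struct dlm_lock_resource *no_new_dev_lockres;   /* fences ADD_NEW_DISK races */

		/* per-node bitmap and resync lock resources */
		struct dlm_lock_resource *bitmap_lockres;       /* our own bitmapNNNN lock */
		struct dlm_lock_resource **other_bitmap_lockres;/* peers' bitmap locks */
		struct dlm_lock_resource *resync_lockres;       /* one resync in the cluster at a time */

		struct md_thread *recovery_thread; /* runs recover_bitmaps for failed slots */
		struct md_thread *recv_thread;     /* runs recv_daemon for incoming messages */
		unsigned long recovery_map;        /* bitmap of slots whose bitmaps need recovery */
		unsigned long state;               /* MD_CLUSTER_* flag bits */
		wait_queue_head_t wait;            /* waiters for the send lock / token */

		struct list_head suspend_list;     /* initialized at join time */
		spinlock_t suspend_lock;           /* protects the suspend_* fields below */
		sector_t suspend_lo, suspend_hi;   /* range suspended by a peer's resync */
		int suspend_from;                  /* slot that owns the suspended range */

		sector_t sync_low, sync_hi;        /* last resync range we broadcast */
		struct completion newdisk_completion; /* ADD_NEW_DISK handshake with userspace */
	};

The grouping above mirrors how the listing uses the fields: the *_lockres members back the token/message/ack messaging protocol seen in lock_comm(), __sendmsg() and unlock_comm(), while the suspend_* fields (always taken under suspend_lock) record the region a resyncing peer has asked other nodes to keep writes away from.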