Lines Matching defs:state (drivers/md/bcache/writeback.c)
853 struct bch_dirty_init_state *state = info->state;
854 struct cache_set *c = state->c;
869 spin_lock(&state->idx_lock);
870 cur_idx = state->key_idx;
871 state->key_idx++;
872 spin_unlock(&state->idx_lock);
883 atomic_set(&state->enough, 1);
884 /* Update state->enough earlier */
892 if (bch_root_node_dirty_init(c, state->d, p) < 0)
901 /* In order to wake up state->wait in time */
903 if (atomic_dec_and_test(&state->started))
904 wake_up(&state->wait);
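The matches at 853-904 all fall inside bch_dirty_init_thread(): each worker claims the next key index under state->idx_lock, walks a shared btree iterator forward to that index, sets state->enough once the keys are exhausted, and on exit decrements state->started, waking state->wait when the last worker leaves. Below is a minimal user-space sketch of that dispatch-and-complete pattern, with a pthread mutex standing in for the spinlock, C11 atomics for atomic_t, and a mutex/condvar pair for the kernel wait queue; the shared iterator is simplified to direct indexing, and every name in it (struct dirty_state, NKEYS, and so on) is illustrative, not from the bcache source.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NKEYS    16                 /* stand-in for the root-node key count */
#define NWORKERS 4

struct dirty_state {
        pthread_mutex_t idx_lock;   /* plays the role of state->idx_lock */
        int key_idx;                /* next index to hand out (state->key_idx) */
        atomic_int enough;          /* no more keys to claim (state->enough) */
        atomic_int started;         /* live worker count (state->started) */
        pthread_mutex_t wait_mtx;   /* with 'wait' below, stands in for */
        pthread_cond_t wait;        /* the state->wait wait queue */
};

static void *dirty_init_thread(void *arg)
{
        struct dirty_state *state = arg;

        while (!atomic_load(&state->enough)) {
                int cur_idx;

                /* Claim the next key index (cf. lines 869-872). */
                pthread_mutex_lock(&state->idx_lock);
                cur_idx = state->key_idx++;
                pthread_mutex_unlock(&state->idx_lock);

                if (cur_idx >= NKEYS) {
                        /* Out of keys: flag it for the peers (cf. line 883). */
                        atomic_store(&state->enough, 1);
                        break;
                }
                printf("claimed key %d\n", cur_idx);  /* "process" the key */
        }

        /* The last worker out wakes the waiter (cf. lines 903-904). */
        if (atomic_fetch_sub(&state->started, 1) == 1) {
                pthread_mutex_lock(&state->wait_mtx);
                pthread_cond_signal(&state->wait);
                pthread_mutex_unlock(&state->wait_mtx);
        }
        return NULL;
}

int main(void)
{
        struct dirty_state state = { .key_idx = 0 };
        pthread_t tid[NWORKERS];
        int i;

        pthread_mutex_init(&state.idx_lock, NULL);
        pthread_mutex_init(&state.wait_mtx, NULL);
        pthread_cond_init(&state.wait, NULL);

        atomic_store(&state.started, NWORKERS);
        for (i = 0; i < NWORKERS; i++)
                pthread_create(&tid[i], NULL, dirty_init_thread, &state);

        /* Block until every worker has checked out (cf. line 988). */
        pthread_mutex_lock(&state.wait_mtx);
        while (atomic_load(&state.started) != 0)
                pthread_cond_wait(&state.wait, &state.wait_mtx);
        pthread_mutex_unlock(&state.wait_mtx);

        for (i = 0; i < NWORKERS; i++)
                pthread_join(tid[i], NULL);
        return 0;
}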
929 struct bch_dirty_init_state state;
956 memset(&state, 0, sizeof(struct bch_dirty_init_state));
957 state.c = c;
958 state.d = d;
959 state.total_threads = bch_btre_dirty_init_thread_nr();
960 state.key_idx = 0;
961 spin_lock_init(&state.idx_lock);
962 atomic_set(&state.started, 0);
963 atomic_set(&state.enough, 0);
964 init_waitqueue_head(&state.wait);
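The memset() at 956 and the field initializations through 964, together with the state.infos[i].state and state.infos[i].thread accesses at 973-975, imply a state structure along the following lines. This is a reconstruction from the fields used in the listing, not a verbatim copy; the authoritative definition lives in the bcache headers (drivers/md/bcache/writeback.h), and the array bound macro is assumed.

/* Reconstructed from this listing, not copied from writeback.h. */
struct dirty_init_thrd_info {
        struct bch_dirty_init_state     *state;
        struct task_struct              *thread;
};

struct bch_dirty_init_state {
        struct cache_set                *c;
        struct bcache_device            *d;
        int                             total_threads;
        int                             key_idx;   /* next key to hand out */
        spinlock_t                      idx_lock;  /* protects key_idx */
        atomic_t                        started;   /* live worker threads */
        atomic_t                        enough;    /* keys exhausted */
        wait_queue_head_t               wait;      /* woken by last worker */
        /* BCH_DIRTY_INIT_THRD_MAX is an assumed name for the bound. */
        struct dirty_init_thrd_info     infos[BCH_DIRTY_INIT_THRD_MAX];
};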
966 for (i = 0; i < state.total_threads; i++) {
967 /* Fetch latest state.enough earlier */
969 if (atomic_read(&state.enough))
972 atomic_inc(&state.started);
973 state.infos[i].state = &state;
974 state.infos[i].thread =
975 kthread_run(bch_dirty_init_thread, &state.infos[i],
977 if (IS_ERR(state.infos[i].thread)) {
979 atomic_dec(&state.started);
981 kthread_stop(state.infos[i].thread);
988 wait_event(state.wait, atomic_read(&state.started) == 0);
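Lines 966-988 are the spawn side of the handshake: state.started is bumped before each kthread_run(), dropped again if the spawn fails (with the threads that did start being stopped via kthread_stop()), and the caller finally parks in wait_event() until the count drains to zero. Extending the user-space sketch above (it reuses struct dirty_state and dirty_init_thread() from there), the same unwind-on-partial-failure shape might look like this; pthread_create() reports failure with a nonzero return rather than an ERR_PTR, so the test inverts, and pthread_join() plays the role of kthread_stop().

/* Extends the earlier sketch: reuses struct dirty_state and
 * dirty_init_thread() from above. */
static int start_workers(struct dirty_state *state, pthread_t *tid, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                /* A peer may already have found there is nothing
                 * left to do (cf. line 969). */
                if (atomic_load(&state->enough))
                        break;

                atomic_fetch_add(&state->started, 1);
                if (pthread_create(&tid[i], NULL, dirty_init_thread, state)) {
                        /* Undo the optimistic increment (cf. line 979)... */
                        atomic_fetch_sub(&state->started, 1);
                        /* ...then ask the workers that did start to wind
                         * down, and reap them (cf. line 981). */
                        atomic_store(&state->enough, 1);
                        for (i--; i >= 0; i--)
                                pthread_join(tid[i], NULL);
                        return -1;
                }
        }
        return 0;
}

Note the ordering this listing encodes: started is incremented before the thread exists, so a failed create must decrement it by hand; otherwise the wait_event() at line 988 would wait forever on a worker that never ran.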